-rw-r--r--  Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt   172
-rw-r--r--  Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml  201
-rw-r--r--  Documentation/devicetree/bindings/dvfs/performance-domain.yaml   14
-rw-r--r--  arch/arm/boot/dts/imx7s.dtsi                                      16
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c                                    43
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c                               3
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c                                 20
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c                               2
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c                                     2
9 files changed, 272 insertions(+), 201 deletions(-)
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt
deleted file mode 100644
index 9299028ee712..000000000000
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt
+++ /dev/null
@@ -1,172 +0,0 @@
-Qualcomm Technologies, Inc. CPUFREQ Bindings
-
-CPUFREQ HW is a hardware engine used by some Qualcomm Technologies, Inc. (QTI)
-SoCs to manage frequency in hardware. It is capable of controlling frequency
-for multiple clusters.
-
-Properties:
-- compatible
- Usage: required
- Value type: <string>
- Definition: must be "qcom,cpufreq-hw" or "qcom,cpufreq-epss".
-
-- clocks
- Usage: required
- Value type: <phandle> From common clock binding.
- Definition: clock handle for XO clock and GPLL0 clock.
-
-- clock-names
- Usage: required
- Value type: <string> From common clock binding.
- Definition: must be "xo", "alternate".
-
-- reg
- Usage: required
- Value type: <prop-encoded-array>
- Definition: Addresses and sizes for the memory of the HW bases in
- each frequency domain.
-- reg-names
- Usage: Optional
- Value type: <string>
- Definition: Frequency domain name i.e.
- "freq-domain0", "freq-domain1".
-
-- #freq-domain-cells:
- Usage: required.
- Definition: Number of cells in a freqency domain specifier.
-
-* Property qcom,freq-domain
-Devices supporting freq-domain must set their "qcom,freq-domain" property with
-phandle to a cpufreq_hw followed by the Domain ID(0/1) in the CPU DT node.
-
-
-Example:
-
-Example 1: Dual-cluster, Quad-core per cluster. CPUs within a cluster switch
-DCVS state together.
-
-/ {
- cpus {
- #address-cells = <2>;
- #size-cells = <0>;
-
- CPU0: cpu@0 {
- device_type = "cpu";
- compatible = "qcom,kryo385";
- reg = <0x0 0x0>;
- enable-method = "psci";
- next-level-cache = <&L2_0>;
- qcom,freq-domain = <&cpufreq_hw 0>;
- L2_0: l2-cache {
- compatible = "cache";
- next-level-cache = <&L3_0>;
- L3_0: l3-cache {
- compatible = "cache";
- };
- };
- };
-
- CPU1: cpu@100 {
- device_type = "cpu";
- compatible = "qcom,kryo385";
- reg = <0x0 0x100>;
- enable-method = "psci";
- next-level-cache = <&L2_100>;
- qcom,freq-domain = <&cpufreq_hw 0>;
- L2_100: l2-cache {
- compatible = "cache";
- next-level-cache = <&L3_0>;
- };
- };
-
- CPU2: cpu@200 {
- device_type = "cpu";
- compatible = "qcom,kryo385";
- reg = <0x0 0x200>;
- enable-method = "psci";
- next-level-cache = <&L2_200>;
- qcom,freq-domain = <&cpufreq_hw 0>;
- L2_200: l2-cache {
- compatible = "cache";
- next-level-cache = <&L3_0>;
- };
- };
-
- CPU3: cpu@300 {
- device_type = "cpu";
- compatible = "qcom,kryo385";
- reg = <0x0 0x300>;
- enable-method = "psci";
- next-level-cache = <&L2_300>;
- qcom,freq-domain = <&cpufreq_hw 0>;
- L2_300: l2-cache {
- compatible = "cache";
- next-level-cache = <&L3_0>;
- };
- };
-
- CPU4: cpu@400 {
- device_type = "cpu";
- compatible = "qcom,kryo385";
- reg = <0x0 0x400>;
- enable-method = "psci";
- next-level-cache = <&L2_400>;
- qcom,freq-domain = <&cpufreq_hw 1>;
- L2_400: l2-cache {
- compatible = "cache";
- next-level-cache = <&L3_0>;
- };
- };
-
- CPU5: cpu@500 {
- device_type = "cpu";
- compatible = "qcom,kryo385";
- reg = <0x0 0x500>;
- enable-method = "psci";
- next-level-cache = <&L2_500>;
- qcom,freq-domain = <&cpufreq_hw 1>;
- L2_500: l2-cache {
- compatible = "cache";
- next-level-cache = <&L3_0>;
- };
- };
-
- CPU6: cpu@600 {
- device_type = "cpu";
- compatible = "qcom,kryo385";
- reg = <0x0 0x600>;
- enable-method = "psci";
- next-level-cache = <&L2_600>;
- qcom,freq-domain = <&cpufreq_hw 1>;
- L2_600: l2-cache {
- compatible = "cache";
- next-level-cache = <&L3_0>;
- };
- };
-
- CPU7: cpu@700 {
- device_type = "cpu";
- compatible = "qcom,kryo385";
- reg = <0x0 0x700>;
- enable-method = "psci";
- next-level-cache = <&L2_700>;
- qcom,freq-domain = <&cpufreq_hw 1>;
- L2_700: l2-cache {
- compatible = "cache";
- next-level-cache = <&L3_0>;
- };
- };
- };
-
- soc {
- cpufreq_hw: cpufreq@17d43000 {
- compatible = "qcom,cpufreq-hw";
- reg = <0x17d43000 0x1400>, <0x17d45800 0x1400>;
- reg-names = "freq-domain0", "freq-domain1";
-
- clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;
- clock-names = "xo", "alternate";
-
- #freq-domain-cells = <1>;
- };
-}
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
new file mode 100644
index 000000000000..2f1b8b6852a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
@@ -0,0 +1,201 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/cpufreq/cpufreq-qcom-hw.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. CPUFREQ
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+description: |
+
+ CPUFREQ HW is a hardware engine used by some Qualcomm Technologies, Inc. (QTI)
+ SoCs to manage frequency in hardware. It is capable of controlling frequency
+ for multiple clusters.
+
+properties:
+ compatible:
+ oneOf:
+ - description: v1 of CPUFREQ HW
+ items:
+ - const: qcom,cpufreq-hw
+
+ - description: v2 of CPUFREQ HW (EPSS)
+ items:
+ - enum:
+ - qcom,sm8250-cpufreq-epss
+ - const: qcom,cpufreq-epss
+
+ reg:
+ minItems: 2
+ items:
+ - description: Frequency domain 0 register region
+ - description: Frequency domain 1 register region
+ - description: Frequency domain 2 register region
+
+ reg-names:
+ minItems: 2
+ items:
+ - const: freq-domain0
+ - const: freq-domain1
+ - const: freq-domain2
+
+ clocks:
+ items:
+ - description: XO Clock
+ - description: GPLL0 Clock
+
+ clock-names:
+ items:
+ - const: xo
+ - const: alternate
+
+ '#freq-domain-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#freq-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
+
+ // Example 1: Dual-cluster, Quad-core per cluster. CPUs within a cluster
+ // switch DCVS state together.
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_100>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&L2_200>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&L2_300>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&L2_400>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&L2_500>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ next-level-cache = <&L2_600>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ next-level-cache = <&L2_700>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpufreq@17d43000 {
+ compatible = "qcom,cpufreq-hw";
+ reg = <0x17d43000 0x1400>, <0x17d45800 0x1400>;
+ reg-names = "freq-domain0", "freq-domain1";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;
+ clock-names = "xo", "alternate";
+
+ #freq-domain-cells = <1>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/dvfs/performance-domain.yaml b/Documentation/devicetree/bindings/dvfs/performance-domain.yaml
index c8b91207f34d..9e0bcf1a89fe 100644
--- a/Documentation/devicetree/bindings/dvfs/performance-domain.yaml
+++ b/Documentation/devicetree/bindings/dvfs/performance-domain.yaml
@@ -52,10 +52,16 @@ additionalProperties: true
examples:
- |
- performance: performance-controller@12340000 {
- compatible = "qcom,cpufreq-hw";
- reg = <0x12340000 0x1000>;
- #performance-domain-cells = <1>;
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ performance: performance-controller@11bc00 {
+ compatible = "mediatek,cpufreq-hw";
+ reg = <0 0x0011bc10 0 0x120>, <0 0x0011bd30 0 0x120>;
+
+ #performance-domain-cells = <1>;
+ };
};
// The node above defines a performance controller that is a performance
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 52a9aeecdbb2..5af6d58666f4 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -76,6 +76,22 @@
clock-latency = <61036>; /* two CLK32 periods */
clocks = <&clks IMX7D_CLK_ARM>;
cpu-idle-states = <&cpu_sleep_wait>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
+ nvmem-cells = <&fuse_grade>;
+ nvmem-cell-names = "speed_grade";
+ };
+ };
+
+ cpu0_opp_table: opp-table {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-792000000 {
+ opp-hz = /bits/ 64 <792000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <150000>;
+ opp-supported-hw = <0xf>, <0xf>;
};
};
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index db17196266e4..82d370ae6a4a 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -303,52 +303,48 @@ static u64 cppc_get_dmi_max_khz(void)
/*
* If CPPC lowest_freq and nominal_freq registers are exposed then we can
- * use them to convert perf to freq and vice versa
- *
- * If the perf/freq point lies between Nominal and Lowest, we can treat
- * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line
- * and extrapolate the rest
- * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion
+ * use them to convert perf to freq and vice versa. The conversion is
+ * extrapolated as an affine function passing through the two points:
+ * - (Low perf, Low freq)
+ * - (Nominal perf, Nominal freq)
*/
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
unsigned int perf)
{
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
+ s64 retval, offset = 0;
static u64 max_khz;
u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
- if (perf >= caps->nominal_perf) {
- mul = caps->nominal_freq;
- div = caps->nominal_perf;
- } else {
- mul = caps->nominal_freq - caps->lowest_freq;
- div = caps->nominal_perf - caps->lowest_perf;
- }
+ mul = caps->nominal_freq - caps->lowest_freq;
+ div = caps->nominal_perf - caps->lowest_perf;
+ offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
} else {
if (!max_khz)
max_khz = cppc_get_dmi_max_khz();
mul = max_khz;
div = caps->highest_perf;
}
- return (u64)perf * mul / div;
+
+ retval = offset + div64_u64(perf * mul, div);
+ if (retval >= 0)
+ return retval;
+ return 0;
}
static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
unsigned int freq)
{
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
+ s64 retval, offset = 0;
static u64 max_khz;
u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
- if (freq >= caps->nominal_freq) {
- mul = caps->nominal_perf;
- div = caps->nominal_freq;
- } else {
- mul = caps->lowest_perf;
- div = caps->lowest_freq;
- }
+ mul = caps->nominal_perf - caps->lowest_perf;
+ div = caps->nominal_freq - caps->lowest_freq;
+ offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
} else {
if (!max_khz)
max_khz = cppc_get_dmi_max_khz();
@@ -356,7 +352,10 @@ static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
div = max_khz;
}
- return (u64)freq * mul / div;
+ retval = offset + div64_u64(freq * mul, div);
+ if (retval >= 0)
+ return retval;
+ return 0;
}
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
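For reference, the cppc_cpufreq change above replaces the old two-segment conversion with a single affine (straight-line) mapping through (lowest_perf, lowest_freq) and (nominal_perf, nominal_freq), clamped to zero if the extrapolation goes negative. The standalone snippet below is an editorial illustration of that mapping with made-up capability values; it is not part of the patch, and the driver itself uses div64_u64() on caps read from the firmware's _CPC tables.

#include <stdint.h>
#include <stdio.h>

/* Illustrative CPPC capability values; the driver reads the real ones from
 * the firmware's _CPC package. */
struct caps {
	uint64_t lowest_perf, nominal_perf;
	uint64_t lowest_freq, nominal_freq;	/* kHz */
};

/* Affine map through (lowest_perf, lowest_freq) and (nominal_perf, nominal_freq). */
static int64_t perf_to_khz(const struct caps *c, uint64_t perf)
{
	uint64_t mul = c->nominal_freq - c->lowest_freq;
	uint64_t div = c->nominal_perf - c->lowest_perf;
	int64_t offset = (int64_t)c->nominal_freq -
			 (int64_t)(c->nominal_perf * mul / div);

	return offset + (int64_t)(perf * mul / div);
}

int main(void)
{
	struct caps c = {
		.lowest_perf = 100,    .nominal_perf = 300,
		.lowest_freq = 300000, .nominal_freq = 1500000,
	};

	/* Points between and beyond the two anchors lie on the same line. */
	printf("perf 200 -> %lld kHz\n", (long long)perf_to_khz(&c, 200)); /* 900000 */
	printf("perf 400 -> %lld kHz\n", (long long)perf_to_khz(&c, 400)); /* 2100000 */
	return 0;
}

With these numbers the slope is 6000 kHz per unit of perf, so perf values above nominal extrapolate on the same line instead of switching to the nominal-only ratio the old code used.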
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index ca1d103ec449..96de1536e1cb 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -110,6 +110,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "fsl,imx7ulp", },
{ .compatible = "fsl,imx7d", },
+ { .compatible = "fsl,imx7s", },
{ .compatible = "fsl,imx8mq", },
{ .compatible = "fsl,imx8mm", },
{ .compatible = "fsl,imx8mn", },
@@ -138,9 +139,11 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,msm8996", },
{ .compatible = "qcom,qcs404", },
{ .compatible = "qcom,sa8155p" },
+ { .compatible = "qcom,sa8540p" },
{ .compatible = "qcom,sc7180", },
{ .compatible = "qcom,sc7280", },
{ .compatible = "qcom,sc8180x", },
+ { .compatible = "qcom,sc8280xp", },
{ .compatible = "qcom,sdm845", },
{ .compatible = "qcom,sm6350", },
{ .compatible = "qcom,sm8150", },
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index effbb680b453..f9d593ff4718 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -28,6 +28,7 @@
struct qcom_cpufreq_soc_data {
u32 reg_enable;
+ u32 reg_dcvs_ctrl;
u32 reg_freq_lut;
u32 reg_volt_lut;
u32 reg_current_vote;
@@ -50,6 +51,8 @@ struct qcom_cpufreq_data {
bool cancel_throttle;
struct delayed_work throttle_work;
struct cpufreq_policy *policy;
+
+ bool per_core_dcvs;
};
static unsigned long cpu_hw_rate, xo_rate;
@@ -102,9 +105,14 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
struct qcom_cpufreq_data *data = policy->driver_data;
const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
unsigned long freq = policy->freq_table[index].frequency;
+ unsigned int i;
writel_relaxed(index, data->base + soc_data->reg_perf_state);
+ if (data->per_core_dcvs)
+ for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
+ writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);
+
if (icc_scaling_enabled)
qcom_cpufreq_set_bw(policy, freq);
@@ -137,10 +145,15 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
struct qcom_cpufreq_data *data = policy->driver_data;
const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
unsigned int index;
+ unsigned int i;
index = policy->cached_resolved_idx;
writel_relaxed(index, data->base + soc_data->reg_perf_state);
+ if (data->per_core_dcvs)
+ for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
+ writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);
+
return policy->freq_table[index].frequency;
}
@@ -342,6 +355,7 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
.reg_enable = 0x0,
+ .reg_dcvs_ctrl = 0xbc,
.reg_freq_lut = 0x110,
.reg_volt_lut = 0x114,
.reg_current_vote = 0x704,
@@ -351,6 +365,7 @@ static const struct qcom_cpufreq_soc_data qcom_soc_data = {
static const struct qcom_cpufreq_soc_data epss_soc_data = {
.reg_enable = 0x0,
+ .reg_dcvs_ctrl = 0xb0,
.reg_freq_lut = 0x100,
.reg_volt_lut = 0x200,
.reg_perf_state = 0x320,
@@ -481,8 +496,11 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
goto error;
}
+ if (readl_relaxed(base + data->soc_data->reg_dcvs_ctrl) & 0x1)
+ data->per_core_dcvs = true;
+
qcom_get_related_cpus(index, policy->cpus);
- if (!cpumask_weight(policy->cpus)) {
+ if (cpumask_empty(policy->cpus)) {
dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
ret = -ENOENT;
goto error;
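The qcom-cpufreq-hw hunks above do two things: probe now reads a DCVS control register (bit 0 advertises per-core DCVS), and when that bit is set the target_index and fast_switch paths mirror the chosen LUT index into one perf-state register per related CPU, 4 bytes apart. The snippet below is an editorial model of that write pattern only; a plain array stands in for the MMIO registers the driver touches with writel_relaxed(), and the domain size is fixed instead of coming from policy->related_cpus.

#include <stdint.h>
#include <stdio.h>

#define CPUS_IN_DOMAIN 4

static uint32_t perf_state_reg[CPUS_IN_DOMAIN];	/* 32-bit regs, 4 bytes apart */

static void set_domain_index(uint32_t index, int per_core_dcvs)
{
	unsigned int i;

	/* The first perf-state register is always written. */
	perf_state_reg[0] = index;

	/* With per-core DCVS enabled (DCVS_CTRL bit 0), the same index is
	 * mirrored into the slot of every other CPU in the frequency domain. */
	if (per_core_dcvs)
		for (i = 1; i < CPUS_IN_DOMAIN; i++)
			perf_state_reg[i] = index;
}

int main(void)
{
	set_domain_index(5, 1);
	for (unsigned int i = 0; i < CPUS_IN_DOMAIN; i++)
		printf("cpu%u perf-state index = %u\n", i, perf_state_reg[i]);
	return 0;
}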
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index d1744b5d9619..6dfa86971a75 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -130,7 +130,7 @@ static void get_krait_bin_format_b(struct device *cpu_dev,
}
/* Check PVS_BLOW_STATUS */
- pte_efuse = *(((u32 *)buf) + 4);
+ pte_efuse = *(((u32 *)buf) + 1);
pte_efuse &= BIT(21);
if (pte_efuse) {
dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs);
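The qcom-cpufreq-nvmem one-liner above is an offset fix: pointer arithmetic on a u32 * is scaled by sizeof(u32), so the old "+ 4" addressed byte offset 16, while the patch asserts PVS_BLOW_STATUS lives in the second 32-bit word at byte offset 4, which "+ 1" selects. The snippet below is an editorial illustration with a hypothetical 8-byte efuse blob (the real cell size comes from the nvmem provider), not part of the patch; the cast-and-dereference mirrors the driver's own idiom.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical 8-byte efuse blob: two 32-bit words, with bit 21 set
	 * in the second word. */
	uint32_t words[2] = { 0x11111111, 1u << 21 };
	uint8_t buf[sizeof(words)];

	memcpy(buf, words, sizeof(words));

	/* "+ 1" on a (u32 *) selects byte offset 4 (the second word),
	 * whereas the old "+ 4" selected byte offset 16 -- beyond this
	 * 8-byte buffer. */
	uint32_t pte_efuse = *(((uint32_t *)buf) + 1);

	printf("PVS_BLOW_STATUS set: %s\n",
	       (pte_efuse & (1u << 21)) ? "yes" : "no");
	return 0;
}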
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 1e0cd4d165f0..919fa6e3f462 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -154,7 +154,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
* table and opp-shared.
*/
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
- if (ret || !cpumask_weight(priv->opp_shared_cpus)) {
+ if (ret || cpumask_empty(priv->opp_shared_cpus)) {
/*
* Either opp-table is not set or no opp-shared was found.
* Use the CPU mask from SCMI to designate CPUs sharing an OPP