31 files changed, 221 insertions, 257 deletions
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 0f04feb6cafa..c9ebacf5c88e 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -73,20 +73,17 @@ static unsigned int acpi_pstate_strict;

 static bool boost_state(unsigned int cpu)
 {
-        u32 lo, hi;
         u64 msr;

         switch (boot_cpu_data.x86_vendor) {
         case X86_VENDOR_INTEL:
         case X86_VENDOR_CENTAUR:
         case X86_VENDOR_ZHAOXIN:
-                rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
-                msr = lo | ((u64)hi << 32);
+                rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
                 return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
         case X86_VENDOR_HYGON:
         case X86_VENDOR_AMD:
-                rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-                msr = lo | ((u64)hi << 32);
+                rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr);
                 return !(msr & MSR_K7_HWCR_CPB_DIS);
         }
         return false;
@@ -1028,7 +1025,7 @@ static struct platform_driver acpi_cpufreq_platdrv = {
         .driver = {
                 .name = "acpi-cpufreq",
         },
-        .remove_new = acpi_cpufreq_remove,
+        .remove = acpi_cpufreq_remove,
 };

 static int __init acpi_cpufreq_init(void)
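A note on the first hunk above: rdmsrl_on_cpu() returns the MSR as a single u64, replacing the rdmsr_on_cpu() call that returns two 32-bit halves which the caller then had to stitch together. A minimal userspace sketch of the recombination being deleted (fake_msr and fake_rdmsr are invented stand-ins, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

/* Invented 64-bit MSR value; rdmsr hands it back as two 32-bit halves. */
static const uint64_t fake_msr = 0x0000004000850089ULL;

/* Models the rdmsr_on_cpu() calling convention. */
static void fake_rdmsr(uint32_t *lo, uint32_t *hi)
{
        *lo = (uint32_t)fake_msr;
        *hi = (uint32_t)(fake_msr >> 32);
}

int main(void)
{
        uint32_t lo, hi;
        uint64_t msr;

        fake_rdmsr(&lo, &hi);
        /* The recombination the patch deletes: */
        msr = lo | ((uint64_t)hi << 32);

        /* rdmsrl_on_cpu() returns this 64-bit value directly. */
        printf("recombined %#llx, matches: %d\n",
               (unsigned long long)msr, msr == fake_msr);
        return 0;
}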
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index f66701514d90..a261d7300951 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -227,10 +227,10 @@ static void amd_pstate_ut_check_freq(u32 index)
                         goto skip_test;
                 }

-                if (cpudata->min_freq != policy->min) {
+                if (cpudata->lowest_nonlinear_freq != policy->min) {
                         amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
-                        pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
-                                __func__, cpu, cpudata->min_freq, policy->min);
+                        pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n",
+                                __func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
                         goto skip_test;
                 }

diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index b63863f77c67..f834cc8205e2 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -233,7 +233,7 @@ static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
         return index;
 }

-static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+static void msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
                                u32 des_perf, u32 max_perf, bool fast_switch)
 {
         if (fast_switch)
@@ -243,7 +243,7 @@ static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
                       READ_ONCE(cpudata->cppc_req_cached));
 }

-DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);

 static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
                                           u32 min_perf, u32 des_perf,
@@ -306,11 +306,17 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
         return ret;
 }

-static inline int pstate_enable(bool enable)
+static inline int msr_cppc_enable(bool enable)
 {
         int ret, cpu;
         unsigned long logical_proc_id_mask = 0;

+        /*
+         * MSR_AMD_CPPC_ENABLE is write-once, once set it cannot be cleared.
+         */
+        if (!enable)
+                return 0;
+
         if (enable == cppc_enabled)
                 return 0;

@@ -332,7 +338,7 @@ static inline int pstate_enable(bool enable)
         return 0;
 }

-static int cppc_enable(bool enable)
+static int shmem_cppc_enable(bool enable)
 {
         int cpu, ret = 0;
         struct cppc_perf_ctrls perf_ctrls;
@@ -359,14 +365,14 @@ static int cppc_enable(bool enable)
         return ret;
 }

-DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
+DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);

-static inline int amd_pstate_enable(bool enable)
+static inline int amd_pstate_cppc_enable(bool enable)
 {
-        return static_call(amd_pstate_enable)(enable);
+        return static_call(amd_pstate_cppc_enable)(enable);
 }

-static int pstate_init_perf(struct amd_cpudata *cpudata)
+static int msr_init_perf(struct amd_cpudata *cpudata)
 {
         u64 cap1;

@@ -385,7 +391,7 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
         return 0;
 }

-static int cppc_init_perf(struct amd_cpudata *cpudata)
+static int shmem_init_perf(struct amd_cpudata *cpudata)
 {
         struct cppc_perf_caps cppc_perf;

@@ -420,14 +426,14 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
         return ret;
 }

-DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
+DEFINE_STATIC_CALL(amd_pstate_init_perf, msr_init_perf);

 static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
 {
         return static_call(amd_pstate_init_perf)(cpudata);
 }

-static void cppc_update_perf(struct amd_cpudata *cpudata,
+static void shmem_update_perf(struct amd_cpudata *cpudata,
                              u32 min_perf, u32 des_perf,
                              u32 max_perf, bool fast_switch)
 {
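The msr_*/shmem_* renames above make the naming match the dispatch scheme: one static call per operation, defaulting to the MSR backend and repointed to the shared-memory backend when the CPPC MSRs are absent. A rough userspace model of that pattern using a plain function pointer (DEFINE_STATIC_CALL()/static_call_update() patch the call site directly and avoid the pointer load; everything below is an invented analogy, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Default backend, like msr_cppc_enable() writing MSR_AMD_CPPC_ENABLE. */
static int msr_backend_enable(bool enable)
{
        printf("enable via CPPC MSR: %d\n", enable);
        return 0;
}

/* Alternate backend, like shmem_cppc_enable() using the ACPI mailbox. */
static int shmem_backend_enable(bool enable)
{
        printf("enable via shared-memory CPPC: %d\n", enable);
        return 0;
}

/* Stands in for DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable). */
static int (*cppc_enable_call)(bool) = msr_backend_enable;

int main(void)
{
        bool has_cppc_msr = false;      /* pretend X86_FEATURE_CPPC is absent */

        /* Stands in for the static_call_update() calls in amd_pstate_init(). */
        if (!has_cppc_msr)
                cppc_enable_call = shmem_backend_enable;

        return cppc_enable_call(true);
}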
ret:%d\n", - cpudata->cpu, ret); - return ret; - } - } - if (on) policy->cpuinfo.max_freq = max_freq; else if (policy->cpuinfo.max_freq > nominal_freq * 1000) @@ -1001,7 +1004,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) policy->fast_switch_possible = true; ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], - FREQ_QOS_MIN, policy->cpuinfo.min_freq); + FREQ_QOS_MIN, FREQ_QOS_MIN_DEFAULT_VALUE); if (ret < 0) { dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); goto free_cpudata1; @@ -1045,7 +1048,7 @@ static int amd_pstate_cpu_resume(struct cpufreq_policy *policy) { int ret; - ret = amd_pstate_enable(true); + ret = amd_pstate_cppc_enable(true); if (ret) pr_err("failed to enable amd-pstate during resume, return %d\n", ret); @@ -1056,7 +1059,7 @@ static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy) { int ret; - ret = amd_pstate_enable(false); + ret = amd_pstate_cppc_enable(false); if (ret) pr_err("failed to disable amd-pstate during suspend, return %d\n", ret); @@ -1189,25 +1192,41 @@ static ssize_t show_energy_performance_preference( static void amd_pstate_driver_cleanup(void) { - amd_pstate_enable(false); + amd_pstate_cppc_enable(false); cppc_state = AMD_PSTATE_DISABLE; current_pstate_driver = NULL; } +static int amd_pstate_set_driver(int mode_idx) +{ + if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) { + cppc_state = mode_idx; + if (cppc_state == AMD_PSTATE_DISABLE) + pr_info("driver is explicitly disabled\n"); + + if (cppc_state == AMD_PSTATE_ACTIVE) + current_pstate_driver = &amd_pstate_epp_driver; + + if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED) + current_pstate_driver = &amd_pstate_driver; + + return 0; + } + + return -EINVAL; +} + static int amd_pstate_register_driver(int mode) { int ret; - if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED) - current_pstate_driver = &amd_pstate_driver; - else if (mode == AMD_PSTATE_ACTIVE) - current_pstate_driver = &amd_pstate_epp_driver; - else - return -EINVAL; + ret = amd_pstate_set_driver(mode); + if (ret) + return ret; cppc_state = mode; - ret = amd_pstate_enable(true); + ret = amd_pstate_cppc_enable(true); if (ret) { pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n", ret); @@ -1485,6 +1504,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) WRITE_ONCE(cpudata->cppc_cap1_cached, value); } + current_pstate_driver->adjust_perf = NULL; + return 0; free_cpudata1: @@ -1507,26 +1528,13 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; - u32 max_perf, min_perf, min_limit_perf, max_limit_perf; + u32 max_perf, min_perf; u64 value; s16 epp; - if (cpudata->boost_supported && !policy->boost_enabled) - max_perf = READ_ONCE(cpudata->nominal_perf); - else - max_perf = READ_ONCE(cpudata->highest_perf); + max_perf = READ_ONCE(cpudata->highest_perf); min_perf = READ_ONCE(cpudata->lowest_perf); - max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq); - min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq); - - if (min_limit_perf < min_perf) - min_limit_perf = min_perf; - - if (max_limit_perf < min_limit_perf) - max_limit_perf = min_limit_perf; - - WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf); - WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf); + amd_pstate_update_min_max_limit(policy); max_perf = 
@@ -1045,7 +1048,7 @@ static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
 {
         int ret;

-        ret = amd_pstate_enable(true);
+        ret = amd_pstate_cppc_enable(true);
         if (ret)
                 pr_err("failed to enable amd-pstate during resume, return %d\n", ret);

@@ -1056,7 +1059,7 @@ static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
 {
         int ret;

-        ret = amd_pstate_enable(false);
+        ret = amd_pstate_cppc_enable(false);
         if (ret)
                 pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);

@@ -1189,25 +1192,41 @@ static ssize_t show_energy_performance_preference(

 static void amd_pstate_driver_cleanup(void)
 {
-        amd_pstate_enable(false);
+        amd_pstate_cppc_enable(false);
         cppc_state = AMD_PSTATE_DISABLE;
         current_pstate_driver = NULL;
 }

+static int amd_pstate_set_driver(int mode_idx)
+{
+        if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
+                cppc_state = mode_idx;
+                if (cppc_state == AMD_PSTATE_DISABLE)
+                        pr_info("driver is explicitly disabled\n");
+
+                if (cppc_state == AMD_PSTATE_ACTIVE)
+                        current_pstate_driver = &amd_pstate_epp_driver;
+
+                if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
+                        current_pstate_driver = &amd_pstate_driver;
+
+                return 0;
+        }
+
+        return -EINVAL;
+}
+
 static int amd_pstate_register_driver(int mode)
 {
         int ret;

-        if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
-                current_pstate_driver = &amd_pstate_driver;
-        else if (mode == AMD_PSTATE_ACTIVE)
-                current_pstate_driver = &amd_pstate_epp_driver;
-        else
-                return -EINVAL;
+        ret = amd_pstate_set_driver(mode);
+        if (ret)
+                return ret;

         cppc_state = mode;

-        ret = amd_pstate_enable(true);
+        ret = amd_pstate_cppc_enable(true);
         if (ret) {
                 pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
                        ret);
@@ -1485,6 +1504,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
                 WRITE_ONCE(cpudata->cppc_cap1_cached, value);
         }

+        current_pstate_driver->adjust_perf = NULL;
+
         return 0;

 free_cpudata1:
@@ -1507,26 +1528,13 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
 static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 {
         struct amd_cpudata *cpudata = policy->driver_data;
-        u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
+        u32 max_perf, min_perf;
         u64 value;
         s16 epp;

-        if (cpudata->boost_supported && !policy->boost_enabled)
-                max_perf = READ_ONCE(cpudata->nominal_perf);
-        else
-                max_perf = READ_ONCE(cpudata->highest_perf);
+        max_perf = READ_ONCE(cpudata->highest_perf);
         min_perf = READ_ONCE(cpudata->lowest_perf);
-        max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq);
-        min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq);
-
-        if (min_limit_perf < min_perf)
-                min_limit_perf = min_perf;
-
-        if (max_limit_perf < min_limit_perf)
-                max_limit_perf = min_limit_perf;
-
-        WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
-        WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+        amd_pstate_update_min_max_limit(policy);

         max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
                            cpudata->max_limit_perf);
@@ -1535,7 +1543,7 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
         value = READ_ONCE(cpudata->cppc_req_cached);

         if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
-                min_perf = max_perf;
+                min_perf = min(cpudata->nominal_perf, max_perf);

         /* Initial min/max values for CPPC Performance Controls Register */
         value &= ~AMD_CPPC_MIN_PERF(~0L);
@@ -1563,12 +1571,6 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
         if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
                 epp = 0;

-        /* Set initial EPP value */
-        if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
-                value &= ~GENMASK_ULL(31, 24);
-                value |= (u64)epp << 24;
-        }
-
         WRITE_ONCE(cpudata->cppc_req_cached, value);
         return amd_pstate_set_epp(cpudata, epp);
 }
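Worth noting in the epp_update_limit() hunks above: under the performance policy, min_perf is now capped at nominal_perf instead of being pinned to max_perf, so the request floor stays at the sustained level rather than a boost level. The arithmetic in miniature (the perf values are invented, on the 0-255 CPPC abstract scale):

#include <stdio.h>

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))
#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        /* Invented CPPC abstract performance levels. */
        unsigned int highest = 255, nominal = 166;
        unsigned int min_limit = 85, max_limit = 255;

        unsigned int max_perf = clamp(highest, min_limit, max_limit);

        /* Old behaviour pinned min_perf to max_perf; the floor for the
         * performance policy is now the sustained (nominal) level. */
        unsigned int min_perf = min(nominal, max_perf);

        printf("CPPC request: min=%u max=%u\n", min_perf, max_perf);
        return 0;
}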
&amd_pstate_epp_driver; - - if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED) - current_pstate_driver = &amd_pstate_driver; - - return 0; - } - - return -EINVAL; -} - -/** +/* * CPPC function is not supported for family ID 17H with model_ID ranging from 0x10 to 0x2F. * show the debug message that helps to check if the CPU has CPPC support for loading issue. */ @@ -1864,10 +1843,10 @@ static int __init amd_pstate_init(void) if (cppc_state == AMD_PSTATE_UNDEFINED) { /* Disable on the following configs by default: * 1. Undefined platforms - * 2. Server platforms + * 2. Server platforms with CPUs older than Family 0x1A. */ if (amd_pstate_acpi_pm_profile_undefined() || - amd_pstate_acpi_pm_profile_server()) { + (amd_pstate_acpi_pm_profile_server() && boot_cpu_data.x86 < 0x1A)) { pr_info("driver load is disabled, boot with specific mode to enable this\n"); return -ENODEV; } @@ -1875,50 +1854,31 @@ static int __init amd_pstate_init(void) cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE; } - switch (cppc_state) { - case AMD_PSTATE_DISABLE: + if (cppc_state == AMD_PSTATE_DISABLE) { pr_info("driver load is disabled, boot with specific mode to enable this\n"); return -ENODEV; - case AMD_PSTATE_PASSIVE: - case AMD_PSTATE_ACTIVE: - case AMD_PSTATE_GUIDED: - ret = amd_pstate_set_driver(cppc_state); - if (ret) - return ret; - break; - default: - return -EINVAL; } /* capability check */ if (cpu_feature_enabled(X86_FEATURE_CPPC)) { pr_debug("AMD CPPC MSR based functionality is supported\n"); - if (cppc_state != AMD_PSTATE_ACTIVE) - current_pstate_driver->adjust_perf = amd_pstate_adjust_perf; } else { pr_debug("AMD CPPC shared memory based functionality is supported\n"); - static_call_update(amd_pstate_enable, cppc_enable); - static_call_update(amd_pstate_init_perf, cppc_init_perf); - static_call_update(amd_pstate_update_perf, cppc_update_perf); + static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable); + static_call_update(amd_pstate_init_perf, shmem_init_perf); + static_call_update(amd_pstate_update_perf, shmem_update_perf); } - if (amd_pstate_prefcore) { - ret = amd_detect_prefcore(&amd_pstate_prefcore); - if (ret) - return ret; - } - - /* enable amd pstate feature */ - ret = amd_pstate_enable(true); + ret = amd_pstate_register_driver(cppc_state); if (ret) { - pr_err("failed to enable driver mode(%d)\n", cppc_state); + pr_err("failed to register with return %d\n", ret); return ret; } - ret = cpufreq_register_driver(current_pstate_driver); - if (ret) { - pr_err("failed to register with return %d\n", ret); - goto disable_driver; + if (amd_pstate_prefcore) { + ret = amd_detect_prefcore(&amd_pstate_prefcore); + if (ret) + return ret; } dev_root = bus_get_dev_root(&cpu_subsys); @@ -1935,8 +1895,7 @@ static int __init amd_pstate_init(void) global_attr_free: cpufreq_unregister_driver(current_pstate_driver); -disable_driver: - amd_pstate_enable(false); + amd_pstate_cppc_enable(false); return ret; } device_initcall(amd_pstate_init); diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index ea8438550b49..5d03a295a085 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -777,7 +777,7 @@ static struct platform_driver brcm_avs_cpufreq_platdrv = { .of_match_table = brcm_avs_cpufreq_match, }, .probe = brcm_avs_cpufreq_probe, - .remove_new = brcm_avs_cpufreq_remove, + .remove = brcm_avs_cpufreq_remove, }; module_platform_driver(brcm_avs_cpufreq_platdrv); diff --git a/drivers/cpufreq/cpufreq-dt.c 
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index ea8438550b49..5d03a295a085 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -777,7 +777,7 @@ static struct platform_driver brcm_avs_cpufreq_platdrv = {
                 .of_match_table = brcm_avs_cpufreq_match,
         },
         .probe = brcm_avs_cpufreq_probe,
-        .remove_new = brcm_avs_cpufreq_remove,
+        .remove = brcm_avs_cpufreq_remove,
 };
 module_platform_driver(brcm_avs_cpufreq_platdrv);
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 983443396f8f..3a7c3372bda7 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -345,7 +345,7 @@ static struct platform_driver dt_cpufreq_platdrv = {
                 .name = "cpufreq-dt",
         },
         .probe = dt_cpufreq_probe,
-        .remove_new = dt_cpufreq_remove,
+        .remove = dt_cpufreq_remove,
 };
 module_platform_driver(dt_cpufreq_platdrv);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f98c9438760c..1a4cae54a01b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1520,7 +1520,7 @@ static int cpufreq_online(unsigned int cpu)
                          * frequency for longer duration. Hence, a BUG_ON().
                          */
                         BUG_ON(ret);
-                        pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
+                        pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
                                 __func__, policy->cpu, old_freq, policy->cur);
                 }
         }
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 7d2754411d8c..8736be3a06ce 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -145,7 +145,7 @@ static struct platform_driver davinci_cpufreq_driver = {
         .driver = {
                 .name = "cpufreq-davinci",
         },
-        .remove_new = __exit_p(davinci_cpufreq_remove),
+        .remove = __exit_p(davinci_cpufreq_remove),
 };

 int __init davinci_cpufreq_init(void)
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 577bb9e2f112..1492c92ffc1a 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -183,7 +183,7 @@ static void imx_cpufreq_dt_remove(struct platform_device *pdev)

 static struct platform_driver imx_cpufreq_dt_driver = {
         .probe = imx_cpufreq_dt_probe,
-        .remove_new = imx_cpufreq_dt_remove,
+        .remove = imx_cpufreq_dt_remove,
         .driver = {
                 .name = "imx-cpufreq-dt",
         },
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index c20d3ecc5a81..f3c99f378ad6 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -522,7 +522,7 @@ static struct platform_driver imx6q_cpufreq_platdrv = {
                 .name = "imx6q-cpufreq",
         },
         .probe = imx6q_cpufreq_probe,
-        .remove_new = imx6q_cpufreq_remove,
+        .remove = imx6q_cpufreq_remove,
 };
 module_platform_driver(imx6q_cpufreq_platdrv);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b0018f371ea3..b8e2396a708a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1028,26 +1028,29 @@ static void hybrid_update_cpu_capacity_scaling(void)
         }
 }

-static void __hybrid_init_cpu_capacity_scaling(void)
+static void __hybrid_refresh_cpu_capacity_scaling(void)
 {
         hybrid_max_perf_cpu = NULL;
         hybrid_update_cpu_capacity_scaling();
 }

-static void hybrid_init_cpu_capacity_scaling(void)
+static void hybrid_refresh_cpu_capacity_scaling(void)
 {
-        bool disable_itmt = false;
+        guard(mutex)(&hybrid_capacity_lock);

-        mutex_lock(&hybrid_capacity_lock);
+        __hybrid_refresh_cpu_capacity_scaling();
+}

+static void hybrid_init_cpu_capacity_scaling(bool refresh)
+{
         /*
          * If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity
          * scaling has been enabled already and the driver is just changing the
          * operation mode.
          */
-        if (hybrid_max_perf_cpu) {
-                __hybrid_init_cpu_capacity_scaling();
-                goto unlock;
+        if (refresh) {
+                hybrid_refresh_cpu_capacity_scaling();
+                return;
         }

         /*
@@ -1056,19 +1059,25 @@ static void hybrid_init_cpu_capacity_scaling(void)
          * do not do that when SMT is in use.
          */
         if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) {
-                __hybrid_init_cpu_capacity_scaling();
-                disable_itmt = true;
+                hybrid_refresh_cpu_capacity_scaling();
+                /*
+                 * Disabling ITMT causes sched domains to be rebuilt to disable asym
+                 * packing and enable asym capacity.
+                 */
+                sched_clear_itmt_support();
         }
+}

-unlock:
-        mutex_unlock(&hybrid_capacity_lock);
+static bool hybrid_clear_max_perf_cpu(void)
+{
+        bool ret;

-        /*
-         * Disabling ITMT causes sched domains to be rebuilt to disable asym
-         * packing and enable asym capacity.
-         */
-        if (disable_itmt)
-                sched_clear_itmt_support();
+        guard(mutex)(&hybrid_capacity_lock);
+
+        ret = !!hybrid_max_perf_cpu;
+        hybrid_max_perf_cpu = NULL;
+
+        return ret;
 }

 static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
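hybrid_refresh_cpu_capacity_scaling() above uses guard(mutex) from linux/cleanup.h: the lock is taken at the declaration and dropped automatically when the scope ends, which is what lets the patch delete the unlock label. Outside the kernel the same shape can be approximated with the GCC/Clang cleanup attribute (SCOPED_LOCK is an invented macro, not kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_state;

/* Cleanup callback invoked when the guarded variable leaves scope. */
static void unlock_cb(pthread_mutex_t **m)
{
        pthread_mutex_unlock(*m);
}

/* Rough analogue of guard(mutex)(&lock): lock now, unlock at scope exit. */
#define SCOPED_LOCK(m) \
        pthread_mutex_t *scoped_guard __attribute__((cleanup(unlock_cb))) = \
                (pthread_mutex_lock(m), (m))

static void refresh(void)
{
        SCOPED_LOCK(&lock);
        shared_state++;
        /* Any return from this scope releases the mutex automatically,
         * so no unlock label or explicit unlock call is needed. */
}

int main(void)
{
        refresh();
        printf("state=%d\n", shared_state);
        return 0;
}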
@@ -1392,7 +1401,7 @@ static void intel_pstate_update_limits_for_all(void)
         mutex_lock(&hybrid_capacity_lock);

         if (hybrid_max_perf_cpu)
-                __hybrid_init_cpu_capacity_scaling();
+                __hybrid_refresh_cpu_capacity_scaling();

         mutex_unlock(&hybrid_capacity_lock);
 }
@@ -2263,6 +2272,11 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
                 } else {
                         cpu->pstate.scaling = perf_ctl_scaling;
                 }
+                /*
+                 * If the CPU is going online for the first time and it was
+                 * offline initially, asym capacity scaling needs to be updated.
+                 */
+                hybrid_update_capacity(cpu);
         } else {
                 cpu->pstate.scaling = perf_ctl_scaling;
                 cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
@@ -3352,6 +3366,7 @@ static void intel_pstate_driver_cleanup(void)

 static int intel_pstate_register_driver(struct cpufreq_driver *driver)
 {
+        bool refresh_cpu_cap_scaling;
         int ret;

         if (driver == &intel_pstate)
@@ -3364,6 +3379,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)

         arch_set_max_freq_ratio(global.turbo_disabled);

+        refresh_cpu_cap_scaling = hybrid_clear_max_perf_cpu();
+
         intel_pstate_driver = driver;
         ret = cpufreq_register_driver(intel_pstate_driver);
         if (ret) {
@@ -3373,7 +3390,7 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)

         global.min_perf_pct = min_perf_pct_min();

-        hybrid_init_cpu_capacity_scaling();
+        hybrid_init_cpu_capacity_scaling(refresh_cpu_cap_scaling);

         return 0;
 }
@@ -3638,6 +3655,8 @@ static const struct x86_cpu_id intel_epp_default[] = {
         X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
         X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
         X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+        X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+        X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
         X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
                                                              179, 64, 16)),
         X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
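hybrid_clear_max_perf_cpu() above is a locked test-and-clear: intel_pstate_register_driver() uses its return value to decide between refreshing an already-enabled capacity-scaling setup and doing first-time initialization after re-registration. The shape in miniature (all names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static void *max_perf_marker;   /* stands in for hybrid_max_perf_cpu */

/* Under the lock, read and clear the marker, reporting whether it had
 * been set, like hybrid_clear_max_perf_cpu(). */
static bool test_and_clear_marker(void)
{
        bool was_set;

        pthread_mutex_lock(&state_lock);
        was_set = max_perf_marker != NULL;
        max_perf_marker = NULL;
        pthread_mutex_unlock(&state_lock);

        return was_set;
}

int main(void)
{
        int dummy;

        max_perf_marker = &dummy;       /* pretend scaling was enabled */
        bool refresh = test_and_clear_marker();

        /* Mirrors hybrid_init_cpu_capacity_scaling(refresh): */
        printf(refresh ? "refresh existing setup\n" : "first-time setup\n");
        return 0;
}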
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index fd20b986d1f2..312f2654d1d5 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -189,7 +189,7 @@ static void kirkwood_cpufreq_remove(struct platform_device *pdev)

 static struct platform_driver kirkwood_cpufreq_platform_driver = {
         .probe = kirkwood_cpufreq_probe,
-        .remove_new = kirkwood_cpufreq_remove,
+        .remove = kirkwood_cpufreq_remove,
         .driver = {
                 .name = "kirkwood-cpufreq",
         },
diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
index 6b5e6798d9a2..61ebebf69455 100644
--- a/drivers/cpufreq/loongson3_cpufreq.c
+++ b/drivers/cpufreq/loongson3_cpufreq.c
@@ -386,7 +386,7 @@ static struct platform_driver loongson3_platform_driver = {
         },
         .id_table = cpufreq_id_table,
         .probe = loongson3_cpufreq_probe,
-        .remove_new = loongson3_cpufreq_remove,
+        .remove = loongson3_cpufreq_remove,
 };
 module_platform_driver(loongson3_platform_driver);
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 8925e096d5b9..f7db5f4ad306 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -344,7 +344,7 @@ MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);

 static struct platform_driver mtk_cpufreq_hw_driver = {
         .probe = mtk_cpufreq_hw_driver_probe,
-        .remove_new = mtk_cpufreq_hw_driver_remove,
+        .remove = mtk_cpufreq_hw_driver_remove,
         .driver = {
                 .name = "mtk-cpufreq-hw",
                 .of_match_table = mtk_cpufreq_hw_match,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index de8be0a8932d..106220c0fd11 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -188,7 +188,7 @@ static struct platform_driver omap_cpufreq_platdrv = {
                 .name = "omap-cpufreq",
         },
         .probe = omap_cpufreq_probe,
-        .remove_new = omap_cpufreq_remove,
+        .remove = omap_cpufreq_remove,
 };
 module_platform_driver(omap_cpufreq_platdrv);
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 771efbf51a48..ac2e90a65f0c 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -615,7 +615,7 @@ static struct platform_driver pcc_cpufreq_platdrv = {
         .driver = {
                 .name = "pcc-cpufreq",
         },
-        .remove_new = pcc_cpufreq_remove,
+        .remove = pcc_cpufreq_remove,
 };

 static int __init pcc_cpufreq_init(void)
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 900d6844c43d..98129565acb8 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -736,7 +736,7 @@ static void qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)

 static struct platform_driver qcom_cpufreq_hw_driver = {
         .probe = qcom_cpufreq_hw_driver_probe,
-        .remove_new = qcom_cpufreq_hw_driver_remove,
+        .remove = qcom_cpufreq_hw_driver_remove,
         .driver = {
                 .name = "qcom-cpufreq-hw",
                 .of_match_table = qcom_cpufreq_hw_match,
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 703308fb891a..08e518c89fc3 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -604,7 +604,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(qcom_cpufreq_pm_ops, qcom_cpufreq_suspend, NULL)

 static struct platform_driver qcom_cpufreq_driver = {
         .probe = qcom_cpufreq_probe,
-        .remove_new = qcom_cpufreq_remove,
+        .remove = qcom_cpufreq_remove,
         .driver = {
                 .name = "qcom-cpufreq-nvmem",
                 .pm = pm_sleep_ptr(&qcom_cpufreq_pm_ops),
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 3519bf34d397..a37ce051236c 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -296,7 +296,7 @@ static struct platform_driver qoriq_cpufreq_platform_driver = {
                 .name = "qoriq-cpufreq",
         },
         .probe = qoriq_cpufreq_probe,
-        .remove_new = qoriq_cpufreq_remove,
+        .remove = qoriq_cpufreq_remove,
 };
 module_platform_driver(qoriq_cpufreq_platform_driver);
diff --git a/drivers/cpufreq/raspberrypi-cpufreq.c b/drivers/cpufreq/raspberrypi-cpufreq.c
index e0705cc9a57d..5050932954e3 100644
--- a/drivers/cpufreq/raspberrypi-cpufreq.c
+++ b/drivers/cpufreq/raspberrypi-cpufreq.c
@@ -85,7 +85,7 @@ static struct platform_driver raspberrypi_cpufreq_driver = {
         .driver = {
                 .name = "raspberrypi-cpufreq",
"raspberrypi-cpufreq", }, .probe = raspberrypi_cpufreq_probe, - .remove_new = raspberrypi_cpufreq_remove, + .remove = raspberrypi_cpufreq_remove, }; module_platform_driver(raspberrypi_cpufreq_driver); diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 8d73e6e8be2a..cd89c1b9832c 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -217,7 +217,7 @@ static struct platform_driver scpi_cpufreq_platdrv = { .name = "scpi-cpufreq", }, .probe = scpi_cpufreq_probe, - .remove_new = scpi_cpufreq_remove, + .remove = scpi_cpufreq_remove, }; module_platform_driver(scpi_cpufreq_platdrv); diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index 293921acec93..352e1a69a85e 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -283,7 +283,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev) static struct platform_driver sun50i_cpufreq_driver = { .probe = sun50i_cpufreq_nvmem_probe, - .remove_new = sun50i_cpufreq_nvmem_remove, + .remove = sun50i_cpufreq_nvmem_remove, .driver = { .name = "sun50i-cpufreq-nvmem", }, diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index 7b8fcfa55038..c7761eb99f3c 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -276,7 +276,7 @@ static struct platform_driver tegra186_cpufreq_platform_driver = { .of_match_table = tegra186_cpufreq_of_match, }, .probe = tegra186_cpufreq_probe, - .remove_new = tegra186_cpufreq_remove, + .remove = tegra186_cpufreq_remove, }; module_platform_driver(tegra186_cpufreq_platform_driver); diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index 07ea7ed61b68..9055dd398e7f 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -818,7 +818,7 @@ static struct platform_driver tegra194_ccplex_driver = { .of_match_table = tegra194_cpufreq_of_match, }, .probe = tegra194_cpufreq_probe, - .remove_new = tegra194_cpufreq_remove, + .remove = tegra194_cpufreq_remove, }; module_platform_driver(tegra194_ccplex_driver); diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 3fadf536c429..0f86cdb7ec8a 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -565,7 +565,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = { .name = "vexpress-spc-cpufreq", }, .probe = ve_spc_cpufreq_probe, - .remove_new = ve_spc_cpufreq_remove, + .remove = ve_spc_cpufreq_remove, }; module_platform_driver(ve_spc_cpufreq_platdrv); diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 7cfb980a357d..caba6f4bb1b7 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -139,7 +139,7 @@ out_kfree_drv: * * Initializes arm cpuidle driver for all CPUs, if any CPU fails * to register cpuidle driver then rollback to cancel all CPUs - * registeration. + * registration. 
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 7cfb980a357d..caba6f4bb1b7 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -139,7 +139,7 @@ out_kfree_drv:
  *
  * Initializes arm cpuidle driver for all CPUs, if any CPU fails
  * to register cpuidle driver then rollback to cancel all CPUs
- * registeration.
+ * registration.
  */
 static int __init arm_idle_init(void)
 {
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 1fc9968eae19..3ab240e0e122 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -48,7 +48,7 @@ static int qcom_cpu_spc(struct spm_driver_data *drv)
         ret = cpu_suspend(0, qcom_pm_collapse);
         /*
          * ARM common code executes WFI without calling into our driver and
-         * if the SPM mode is not reset, then we may accidently power down the
+         * if the SPM mode is not reset, then we may accidentally power down the
          * cpu when we intended only to gate the cpu clock.
          * Ensure the state is set to standby before returning.
          */
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 9e418aec1755..06ace16f9e71 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -406,7 +406,7 @@ void cpuidle_reflect(struct cpuidle_device *dev, int index)
          * Min polling interval of 10usec is a guess. It is assuming that
          * for most users, the time for a single ping-pong workload like
          * perf bench pipe would generally complete within 10usec but
-         * this is hardware dependant. Actual time can be estimated with
+         * this is hardware dependent. Actual time can be estimated with
          *
          * perf bench sched pipe -l 10000
          *
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index cf5873cc45dc..9bbfa594c442 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -261,7 +261,7 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
  * @drv: a pointer to a valid struct cpuidle_driver
  *
  * Register the driver under a lock to prevent concurrent attempts to
- * [un]register the driver from occuring at the same time.
+ * [un]register the driver from occurring at the same time.
  *
  * Returns 0 on success, a negative error code (returned by
  * __cpuidle_register_driver()) otherwise.
@@ -296,7 +296,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_driver);
  * @drv: a pointer to a valid struct cpuidle_driver
  *
  * Unregisters the cpuidle driver under a lock to prevent concurrent attempts
- * to [un]register the driver from occuring at the same time. @drv has to
+ * to [un]register the driver from occurring at the same time. @drv has to
  * match the currently registered driver.
  */
 void cpuidle_unregister_driver(struct cpuidle_driver *drv)
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f3c9d49f0f2a..28363bfa3e4c 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,7 +19,7 @@

 #include "gov.h"

-#define BUCKETS 12
+#define BUCKETS 6
 #define INTERVAL_SHIFT 3
 #define INTERVALS (1UL << INTERVAL_SHIFT)
 #define RESOLUTION 1024
@@ -29,12 +29,11 @@
 /*
  * Concepts and ideas behind the menu governor
  *
- * For the menu governor, there are 3 decision factors for picking a C
+ * For the menu governor, there are 2 decision factors for picking a C
  * state:
  * 1) Energy break even point
- * 2) Performance impact
- * 3) Latency tolerance (from pmqos infrastructure)
- * These three factors are treated independently.
+ * 2) Latency tolerance (from pmqos infrastructure)
+ * These two factors are treated independently.
  *
  * Energy break even point
  * -----------------------
@@ -75,30 +74,6 @@
  * intervals and if the stand deviation of these 8 intervals is below a
  * threshold value, we use the average of these intervals as prediction.
  *
- * Limiting Performance Impact
- * ---------------------------
- * C states, especially those with large exit latencies, can have a real
- * noticeable impact on workloads, which is not acceptable for most sysadmins,
- * and in addition, less performance has a power price of its own.
- *
- * As a general rule of thumb, menu assumes that the following heuristic
- * holds:
- *     The busier the system, the less impact of C states is acceptable
- *
- * This rule-of-thumb is implemented using a performance-multiplier:
- * If the exit latency times the performance multiplier is longer than
- * the predicted duration, the C state is not considered a candidate
- * for selection due to a too high performance impact. So the higher
- * this multiplier is, the longer we need to be idle to pick a deep C
- * state, and thus the less likely a busy CPU will hit such a deep
- * C state.
- *
- * Currently there is only one value determining the factor:
- * 10 points are added for each process that is waiting for IO on this CPU.
- * (This value was experimentally determined.)
- * Utilization is no longer a factor as it was shown that it never contributed
- * significantly to the performance multiplier in the first place.
- *
  */

 struct menu_device {
@@ -112,19 +87,10 @@ struct menu_device {
         int interval_ptr;
 };

-static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
+static inline int which_bucket(u64 duration_ns)
 {
         int bucket = 0;

-        /*
-         * We keep two groups of stats; one with no
-         * IO pending, one without.
-         * This allows us to calculate
-         * E(duration)|iowait
-         */
-        if (nr_iowaiters)
-                bucket = BUCKETS/2;
-
         if (duration_ns < 10ULL * NSEC_PER_USEC)
                 return bucket;
         if (duration_ns < 100ULL * NSEC_PER_USEC)
@@ -138,19 +104,6 @@ static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
         return bucket + 5;
 }

-/*
- * Return a multiplier for the exit latency that is intended
- * to take performance requirements into account.
- * The more performance critical we estimate the system
- * to be, the higher this multiplier, and thus the higher
- * the barrier to go to an expensive C state.
- */
-static inline int performance_multiplier(unsigned int nr_iowaiters)
-{
-        /* for IO wait tasks (per cpu!) we add 10x each */
-        return 1 + 10 * nr_iowaiters;
-}
-
 static DEFINE_PER_CPU(struct menu_device, menu_devices);

 static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
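With the iowait distinction gone, which_bucket() above depends only on the sleep-length estimate, which is why BUCKETS drops from 12 to 6. A runnable condensation of the threshold ladder (the middle thresholds elided by the hunk context follow the same decade progression in the kernel source):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

/* One bucket per order of magnitude of the expected idle duration,
 * with no iowait offset anymore. */
static int which_bucket(uint64_t duration_ns)
{
        int bucket = 0;

        if (duration_ns < 10 * NSEC_PER_USEC)
                return bucket;
        if (duration_ns < 100 * NSEC_PER_USEC)
                return bucket + 1;
        if (duration_ns < 1000 * NSEC_PER_USEC)
                return bucket + 2;
        if (duration_ns < 10000 * NSEC_PER_USEC)
                return bucket + 3;
        if (duration_ns < 100000 * NSEC_PER_USEC)
                return bucket + 4;
        return bucket + 5;
}

int main(void)
{
        const uint64_t samples_ns[] = { 5000, 50000, 500000, 5000000,
                                        50000000, 500000000 };

        for (unsigned int i = 0; i < sizeof(samples_ns) / sizeof(samples_ns[0]); i++)
                printf("%llu ns -> bucket %d\n",
                       (unsigned long long)samples_ns[i],
                       which_bucket(samples_ns[i]));
        return 0;
}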
@@ -258,8 +211,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
         struct menu_device *data = this_cpu_ptr(&menu_devices);
         s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
         u64 predicted_ns;
-        u64 interactivity_req;
-        unsigned int nr_iowaiters;
         ktime_t delta, delta_tick;
         int i, idx;

@@ -268,8 +219,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                 data->needs_update = 0;
         }

-        nr_iowaiters = nr_iowait_cpu(dev->cpu);
-
         /* Find the shortest expected idle interval. */
         predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
         if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
@@ -283,7 +232,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                 }

                 data->next_timer_ns = delta;
-                data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
+                data->bucket = which_bucket(data->next_timer_ns);

                 /* Round up the result for half microseconds. */
                 timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
@@ -301,7 +250,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                  */
                 data->next_timer_ns = KTIME_MAX;
                 delta_tick = TICK_NSEC / 2;
-                data->bucket = which_bucket(KTIME_MAX, nr_iowaiters);
+                data->bucket = which_bucket(KTIME_MAX);
         }

         if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
@@ -328,15 +277,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                  */
                 if (predicted_ns < TICK_NSEC)
                         predicted_ns = data->next_timer_ns;
-        } else {
-                /*
-                 * Use the performance multiplier and the user-configurable
-                 * latency_req to determine the maximum exit latency.
-                 */
-                interactivity_req = div64_u64(predicted_ns,
-                                              performance_multiplier(nr_iowaiters));
-                if (latency_req > interactivity_req)
-                        latency_req = interactivity_req;
+        } else if (latency_req > predicted_ns) {
+                latency_req = predicted_ns;
         }

diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 67aebfe0fed6..ac4d8faa3886 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1069,6 +1069,47 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
         {
                 .enter = NULL }
 };

+static struct cpuidle_state gnrd_cstates[] __initdata = {
+        {
+                .name = "C1",
+                .desc = "MWAIT 0x00",
+                .flags = MWAIT2flg(0x00),
+                .exit_latency = 1,
+                .target_residency = 1,
+                .enter = &intel_idle,
+                .enter_s2idle = intel_idle_s2idle, },
+        {
+                .name = "C1E",
+                .desc = "MWAIT 0x01",
+                .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+                .exit_latency = 4,
+                .target_residency = 4,
+                .enter = &intel_idle,
+                .enter_s2idle = intel_idle_s2idle, },
+        {
+                .name = "C6",
+                .desc = "MWAIT 0x20",
+                .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
+                                           CPUIDLE_FLAG_INIT_XSTATE |
+                                           CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
+                .exit_latency = 220,
+                .target_residency = 650,
+                .enter = &intel_idle,
+                .enter_s2idle = intel_idle_s2idle, },
+        {
+                .name = "C6P",
+                .desc = "MWAIT 0x21",
+                .flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED |
+                                           CPUIDLE_FLAG_INIT_XSTATE |
+                                           CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
+                .exit_latency = 240,
+                .target_residency = 750,
+                .enter = &intel_idle,
+                .enter_s2idle = intel_idle_s2idle, },
+        {
+                .enter = NULL }
+};
+
 static struct cpuidle_state atom_cstates[] __initdata = {
         {
                 .name = "C1E",
@@ -1508,6 +1549,12 @@ static const struct idle_cpu idle_cpu_gnr __initconst = {
         .use_acpi = true,
 };

+static const struct idle_cpu idle_cpu_gnrd __initconst = {
+        .state_table = gnrd_cstates,
+        .disable_promotion_to_c1e = true,
+        .use_acpi = true,
+};
+
 static const struct idle_cpu idle_cpu_avn __initconst = {
         .state_table = avn_cstates,
         .disable_promotion_to_c1e = true,
@@ -1593,6 +1640,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
         X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &idle_cpu_spr),
         X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &idle_cpu_spr),
         X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &idle_cpu_gnr),
+        X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &idle_cpu_gnrd),
         X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &idle_cpu_knl),
         X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &idle_cpu_knl),
         X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &idle_cpu_bxt),
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index c6ba15388ea7..28c77904ea74 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -783,9 +783,8 @@ static int sugov_init(struct cpufreq_policy *policy)
         if (ret)
                 goto fail;

-        sugov_eas_rebuild_sd();
-
 out:
+        sugov_eas_rebuild_sd();
         mutex_unlock(&global_tunables_lock);
         return 0;
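The last hunk moves sugov_eas_rebuild_sd() below the out: label, so the rebuild also runs on the early path where a new policy attaches to already-existing global tunables and jumps straight to out. A stripped-down sketch of that control-flow fix (names shortened, logic reduced to the goto structure):

#include <stdbool.h>
#include <stdio.h>

static bool have_global_tunables;

static void rebuild_sd(void)
{
        puts("rebuilding scheduling domains");
}

/* Sketch of sugov_init()'s tail: both the "attach to existing tunables"
 * path and the "create new tunables" path must end with the rebuild,
 * which is why it now sits after the out label. */
static int init_policy(void)
{
        if (have_global_tunables)
                goto out;       /* previously skipped the rebuild */

        /* ... allocate and register new tunables here ... */
        have_global_tunables = true;

out:
        rebuild_sd();
        return 0;
}

int main(void)
{
        init_policy();  /* first policy: creates tunables */
        init_policy();  /* second policy: attaches, still rebuilds */
        return 0;
}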