From c95a05f9f359c895dc6bd63862923ceba059ef22 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 4 Jul 2017 22:09:21 -0500 Subject: cpufreq: speedstep: remove unnecessary static in speedstep_detect_chipset() Remove unnecessary static on local variable hostbridge. Such variable is initialized before being used, on every execution path throughout the function. The static has no benefit and, removing it reduces the code size. This issue was detected using Coccinelle and the following semantic patch: @bad exists@ position p; identifier x; type T; @@ static T x@p; ... x = <+...x...+> @@ identifier x; expression e; type T; position p != bad.p; @@ -static T x@p; ... when != x when strict ?x = e; In the following log you can see the difference in the code size. Also, there is a significant difference in the bss segment. This log is the output of the size command, before and after the code change: before: text data bss dec hex filename 5084 3392 256 8732 221c drivers/cpufreq/speedstep-ich.o after: text data bss dec hex filename 5062 3304 192 8558 216e drivers/cpufreq/speedstep-ich.o Signed-off-by: Gustavo A. R. Silva Acked-by: Viresh Kumar Acked-by: Dominik Brodowski Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/speedstep-ich.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index b86953a3ddc4..0412a246a785 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c @@ -207,7 +207,7 @@ static unsigned int speedstep_detect_chipset(void) * 8100 which use a pretty old revision of the 82815 * host bridge. Abort on these systems. */ - static struct pci_dev *hostbridge; + struct pci_dev *hostbridge; hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_MC, -- cgit v1.2.3 From a5685781dfe9e7c2391d004f6a725042e7121c3e Mon Sep 17 00:00:00 2001 From: Shubhrajyoti Datta Date: Thu, 13 Jul 2017 11:19:10 +0200 Subject: cpufreq: dt: Add zynqmp to the cpufreq dt platdev Add zynqmp to the cpufreq dt platform device. Signed-off-by: Shubhrajyoti Datta Signed-off-by: Michal Simek Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq-dt-platdev.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 1c262923fe58..2eb40d46d357 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -94,6 +94,7 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "ti,omap5", }, { .compatible = "xlnx,zynq-7000", }, + { .compatible = "xlnx,zynqmp", }, { .compatible = "zte,zx296718", }, -- cgit v1.2.3 From 5b60697cd89cf5a438b2984e11859228e5ec1c6b Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 12 Jul 2017 09:21:49 +0530 Subject: PM / OPP: OF: Use pr_debug() instead of pr_err() while adding OPP table Some platforms add the OPPs dynamically from platform specific drivers instead of getting them statically from DT. The cpufreq-dt driver already ignores the return value of dev_pm_opp_of_cpumask_add_table() to not error out for such cases, but we still end up printing error message from that routine. That's not nice. Convert the print message to use pr_debug() instead. Reported-by: Mason Signed-off-by: Viresh Kumar Reviewed-by: Stephen Boyd Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/opp/of.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 57eec1ca0569..6f0497b377c7 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c @@ -539,8 +539,12 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) ret = dev_pm_opp_of_add_table(cpu_dev); if (ret) { - pr_err("%s: couldn't find opp table for cpu:%d, %d\n", - __func__, cpu, ret); + /* + * OPP may get registered dynamically, don't print error + * message here. + */ + pr_debug("%s: couldn't find opp table for cpu:%d, %d\n", + __func__, cpu, ret); /* Free all other OPPs */ dev_pm_opp_of_cpumask_remove_table(cpumask); -- cgit v1.2.3 From 501c574f4e3a59d30b3b915d843811485e3e702d Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Tue, 18 Jul 2017 14:01:43 +0800 Subject: cpufreq: mediatek: Add support of cpufreq to MT2701/MT7623 SoC MT2701/MT7623 is a 32-bit ARMv7 based quad-core (4 * Cortex-A7) with single cluster and this hardware is also compatible with the existing driver through enabling CPU frequency feature with operating-points-v2 bindings. Also, this driver actually supports all MediaTek SoCs, the Kconfig menu entry and file name itself should be updated with more generic name to drop "MT8173" Signed-off-by: Sean Wang Acked-by: Viresh Kumar Reviewed-by: Jean Delvare Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/Kconfig.arm | 7 +- drivers/cpufreq/Makefile | 2 +- drivers/cpufreq/mediatek-cpufreq.c | 621 +++++++++++++++++++++++++++++++++++++ drivers/cpufreq/mt8173-cpufreq.c | 619 ------------------------------------ 4 files changed, 625 insertions(+), 624 deletions(-) create mode 100644 drivers/cpufreq/mediatek-cpufreq.c delete mode 100644 drivers/cpufreq/mt8173-cpufreq.c (limited to 'drivers') diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 2011fec2d6ad..49ec1c0c395a 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -96,14 +96,13 @@ config ARM_KIRKWOOD_CPUFREQ This adds the CPUFreq driver for Marvell Kirkwood SoCs. -config ARM_MT8173_CPUFREQ - tristate "Mediatek MT8173 CPUFreq support" +config ARM_MEDIATEK_CPUFREQ + tristate "CPU Frequency scaling support for MediaTek SoCs" depends on ARCH_MEDIATEK && REGULATOR - depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST) depends on !CPU_THERMAL || THERMAL select PM_OPP help - This adds the CPUFreq driver support for Mediatek MT8173 SoC. + This adds the CPUFreq driver support for MediaTek SoCs. config ARM_OMAP2PLUS_CPUFREQ bool "TI OMAP2+" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index ab3a42cd29ef..51221b1e1f15 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -58,7 +58,7 @@ obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o -obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o +obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c new file mode 100644 index 000000000000..008e088fc246 --- /dev/null +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -0,0 +1,621 @@ +/* + * Copyright (c) 2015 Linaro Ltd. 
+ * Author: Pi-Cheng Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MIN_VOLT_SHIFT (100000) +#define MAX_VOLT_SHIFT (200000) +#define MAX_VOLT_LIMIT (1150000) +#define VOLT_TOL (10000) + +/* + * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS + * on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in + * Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two + * voltage inputs need to be controlled under a hardware limitation: + * 100mV < Vsram - Vproc < 200mV + * + * When scaling the clock frequency of a CPU clock domain, the clock source + * needs to be switched to another stable PLL clock temporarily until + * the original PLL becomes stable at target frequency. + */ +struct mtk_cpu_dvfs_info { + struct cpumask cpus; + struct device *cpu_dev; + struct regulator *proc_reg; + struct regulator *sram_reg; + struct clk *cpu_clk; + struct clk *inter_clk; + struct thermal_cooling_device *cdev; + struct list_head list_head; + int intermediate_voltage; + bool need_voltage_tracking; +}; + +static LIST_HEAD(dvfs_info_list); + +static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu) +{ + struct mtk_cpu_dvfs_info *info; + + list_for_each_entry(info, &dvfs_info_list, list_head) { + if (cpumask_test_cpu(cpu, &info->cpus)) + return info; + } + + return NULL; +} + +static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info, + int new_vproc) +{ + struct regulator *proc_reg = info->proc_reg; + struct regulator *sram_reg = info->sram_reg; + int old_vproc, old_vsram, new_vsram, vsram, vproc, ret; + + old_vproc = regulator_get_voltage(proc_reg); + if (old_vproc < 0) { + pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc); + return old_vproc; + } + /* Vsram should not exceed the maximum allowed voltage of SoC. */ + new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT); + + if (old_vproc < new_vproc) { + /* + * When scaling up voltages, Vsram and Vproc scale up step + * by step. At each step, set Vsram to (Vproc + 200mV) first, + * then set Vproc to (Vsram - 100mV). + * Keep doing it until Vsram and Vproc hit target voltages. + */ + do { + old_vsram = regulator_get_voltage(sram_reg); + if (old_vsram < 0) { + pr_err("%s: invalid Vsram value: %d\n", + __func__, old_vsram); + return old_vsram; + } + old_vproc = regulator_get_voltage(proc_reg); + if (old_vproc < 0) { + pr_err("%s: invalid Vproc value: %d\n", + __func__, old_vproc); + return old_vproc; + } + + vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT); + + if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) { + vsram = MAX_VOLT_LIMIT; + + /* + * If the target Vsram hits the maximum voltage, + * try to set the exact voltage value first. 
+ */ + ret = regulator_set_voltage(sram_reg, vsram, + vsram); + if (ret) + ret = regulator_set_voltage(sram_reg, + vsram - VOLT_TOL, + vsram); + + vproc = new_vproc; + } else { + ret = regulator_set_voltage(sram_reg, vsram, + vsram + VOLT_TOL); + + vproc = vsram - MIN_VOLT_SHIFT; + } + if (ret) + return ret; + + ret = regulator_set_voltage(proc_reg, vproc, + vproc + VOLT_TOL); + if (ret) { + regulator_set_voltage(sram_reg, old_vsram, + old_vsram); + return ret; + } + } while (vproc < new_vproc || vsram < new_vsram); + } else if (old_vproc > new_vproc) { + /* + * When scaling down voltages, Vsram and Vproc scale down step + * by step. At each step, set Vproc to (Vsram - 200mV) first, + * then set Vproc to (Vproc + 100mV). + * Keep doing it until Vsram and Vproc hit target voltages. + */ + do { + old_vproc = regulator_get_voltage(proc_reg); + if (old_vproc < 0) { + pr_err("%s: invalid Vproc value: %d\n", + __func__, old_vproc); + return old_vproc; + } + old_vsram = regulator_get_voltage(sram_reg); + if (old_vsram < 0) { + pr_err("%s: invalid Vsram value: %d\n", + __func__, old_vsram); + return old_vsram; + } + + vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT); + ret = regulator_set_voltage(proc_reg, vproc, + vproc + VOLT_TOL); + if (ret) + return ret; + + if (vproc == new_vproc) + vsram = new_vsram; + else + vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT); + + if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) { + vsram = MAX_VOLT_LIMIT; + + /* + * If the target Vsram hits the maximum voltage, + * try to set the exact voltage value first. + */ + ret = regulator_set_voltage(sram_reg, vsram, + vsram); + if (ret) + ret = regulator_set_voltage(sram_reg, + vsram - VOLT_TOL, + vsram); + } else { + ret = regulator_set_voltage(sram_reg, vsram, + vsram + VOLT_TOL); + } + + if (ret) { + regulator_set_voltage(proc_reg, old_vproc, + old_vproc); + return ret; + } + } while (vproc > new_vproc + VOLT_TOL || + vsram > new_vsram + VOLT_TOL); + } + + return 0; +} + +static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc) +{ + if (info->need_voltage_tracking) + return mtk_cpufreq_voltage_tracking(info, vproc); + else + return regulator_set_voltage(info->proc_reg, vproc, + vproc + VOLT_TOL); +} + +static int mtk_cpufreq_set_target(struct cpufreq_policy *policy, + unsigned int index) +{ + struct cpufreq_frequency_table *freq_table = policy->freq_table; + struct clk *cpu_clk = policy->clk; + struct clk *armpll = clk_get_parent(cpu_clk); + struct mtk_cpu_dvfs_info *info = policy->driver_data; + struct device *cpu_dev = info->cpu_dev; + struct dev_pm_opp *opp; + long freq_hz, old_freq_hz; + int vproc, old_vproc, inter_vproc, target_vproc, ret; + + inter_vproc = info->intermediate_voltage; + + old_freq_hz = clk_get_rate(cpu_clk); + old_vproc = regulator_get_voltage(info->proc_reg); + if (old_vproc < 0) { + pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc); + return old_vproc; + } + + freq_hz = freq_table[index].frequency * 1000; + + opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz); + if (IS_ERR(opp)) { + pr_err("cpu%d: failed to find OPP for %ld\n", + policy->cpu, freq_hz); + return PTR_ERR(opp); + } + vproc = dev_pm_opp_get_voltage(opp); + dev_pm_opp_put(opp); + + /* + * If the new voltage or the intermediate voltage is higher than the + * current voltage, scale up voltage first. + */ + target_vproc = (inter_vproc > vproc) ? 
inter_vproc : vproc; + if (old_vproc < target_vproc) { + ret = mtk_cpufreq_set_voltage(info, target_vproc); + if (ret) { + pr_err("cpu%d: failed to scale up voltage!\n", + policy->cpu); + mtk_cpufreq_set_voltage(info, old_vproc); + return ret; + } + } + + /* Reparent the CPU clock to intermediate clock. */ + ret = clk_set_parent(cpu_clk, info->inter_clk); + if (ret) { + pr_err("cpu%d: failed to re-parent cpu clock!\n", + policy->cpu); + mtk_cpufreq_set_voltage(info, old_vproc); + WARN_ON(1); + return ret; + } + + /* Set the original PLL to target rate. */ + ret = clk_set_rate(armpll, freq_hz); + if (ret) { + pr_err("cpu%d: failed to scale cpu clock rate!\n", + policy->cpu); + clk_set_parent(cpu_clk, armpll); + mtk_cpufreq_set_voltage(info, old_vproc); + return ret; + } + + /* Set parent of CPU clock back to the original PLL. */ + ret = clk_set_parent(cpu_clk, armpll); + if (ret) { + pr_err("cpu%d: failed to re-parent cpu clock!\n", + policy->cpu); + mtk_cpufreq_set_voltage(info, inter_vproc); + WARN_ON(1); + return ret; + } + + /* + * If the new voltage is lower than the intermediate voltage or the + * original voltage, scale down to the new voltage. + */ + if (vproc < inter_vproc || vproc < old_vproc) { + ret = mtk_cpufreq_set_voltage(info, vproc); + if (ret) { + pr_err("cpu%d: failed to scale down voltage!\n", + policy->cpu); + clk_set_parent(cpu_clk, info->inter_clk); + clk_set_rate(armpll, old_freq_hz); + clk_set_parent(cpu_clk, armpll); + return ret; + } + } + + return 0; +} + +#define DYNAMIC_POWER "dynamic-power-coefficient" + +static void mtk_cpufreq_ready(struct cpufreq_policy *policy) +{ + struct mtk_cpu_dvfs_info *info = policy->driver_data; + struct device_node *np = of_node_get(info->cpu_dev->of_node); + u32 capacitance = 0; + + if (WARN_ON(!np)) + return; + + if (of_find_property(np, "#cooling-cells", NULL)) { + of_property_read_u32(np, DYNAMIC_POWER, &capacitance); + + info->cdev = of_cpufreq_power_cooling_register(np, + policy, capacitance, NULL); + + if (IS_ERR(info->cdev)) { + dev_err(info->cpu_dev, + "running cpufreq without cooling device: %ld\n", + PTR_ERR(info->cdev)); + + info->cdev = NULL; + } + } + + of_node_put(np); +} + +static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) +{ + struct device *cpu_dev; + struct regulator *proc_reg = ERR_PTR(-ENODEV); + struct regulator *sram_reg = ERR_PTR(-ENODEV); + struct clk *cpu_clk = ERR_PTR(-ENODEV); + struct clk *inter_clk = ERR_PTR(-ENODEV); + struct dev_pm_opp *opp; + unsigned long rate; + int ret; + + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + pr_err("failed to get cpu%d device\n", cpu); + return -ENODEV; + } + + cpu_clk = clk_get(cpu_dev, "cpu"); + if (IS_ERR(cpu_clk)) { + if (PTR_ERR(cpu_clk) == -EPROBE_DEFER) + pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu); + else + pr_err("failed to get cpu clk for cpu%d\n", cpu); + + ret = PTR_ERR(cpu_clk); + return ret; + } + + inter_clk = clk_get(cpu_dev, "intermediate"); + if (IS_ERR(inter_clk)) { + if (PTR_ERR(inter_clk) == -EPROBE_DEFER) + pr_warn("intermediate clk for cpu%d not ready, retry.\n", + cpu); + else + pr_err("failed to get intermediate clk for cpu%d\n", + cpu); + + ret = PTR_ERR(inter_clk); + goto out_free_resources; + } + + proc_reg = regulator_get_exclusive(cpu_dev, "proc"); + if (IS_ERR(proc_reg)) { + if (PTR_ERR(proc_reg) == -EPROBE_DEFER) + pr_warn("proc regulator for cpu%d not ready, retry.\n", + cpu); + else + pr_err("failed to get proc regulator for cpu%d\n", + cpu); + + ret = PTR_ERR(proc_reg); + goto 
out_free_resources; + } + + /* Both presence and absence of sram regulator are valid cases. */ + sram_reg = regulator_get_exclusive(cpu_dev, "sram"); + + /* Get OPP-sharing information from "operating-points-v2" bindings */ + ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus); + if (ret) { + pr_err("failed to get OPP-sharing information for cpu%d\n", + cpu); + goto out_free_resources; + } + + ret = dev_pm_opp_of_cpumask_add_table(&info->cpus); + if (ret) { + pr_warn("no OPP table for cpu%d\n", cpu); + goto out_free_resources; + } + + /* Search a safe voltage for intermediate frequency. */ + rate = clk_get_rate(inter_clk); + opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate); + if (IS_ERR(opp)) { + pr_err("failed to get intermediate opp for cpu%d\n", cpu); + ret = PTR_ERR(opp); + goto out_free_opp_table; + } + info->intermediate_voltage = dev_pm_opp_get_voltage(opp); + dev_pm_opp_put(opp); + + info->cpu_dev = cpu_dev; + info->proc_reg = proc_reg; + info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg; + info->cpu_clk = cpu_clk; + info->inter_clk = inter_clk; + + /* + * If SRAM regulator is present, software "voltage tracking" is needed + * for this CPU power domain. + */ + info->need_voltage_tracking = !IS_ERR(sram_reg); + + return 0; + +out_free_opp_table: + dev_pm_opp_of_cpumask_remove_table(&info->cpus); + +out_free_resources: + if (!IS_ERR(proc_reg)) + regulator_put(proc_reg); + if (!IS_ERR(sram_reg)) + regulator_put(sram_reg); + if (!IS_ERR(cpu_clk)) + clk_put(cpu_clk); + if (!IS_ERR(inter_clk)) + clk_put(inter_clk); + + return ret; +} + +static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info) +{ + if (!IS_ERR(info->proc_reg)) + regulator_put(info->proc_reg); + if (!IS_ERR(info->sram_reg)) + regulator_put(info->sram_reg); + if (!IS_ERR(info->cpu_clk)) + clk_put(info->cpu_clk); + if (!IS_ERR(info->inter_clk)) + clk_put(info->inter_clk); + + dev_pm_opp_of_cpumask_remove_table(&info->cpus); +} + +static int mtk_cpufreq_init(struct cpufreq_policy *policy) +{ + struct mtk_cpu_dvfs_info *info; + struct cpufreq_frequency_table *freq_table; + int ret; + + info = mtk_cpu_dvfs_info_lookup(policy->cpu); + if (!info) { + pr_err("dvfs info for cpu%d is not initialized.\n", + policy->cpu); + return -EINVAL; + } + + ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table); + if (ret) { + pr_err("failed to init cpufreq table for cpu%d: %d\n", + policy->cpu, ret); + return ret; + } + + ret = cpufreq_table_validate_and_show(policy, freq_table); + if (ret) { + pr_err("%s: invalid frequency table: %d\n", __func__, ret); + goto out_free_cpufreq_table; + } + + cpumask_copy(policy->cpus, &info->cpus); + policy->driver_data = info; + policy->clk = info->cpu_clk; + + return 0; + +out_free_cpufreq_table: + dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table); + return ret; +} + +static int mtk_cpufreq_exit(struct cpufreq_policy *policy) +{ + struct mtk_cpu_dvfs_info *info = policy->driver_data; + + cpufreq_cooling_unregister(info->cdev); + dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table); + + return 0; +} + +static struct cpufreq_driver mt8173_cpufreq_driver = { + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + CPUFREQ_HAVE_GOVERNOR_PER_POLICY, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = mtk_cpufreq_set_target, + .get = cpufreq_generic_get, + .init = mtk_cpufreq_init, + .exit = mtk_cpufreq_exit, + .ready = mtk_cpufreq_ready, + .name = "mtk-cpufreq", + .attr = cpufreq_generic_attr, +}; + +static int mt8173_cpufreq_probe(struct 
platform_device *pdev) +{ + struct mtk_cpu_dvfs_info *info, *tmp; + int cpu, ret; + + for_each_possible_cpu(cpu) { + info = mtk_cpu_dvfs_info_lookup(cpu); + if (info) + continue; + + info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto release_dvfs_info_list; + } + + ret = mtk_cpu_dvfs_info_init(info, cpu); + if (ret) { + dev_err(&pdev->dev, + "failed to initialize dvfs info for cpu%d\n", + cpu); + goto release_dvfs_info_list; + } + + list_add(&info->list_head, &dvfs_info_list); + } + + ret = cpufreq_register_driver(&mt8173_cpufreq_driver); + if (ret) { + dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n"); + goto release_dvfs_info_list; + } + + return 0; + +release_dvfs_info_list: + list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) { + mtk_cpu_dvfs_info_release(info); + list_del(&info->list_head); + } + + return ret; +} + +static struct platform_driver mt8173_cpufreq_platdrv = { + .driver = { + .name = "mt8173-cpufreq", + }, + .probe = mt8173_cpufreq_probe, +}; + +/* List of machines supported by this driver */ +static const struct of_device_id mt8173_cpufreq_machines[] __initconst = { + { .compatible = "mediatek,mt2701", }, + { .compatible = "mediatek,mt7623", }, + { .compatible = "mediatek,mt817x", }, + { .compatible = "mediatek,mt8173", }, + { .compatible = "mediatek,mt8176", }, + + { } +}; + +static int __init mt8173_cpufreq_driver_init(void) +{ + struct device_node *np; + const struct of_device_id *match; + struct platform_device *pdev; + int err; + + np = of_find_node_by_path("/"); + if (!np) + return -ENODEV; + + match = of_match_node(mt8173_cpufreq_machines, np); + of_node_put(np); + if (!match) { + pr_warn("Machine is not compatible with mt8173-cpufreq\n"); + return -ENODEV; + } + + err = platform_driver_register(&mt8173_cpufreq_platdrv); + if (err) + return err; + + /* + * Since there's no place to hold device registration code and no + * device tree based way to match cpufreq driver yet, both the driver + * and the device registration codes are put here to handle defer + * probing. + */ + pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0); + if (IS_ERR(pdev)) { + pr_err("failed to register mtk-cpufreq platform device\n"); + return PTR_ERR(pdev); + } + + return 0; +} +device_initcall(mt8173_cpufreq_driver_init); diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c deleted file mode 100644 index f9f00fb4bc3a..000000000000 --- a/drivers/cpufreq/mt8173-cpufreq.c +++ /dev/null @@ -1,619 +0,0 @@ -/* - * Copyright (c) 2015 Linaro Ltd. - * Author: Pi-Cheng Chen - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MIN_VOLT_SHIFT (100000) -#define MAX_VOLT_SHIFT (200000) -#define MAX_VOLT_LIMIT (1150000) -#define VOLT_TOL (10000) - -/* - * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS - * on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in - * Mediatek SoCs has two voltage inputs, Vproc and Vsram. 
In some cases the two - * voltage inputs need to be controlled under a hardware limitation: - * 100mV < Vsram - Vproc < 200mV - * - * When scaling the clock frequency of a CPU clock domain, the clock source - * needs to be switched to another stable PLL clock temporarily until - * the original PLL becomes stable at target frequency. - */ -struct mtk_cpu_dvfs_info { - struct cpumask cpus; - struct device *cpu_dev; - struct regulator *proc_reg; - struct regulator *sram_reg; - struct clk *cpu_clk; - struct clk *inter_clk; - struct thermal_cooling_device *cdev; - struct list_head list_head; - int intermediate_voltage; - bool need_voltage_tracking; -}; - -static LIST_HEAD(dvfs_info_list); - -static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu) -{ - struct mtk_cpu_dvfs_info *info; - - list_for_each_entry(info, &dvfs_info_list, list_head) { - if (cpumask_test_cpu(cpu, &info->cpus)) - return info; - } - - return NULL; -} - -static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info, - int new_vproc) -{ - struct regulator *proc_reg = info->proc_reg; - struct regulator *sram_reg = info->sram_reg; - int old_vproc, old_vsram, new_vsram, vsram, vproc, ret; - - old_vproc = regulator_get_voltage(proc_reg); - if (old_vproc < 0) { - pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc); - return old_vproc; - } - /* Vsram should not exceed the maximum allowed voltage of SoC. */ - new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT); - - if (old_vproc < new_vproc) { - /* - * When scaling up voltages, Vsram and Vproc scale up step - * by step. At each step, set Vsram to (Vproc + 200mV) first, - * then set Vproc to (Vsram - 100mV). - * Keep doing it until Vsram and Vproc hit target voltages. - */ - do { - old_vsram = regulator_get_voltage(sram_reg); - if (old_vsram < 0) { - pr_err("%s: invalid Vsram value: %d\n", - __func__, old_vsram); - return old_vsram; - } - old_vproc = regulator_get_voltage(proc_reg); - if (old_vproc < 0) { - pr_err("%s: invalid Vproc value: %d\n", - __func__, old_vproc); - return old_vproc; - } - - vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT); - - if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) { - vsram = MAX_VOLT_LIMIT; - - /* - * If the target Vsram hits the maximum voltage, - * try to set the exact voltage value first. - */ - ret = regulator_set_voltage(sram_reg, vsram, - vsram); - if (ret) - ret = regulator_set_voltage(sram_reg, - vsram - VOLT_TOL, - vsram); - - vproc = new_vproc; - } else { - ret = regulator_set_voltage(sram_reg, vsram, - vsram + VOLT_TOL); - - vproc = vsram - MIN_VOLT_SHIFT; - } - if (ret) - return ret; - - ret = regulator_set_voltage(proc_reg, vproc, - vproc + VOLT_TOL); - if (ret) { - regulator_set_voltage(sram_reg, old_vsram, - old_vsram); - return ret; - } - } while (vproc < new_vproc || vsram < new_vsram); - } else if (old_vproc > new_vproc) { - /* - * When scaling down voltages, Vsram and Vproc scale down step - * by step. At each step, set Vproc to (Vsram - 200mV) first, - * then set Vproc to (Vproc + 100mV). - * Keep doing it until Vsram and Vproc hit target voltages. 
- */ - do { - old_vproc = regulator_get_voltage(proc_reg); - if (old_vproc < 0) { - pr_err("%s: invalid Vproc value: %d\n", - __func__, old_vproc); - return old_vproc; - } - old_vsram = regulator_get_voltage(sram_reg); - if (old_vsram < 0) { - pr_err("%s: invalid Vsram value: %d\n", - __func__, old_vsram); - return old_vsram; - } - - vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT); - ret = regulator_set_voltage(proc_reg, vproc, - vproc + VOLT_TOL); - if (ret) - return ret; - - if (vproc == new_vproc) - vsram = new_vsram; - else - vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT); - - if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) { - vsram = MAX_VOLT_LIMIT; - - /* - * If the target Vsram hits the maximum voltage, - * try to set the exact voltage value first. - */ - ret = regulator_set_voltage(sram_reg, vsram, - vsram); - if (ret) - ret = regulator_set_voltage(sram_reg, - vsram - VOLT_TOL, - vsram); - } else { - ret = regulator_set_voltage(sram_reg, vsram, - vsram + VOLT_TOL); - } - - if (ret) { - regulator_set_voltage(proc_reg, old_vproc, - old_vproc); - return ret; - } - } while (vproc > new_vproc + VOLT_TOL || - vsram > new_vsram + VOLT_TOL); - } - - return 0; -} - -static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc) -{ - if (info->need_voltage_tracking) - return mtk_cpufreq_voltage_tracking(info, vproc); - else - return regulator_set_voltage(info->proc_reg, vproc, - vproc + VOLT_TOL); -} - -static int mtk_cpufreq_set_target(struct cpufreq_policy *policy, - unsigned int index) -{ - struct cpufreq_frequency_table *freq_table = policy->freq_table; - struct clk *cpu_clk = policy->clk; - struct clk *armpll = clk_get_parent(cpu_clk); - struct mtk_cpu_dvfs_info *info = policy->driver_data; - struct device *cpu_dev = info->cpu_dev; - struct dev_pm_opp *opp; - long freq_hz, old_freq_hz; - int vproc, old_vproc, inter_vproc, target_vproc, ret; - - inter_vproc = info->intermediate_voltage; - - old_freq_hz = clk_get_rate(cpu_clk); - old_vproc = regulator_get_voltage(info->proc_reg); - if (old_vproc < 0) { - pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc); - return old_vproc; - } - - freq_hz = freq_table[index].frequency * 1000; - - opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz); - if (IS_ERR(opp)) { - pr_err("cpu%d: failed to find OPP for %ld\n", - policy->cpu, freq_hz); - return PTR_ERR(opp); - } - vproc = dev_pm_opp_get_voltage(opp); - dev_pm_opp_put(opp); - - /* - * If the new voltage or the intermediate voltage is higher than the - * current voltage, scale up voltage first. - */ - target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc; - if (old_vproc < target_vproc) { - ret = mtk_cpufreq_set_voltage(info, target_vproc); - if (ret) { - pr_err("cpu%d: failed to scale up voltage!\n", - policy->cpu); - mtk_cpufreq_set_voltage(info, old_vproc); - return ret; - } - } - - /* Reparent the CPU clock to intermediate clock. */ - ret = clk_set_parent(cpu_clk, info->inter_clk); - if (ret) { - pr_err("cpu%d: failed to re-parent cpu clock!\n", - policy->cpu); - mtk_cpufreq_set_voltage(info, old_vproc); - WARN_ON(1); - return ret; - } - - /* Set the original PLL to target rate. */ - ret = clk_set_rate(armpll, freq_hz); - if (ret) { - pr_err("cpu%d: failed to scale cpu clock rate!\n", - policy->cpu); - clk_set_parent(cpu_clk, armpll); - mtk_cpufreq_set_voltage(info, old_vproc); - return ret; - } - - /* Set parent of CPU clock back to the original PLL. 
*/ - ret = clk_set_parent(cpu_clk, armpll); - if (ret) { - pr_err("cpu%d: failed to re-parent cpu clock!\n", - policy->cpu); - mtk_cpufreq_set_voltage(info, inter_vproc); - WARN_ON(1); - return ret; - } - - /* - * If the new voltage is lower than the intermediate voltage or the - * original voltage, scale down to the new voltage. - */ - if (vproc < inter_vproc || vproc < old_vproc) { - ret = mtk_cpufreq_set_voltage(info, vproc); - if (ret) { - pr_err("cpu%d: failed to scale down voltage!\n", - policy->cpu); - clk_set_parent(cpu_clk, info->inter_clk); - clk_set_rate(armpll, old_freq_hz); - clk_set_parent(cpu_clk, armpll); - return ret; - } - } - - return 0; -} - -#define DYNAMIC_POWER "dynamic-power-coefficient" - -static void mtk_cpufreq_ready(struct cpufreq_policy *policy) -{ - struct mtk_cpu_dvfs_info *info = policy->driver_data; - struct device_node *np = of_node_get(info->cpu_dev->of_node); - u32 capacitance = 0; - - if (WARN_ON(!np)) - return; - - if (of_find_property(np, "#cooling-cells", NULL)) { - of_property_read_u32(np, DYNAMIC_POWER, &capacitance); - - info->cdev = of_cpufreq_power_cooling_register(np, - policy, capacitance, NULL); - - if (IS_ERR(info->cdev)) { - dev_err(info->cpu_dev, - "running cpufreq without cooling device: %ld\n", - PTR_ERR(info->cdev)); - - info->cdev = NULL; - } - } - - of_node_put(np); -} - -static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) -{ - struct device *cpu_dev; - struct regulator *proc_reg = ERR_PTR(-ENODEV); - struct regulator *sram_reg = ERR_PTR(-ENODEV); - struct clk *cpu_clk = ERR_PTR(-ENODEV); - struct clk *inter_clk = ERR_PTR(-ENODEV); - struct dev_pm_opp *opp; - unsigned long rate; - int ret; - - cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) { - pr_err("failed to get cpu%d device\n", cpu); - return -ENODEV; - } - - cpu_clk = clk_get(cpu_dev, "cpu"); - if (IS_ERR(cpu_clk)) { - if (PTR_ERR(cpu_clk) == -EPROBE_DEFER) - pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu); - else - pr_err("failed to get cpu clk for cpu%d\n", cpu); - - ret = PTR_ERR(cpu_clk); - return ret; - } - - inter_clk = clk_get(cpu_dev, "intermediate"); - if (IS_ERR(inter_clk)) { - if (PTR_ERR(inter_clk) == -EPROBE_DEFER) - pr_warn("intermediate clk for cpu%d not ready, retry.\n", - cpu); - else - pr_err("failed to get intermediate clk for cpu%d\n", - cpu); - - ret = PTR_ERR(inter_clk); - goto out_free_resources; - } - - proc_reg = regulator_get_exclusive(cpu_dev, "proc"); - if (IS_ERR(proc_reg)) { - if (PTR_ERR(proc_reg) == -EPROBE_DEFER) - pr_warn("proc regulator for cpu%d not ready, retry.\n", - cpu); - else - pr_err("failed to get proc regulator for cpu%d\n", - cpu); - - ret = PTR_ERR(proc_reg); - goto out_free_resources; - } - - /* Both presence and absence of sram regulator are valid cases. */ - sram_reg = regulator_get_exclusive(cpu_dev, "sram"); - - /* Get OPP-sharing information from "operating-points-v2" bindings */ - ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus); - if (ret) { - pr_err("failed to get OPP-sharing information for cpu%d\n", - cpu); - goto out_free_resources; - } - - ret = dev_pm_opp_of_cpumask_add_table(&info->cpus); - if (ret) { - pr_warn("no OPP table for cpu%d\n", cpu); - goto out_free_resources; - } - - /* Search a safe voltage for intermediate frequency. 
*/ - rate = clk_get_rate(inter_clk); - opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate); - if (IS_ERR(opp)) { - pr_err("failed to get intermediate opp for cpu%d\n", cpu); - ret = PTR_ERR(opp); - goto out_free_opp_table; - } - info->intermediate_voltage = dev_pm_opp_get_voltage(opp); - dev_pm_opp_put(opp); - - info->cpu_dev = cpu_dev; - info->proc_reg = proc_reg; - info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg; - info->cpu_clk = cpu_clk; - info->inter_clk = inter_clk; - - /* - * If SRAM regulator is present, software "voltage tracking" is needed - * for this CPU power domain. - */ - info->need_voltage_tracking = !IS_ERR(sram_reg); - - return 0; - -out_free_opp_table: - dev_pm_opp_of_cpumask_remove_table(&info->cpus); - -out_free_resources: - if (!IS_ERR(proc_reg)) - regulator_put(proc_reg); - if (!IS_ERR(sram_reg)) - regulator_put(sram_reg); - if (!IS_ERR(cpu_clk)) - clk_put(cpu_clk); - if (!IS_ERR(inter_clk)) - clk_put(inter_clk); - - return ret; -} - -static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info) -{ - if (!IS_ERR(info->proc_reg)) - regulator_put(info->proc_reg); - if (!IS_ERR(info->sram_reg)) - regulator_put(info->sram_reg); - if (!IS_ERR(info->cpu_clk)) - clk_put(info->cpu_clk); - if (!IS_ERR(info->inter_clk)) - clk_put(info->inter_clk); - - dev_pm_opp_of_cpumask_remove_table(&info->cpus); -} - -static int mtk_cpufreq_init(struct cpufreq_policy *policy) -{ - struct mtk_cpu_dvfs_info *info; - struct cpufreq_frequency_table *freq_table; - int ret; - - info = mtk_cpu_dvfs_info_lookup(policy->cpu); - if (!info) { - pr_err("dvfs info for cpu%d is not initialized.\n", - policy->cpu); - return -EINVAL; - } - - ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table); - if (ret) { - pr_err("failed to init cpufreq table for cpu%d: %d\n", - policy->cpu, ret); - return ret; - } - - ret = cpufreq_table_validate_and_show(policy, freq_table); - if (ret) { - pr_err("%s: invalid frequency table: %d\n", __func__, ret); - goto out_free_cpufreq_table; - } - - cpumask_copy(policy->cpus, &info->cpus); - policy->driver_data = info; - policy->clk = info->cpu_clk; - - return 0; - -out_free_cpufreq_table: - dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table); - return ret; -} - -static int mtk_cpufreq_exit(struct cpufreq_policy *policy) -{ - struct mtk_cpu_dvfs_info *info = policy->driver_data; - - cpufreq_cooling_unregister(info->cdev); - dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table); - - return 0; -} - -static struct cpufreq_driver mt8173_cpufreq_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | - CPUFREQ_HAVE_GOVERNOR_PER_POLICY, - .verify = cpufreq_generic_frequency_table_verify, - .target_index = mtk_cpufreq_set_target, - .get = cpufreq_generic_get, - .init = mtk_cpufreq_init, - .exit = mtk_cpufreq_exit, - .ready = mtk_cpufreq_ready, - .name = "mtk-cpufreq", - .attr = cpufreq_generic_attr, -}; - -static int mt8173_cpufreq_probe(struct platform_device *pdev) -{ - struct mtk_cpu_dvfs_info *info, *tmp; - int cpu, ret; - - for_each_possible_cpu(cpu) { - info = mtk_cpu_dvfs_info_lookup(cpu); - if (info) - continue; - - info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); - if (!info) { - ret = -ENOMEM; - goto release_dvfs_info_list; - } - - ret = mtk_cpu_dvfs_info_init(info, cpu); - if (ret) { - dev_err(&pdev->dev, - "failed to initialize dvfs info for cpu%d\n", - cpu); - goto release_dvfs_info_list; - } - - list_add(&info->list_head, &dvfs_info_list); - } - - ret = cpufreq_register_driver(&mt8173_cpufreq_driver); - if 
(ret) { - dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n"); - goto release_dvfs_info_list; - } - - return 0; - -release_dvfs_info_list: - list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) { - mtk_cpu_dvfs_info_release(info); - list_del(&info->list_head); - } - - return ret; -} - -static struct platform_driver mt8173_cpufreq_platdrv = { - .driver = { - .name = "mt8173-cpufreq", - }, - .probe = mt8173_cpufreq_probe, -}; - -/* List of machines supported by this driver */ -static const struct of_device_id mt8173_cpufreq_machines[] __initconst = { - { .compatible = "mediatek,mt817x", }, - { .compatible = "mediatek,mt8173", }, - { .compatible = "mediatek,mt8176", }, - - { } -}; - -static int __init mt8173_cpufreq_driver_init(void) -{ - struct device_node *np; - const struct of_device_id *match; - struct platform_device *pdev; - int err; - - np = of_find_node_by_path("/"); - if (!np) - return -ENODEV; - - match = of_match_node(mt8173_cpufreq_machines, np); - of_node_put(np); - if (!match) { - pr_warn("Machine is not compatible with mt8173-cpufreq\n"); - return -ENODEV; - } - - err = platform_driver_register(&mt8173_cpufreq_platdrv); - if (err) - return err; - - /* - * Since there's no place to hold device registration code and no - * device tree based way to match cpufreq driver yet, both the driver - * and the device registration codes are put here to handle defer - * probing. - */ - pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0); - if (IS_ERR(pdev)) { - pr_err("failed to register mtk-cpufreq platform device\n"); - return PTR_ERR(pdev); - } - - return 0; -} -device_initcall(mt8173_cpufreq_driver_init); -- cgit v1.2.3 From 9dbd224f9e4e3285a1aba4c3c5683cee20e3c30c Mon Sep 17 00:00:00 2001 From: Marc Gonzalez Date: Tue, 18 Jul 2017 18:48:39 +0200 Subject: cpufreq: dt: Don't use generic platdev driver for tango On tango platforms, firmware configures the CPU clock, and Linux is then only allowed to use the cpu_clk_divider to change the frequency. Build the OPP table dynamically at init, in order to support whatever firmware throws at us. Signed-off-by: Marc Gonzalez Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/Kconfig.arm | 5 +++++ drivers/cpufreq/Makefile | 1 + drivers/cpufreq/cpufreq-dt-platdev.c | 2 -- drivers/cpufreq/tango-cpufreq.c | 38 ++++++++++++++++++++++++++++++++++++ 4 files changed, 44 insertions(+), 2 deletions(-) create mode 100644 drivers/cpufreq/tango-cpufreq.c (limited to 'drivers') diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 49ec1c0c395a..4c277d11831b 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -241,6 +241,11 @@ config ARM_STI_CPUFREQ this config option if you wish to add CPUFreq support for STi based SoCs. 
+config ARM_TANGO_CPUFREQ + bool + depends on CPUFREQ_DT && ARCH_TANGO + default y + config ARM_TEGRA20_CPUFREQ bool "Tegra20 CPUFreq support" depends on ARCH_TEGRA diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 51221b1e1f15..227922b0d42c 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -75,6 +75,7 @@ obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o +obj-$(CONFIG_ARM_TANGO_CPUFREQ) += tango-cpufreq.o obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 2eb40d46d357..096aea7fcb67 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -80,8 +80,6 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "rockchip,rk3368", }, { .compatible = "rockchip,rk3399", }, - { .compatible = "sigma,tango4" }, - { .compatible = "socionext,uniphier-pro5", }, { .compatible = "socionext,uniphier-pxs2", }, { .compatible = "socionext,uniphier-ld6b", }, diff --git a/drivers/cpufreq/tango-cpufreq.c b/drivers/cpufreq/tango-cpufreq.c new file mode 100644 index 000000000000..89a7f860bfe8 --- /dev/null +++ b/drivers/cpufreq/tango-cpufreq.c @@ -0,0 +1,38 @@ +#include +#include +#include +#include +#include + +static const struct of_device_id machines[] __initconst = { + { .compatible = "sigma,tango4" }, + { /* sentinel */ } +}; + +static int __init tango_cpufreq_init(void) +{ + struct device *cpu_dev = get_cpu_device(0); + unsigned long max_freq; + struct clk *cpu_clk; + void *res; + + if (!of_match_node(machines, of_root)) + return -ENODEV; + + cpu_clk = clk_get(cpu_dev, NULL); + if (IS_ERR(cpu_clk)) + return -ENODEV; + + max_freq = clk_get_rate(cpu_clk); + + dev_pm_opp_add(cpu_dev, max_freq / 1, 0); + dev_pm_opp_add(cpu_dev, max_freq / 2, 0); + dev_pm_opp_add(cpu_dev, max_freq / 3, 0); + dev_pm_opp_add(cpu_dev, max_freq / 5, 0); + dev_pm_opp_add(cpu_dev, max_freq / 9, 0); + + res = platform_device_register_data(NULL, "cpufreq-dt", -1, NULL, 0); + + return PTR_ERR_OR_ZERO(res); +} +device_initcall(tango_cpufreq_init); -- cgit v1.2.3 From 2d045036322c29b69c22f06530f1130338d06373 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 19 Jul 2017 15:42:41 +0530 Subject: cpufreq: governor: Drop min_sampling_rate The cpufreq core and governors aren't supposed to set a limit on how fast we want to try changing the frequency. This is currently done for the legacy governors with help of min_sampling_rate. At worst, we may end up setting the sampling rate to a value lower than the rate at which frequency can be changed and then one of the CPUs in the policy will be only changing frequency for ever. But that is something for the user to decide and there is no need to have special handling for such cases in the core. Leave it for the user to figure out. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/cpufreq_conservative.c | 6 ------ drivers/cpufreq/cpufreq_governor.c | 10 ++-------- drivers/cpufreq/cpufreq_governor.h | 1 - drivers/cpufreq/cpufreq_ondemand.c | 12 ------------ 4 files changed, 2 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 88220ff3e1c2..f20f20a77d4d 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -246,7 +246,6 @@ gov_show_one_common(sampling_rate); gov_show_one_common(sampling_down_factor); gov_show_one_common(up_threshold); gov_show_one_common(ignore_nice_load); -gov_show_one_common(min_sampling_rate); gov_show_one(cs, down_threshold); gov_show_one(cs, freq_step); @@ -254,12 +253,10 @@ gov_attr_rw(sampling_rate); gov_attr_rw(sampling_down_factor); gov_attr_rw(up_threshold); gov_attr_rw(ignore_nice_load); -gov_attr_ro(min_sampling_rate); gov_attr_rw(down_threshold); gov_attr_rw(freq_step); static struct attribute *cs_attributes[] = { - &min_sampling_rate.attr, &sampling_rate.attr, &sampling_down_factor.attr, &up_threshold.attr, @@ -297,10 +294,7 @@ static int cs_init(struct dbs_data *dbs_data) dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; dbs_data->ignore_nice_load = 0; - dbs_data->tuners = tuners; - dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * - jiffies_to_usecs(10); return 0; } diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 47e24b5384b3..858081f9c3d7 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -47,14 +47,11 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, { struct dbs_data *dbs_data = to_dbs_data(attr_set); struct policy_dbs_info *policy_dbs; - unsigned int rate; int ret; - ret = sscanf(buf, "%u", &rate); + ret = sscanf(buf, "%u", &dbs_data->sampling_rate); if (ret != 1) return -EINVAL; - dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate); - /* * We are operating under dbs_data->mutex and so the list and its * entries can't be freed concurrently. 
@@ -437,10 +434,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) latency = 1; /* Bring kernel and HW constraints together */ - dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_data->sampling_rate = max(dbs_data->min_sampling_rate, - LATENCY_MULTIPLIER * latency); + dbs_data->sampling_rate = LATENCY_MULTIPLIER * latency; if (!have_governor_per_policy()) gov->gdbs_data = dbs_data; diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 0236ec2cd654..95f207eb820e 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -41,7 +41,6 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE}; struct dbs_data { struct gov_attr_set attr_set; void *tuners; - unsigned int min_sampling_rate; unsigned int ignore_nice_load; unsigned int sampling_rate; unsigned int sampling_down_factor; diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 3937acf7e026..6b423eebfd5d 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -319,7 +319,6 @@ gov_show_one_common(sampling_rate); gov_show_one_common(up_threshold); gov_show_one_common(sampling_down_factor); gov_show_one_common(ignore_nice_load); -gov_show_one_common(min_sampling_rate); gov_show_one_common(io_is_busy); gov_show_one(od, powersave_bias); @@ -329,10 +328,8 @@ gov_attr_rw(up_threshold); gov_attr_rw(sampling_down_factor); gov_attr_rw(ignore_nice_load); gov_attr_rw(powersave_bias); -gov_attr_ro(min_sampling_rate); static struct attribute *od_attributes[] = { - &min_sampling_rate.attr, &sampling_rate.attr, &up_threshold.attr, &sampling_down_factor.attr, @@ -373,17 +370,8 @@ static int od_init(struct dbs_data *dbs_data) if (idle_time != -1ULL) { /* Idle micro accounting is supported. Use finer thresholds */ dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; - /* - * In nohz/micro accounting case we set the minimum frequency - * not depending on HZ, but fixed (very low). - */ - dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; } else { dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; - - /* For correct statistics, we need 10 ticks for each measure */ - dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * - jiffies_to_usecs(10); } dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; -- cgit v1.2.3 From aa7519af450d3c62a057aece24877c34562fa25a Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 19 Jul 2017 15:42:42 +0530 Subject: cpufreq: Use transition_delay_us for legacy governors as well The policy->transition_delay_us field is used only by the schedutil governor currently, and this field describes how fast the driver wants the cpufreq governor to change CPUs frequency. It should rather be a common thing across all governors, as it doesn't have any schedutil dependency here. Create a new helper cpufreq_policy_transition_delay_us() to get the transition delay across all governors. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/cpufreq.c | 15 +++++++++++++++ drivers/cpufreq/cpufreq_governor.c | 9 +-------- 2 files changed, 16 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 9bf97a366029..c426d21822f7 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -524,6 +524,21 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, } EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); +unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy) +{ + unsigned int latency; + + if (policy->transition_delay_us) + return policy->transition_delay_us; + + latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC; + if (latency) + return latency * LATENCY_MULTIPLIER; + + return LATENCY_MULTIPLIER; +} +EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us); + /********************************************************************* * SYSFS INTERFACE * *********************************************************************/ diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 858081f9c3d7..eed069ecfd5e 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -389,7 +389,6 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) struct dbs_governor *gov = dbs_governor_of(policy); struct dbs_data *dbs_data; struct policy_dbs_info *policy_dbs; - unsigned int latency; int ret = 0; /* State should be equivalent to EXIT */ @@ -428,13 +427,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) if (ret) goto free_policy_dbs_info; - /* policy latency is in ns. Convert it to us first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - - /* Bring kernel and HW constraints together */ - dbs_data->sampling_rate = LATENCY_MULTIPLIER * latency; + dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy); if (!have_governor_per_policy()) gov->gdbs_data = dbs_data; -- cgit v1.2.3 From 8d8b2441db9647890251538f60b75a4e45fdef8d Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 19 Jul 2017 02:38:44 +0200 Subject: PM / sleep: Do not print debug messages by default Debug messages from the system suspend/hibernation infrastructure can fill up the entire kernel log buffer in some cases and anyway they are only useful for debugging. They depend on CONFIG_PM_DEBUG, but that is set as a rule as some generally useful diagnostic facilities depend on it too. For this reason, avoid printing those messages by default, but make it possible to turn them on as needed with the help of a new sysfs attribute under /sys/power/. Signed-off-by: Rafael J. Wysocki --- drivers/base/power/main.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c99f8730de82..4f5fc212d684 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -418,7 +418,6 @@ static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, dev_name(dev), pm_verb(state.event), info, error); } -#ifdef CONFIG_PM_DEBUG static void dpm_show_time(ktime_t starttime, pm_message_t state, const char *info) { @@ -432,14 +431,11 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, usecs = usecs64; if (usecs == 0) usecs = 1; - pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", - info ?: "", info ? 
" " : "", pm_verb(state.event), - usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); + + pm_pr_dbg("%s%s%s of devices complete after %ld.%03ld msecs\n", + info ?: "", info ? " " : "", pm_verb(state.event), + usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); } -#else -static inline void dpm_show_time(ktime_t starttime, pm_message_t state, - const char *info) {} -#endif /* CONFIG_PM_DEBUG */ static int dpm_run_callback(pm_callback_t cb, struct device *dev, pm_message_t state, const char *info) -- cgit v1.2.3 From afece3ab9a3640f9c1e064f56c8cdd4d783f6144 Mon Sep 17 00:00:00 2001 From: Thara Gopinath Date: Fri, 14 Jul 2017 13:10:15 -0400 Subject: PM / Domains: Add time accounting to various genpd states This patch adds support to calculate the time spent by the generic power domains in on and various idle states. Signed-off-by: Thara Gopinath Acked-by: Ulf Hansson Signed-off-by: Rafael J. Wysocki --- drivers/base/power/domain.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'drivers') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 60303aa28587..431488914982 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -209,6 +209,34 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) smp_mb__after_atomic(); } +#ifdef CONFIG_DEBUG_FS +static void genpd_update_accounting(struct generic_pm_domain *genpd) +{ + ktime_t delta, now; + + now = ktime_get(); + delta = ktime_sub(now, genpd->accounting_time); + + /* + * If genpd->status is active, it means we are just + * out of off and so update the idle time and vice + * versa. + */ + if (genpd->status == GPD_STATE_ACTIVE) { + int state_idx = genpd->state_idx; + + genpd->states[state_idx].idle_time = + ktime_add(genpd->states[state_idx].idle_time, delta); + } else { + genpd->on_time = ktime_add(genpd->on_time, delta); + } + + genpd->accounting_time = now; +} +#else +static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} +#endif + static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) { unsigned int state_idx = genpd->state_idx; @@ -361,6 +389,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, } genpd->status = GPD_STATE_POWER_OFF; + genpd_update_accounting(genpd); list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); @@ -413,6 +442,8 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) goto err; genpd->status = GPD_STATE_ACTIVE; + genpd_update_accounting(genpd); + return 0; err: @@ -1540,6 +1571,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd, genpd->max_off_time_changed = true; genpd->provider = NULL; genpd->has_provider = false; + genpd->accounting_time = ktime_get(); genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; genpd->domain.ops.runtime_resume = genpd_runtime_resume; genpd->domain.ops.prepare = pm_genpd_prepare; -- cgit v1.2.3 From b6a1d093f96b6357080aaaec617fae198eee2783 Mon Sep 17 00:00:00 2001 From: Thara Gopinath Date: Fri, 14 Jul 2017 13:10:16 -0400 Subject: PM / Domains: Extend generic power domain debugfs This patch extends the existing generic power domain debugfs. Changes involve the following - Introduce a unique debugfs entry for each generic power domain with the following attributes - current_state - Displays current state of the domain. - devices - Displays the devices associated with this domain. - sub_domains - Displays the sub power domains. 
- active_time - Displays the time the domain was in active state in ms. - total_idle_time - Displays the time the domain was in any of the idle states in ms. - idle_states - Displays the various idle states and the time spent in each idle state in ms. Signed-off-by: Thara Gopinath Acked-by: Ulf Hansson Signed-off-by: Rafael J. Wysocki --- drivers/base/power/domain.c | 205 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 195 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 431488914982..43fd08e50ae9 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2359,7 +2359,7 @@ exit: return 0; } -static int pm_genpd_summary_show(struct seq_file *s, void *data) +static int genpd_summary_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd; int ret = 0; @@ -2382,21 +2382,187 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data) return ret; } -static int pm_genpd_summary_open(struct inode *inode, struct file *file) +static int genpd_status_show(struct seq_file *s, void *data) { - return single_open(file, pm_genpd_summary_show, NULL); + static const char * const status_lookup[] = { + [GPD_STATE_ACTIVE] = "on", + [GPD_STATE_POWER_OFF] = "off" + }; + + struct generic_pm_domain *genpd = s->private; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) + goto exit; + + if (genpd->status == GPD_STATE_POWER_OFF) + seq_printf(s, "%s-%u\n", status_lookup[genpd->status], + genpd->state_idx); + else + seq_printf(s, "%s\n", status_lookup[genpd->status]); +exit: + genpd_unlock(genpd); + return ret; } -static const struct file_operations pm_genpd_summary_fops = { - .open = pm_genpd_summary_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +static int genpd_sub_domains_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + struct gpd_link *link; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(link, &genpd->master_links, master_node) + seq_printf(s, "%s\n", link->slave->name); + + genpd_unlock(genpd); + return ret; +} + +static int genpd_idle_states_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + unsigned int i; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + seq_puts(s, "State Time Spent(ms)\n"); + + for (i = 0; i < genpd->state_count; i++) { + ktime_t delta = 0; + s64 msecs; + + if ((genpd->status == GPD_STATE_POWER_OFF) && + (genpd->state_idx == i)) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + msecs = ktime_to_ms( + ktime_add(genpd->states[i].idle_time, delta)); + seq_printf(s, "S%-13i %lld\n", i, msecs); + } + + genpd_unlock(genpd); + return ret; +} + +static int genpd_active_time_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + ktime_t delta = 0; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (genpd->status == GPD_STATE_ACTIVE) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + seq_printf(s, "%lld ms\n", ktime_to_ms( + ktime_add(genpd->on_time, delta))); + + genpd_unlock(genpd); + return ret; +} + +static int genpd_total_idle_time_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + 
ktime_t delta = 0, total = 0; + unsigned int i; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + for (i = 0; i < genpd->state_count; i++) { + + if ((genpd->status == GPD_STATE_POWER_OFF) && + (genpd->state_idx == i)) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + total = ktime_add(total, genpd->states[i].idle_time); + } + total = ktime_add(total, delta); + + seq_printf(s, "%lld ms\n", ktime_to_ms(total)); + + genpd_unlock(genpd); + return ret; +} + + +static int genpd_devices_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + struct pm_domain_data *pm_data; + const char *kobj_path; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(pm_data, &genpd->dev_list, list_node) { + kobj_path = kobject_get_path(&pm_data->dev->kobj, + genpd_is_irq_safe(genpd) ? + GFP_ATOMIC : GFP_KERNEL); + if (kobj_path == NULL) + continue; + + seq_printf(s, "%s\n", kobj_path); + kfree(kobj_path); + } + + genpd_unlock(genpd); + return ret; +} + +#define define_genpd_open_function(name) \ +static int genpd_##name##_open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, genpd_##name##_show, inode->i_private); \ +} + +define_genpd_open_function(summary); +define_genpd_open_function(status); +define_genpd_open_function(sub_domains); +define_genpd_open_function(idle_states); +define_genpd_open_function(active_time); +define_genpd_open_function(total_idle_time); +define_genpd_open_function(devices); + +#define define_genpd_debugfs_fops(name) \ +static const struct file_operations genpd_##name##_fops = { \ + .open = genpd_##name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +define_genpd_debugfs_fops(summary); +define_genpd_debugfs_fops(status); +define_genpd_debugfs_fops(sub_domains); +define_genpd_debugfs_fops(idle_states); +define_genpd_debugfs_fops(active_time); +define_genpd_debugfs_fops(total_idle_time); +define_genpd_debugfs_fops(devices); static int __init pm_genpd_debug_init(void) { struct dentry *d; + struct generic_pm_domain *genpd; pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); @@ -2404,10 +2570,29 @@ static int __init pm_genpd_debug_init(void) return -ENOMEM; d = debugfs_create_file("pm_genpd_summary", S_IRUGO, - pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops); + pm_genpd_debugfs_dir, NULL, &genpd_summary_fops); if (!d) return -ENOMEM; + list_for_each_entry(genpd, &gpd_list, gpd_list_node) { + d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir); + if (!d) + return -ENOMEM; + + debugfs_create_file("current_state", 0444, + d, genpd, &genpd_status_fops); + debugfs_create_file("sub_domains", 0444, + d, genpd, &genpd_sub_domains_fops); + debugfs_create_file("idle_states", 0444, + d, genpd, &genpd_idle_states_fops); + debugfs_create_file("active_time", 0444, + d, genpd, &genpd_active_time_fops); + debugfs_create_file("total_idle_time", 0444, + d, genpd, &genpd_total_idle_time_fops); + debugfs_create_file("devices", 0444, + d, genpd, &genpd_devices_fops); + } + return 0; } late_initcall(pm_genpd_debug_init); -- cgit v1.2.3 From 786f41fb6b008ea4341355b99083a38853a5311d Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Fri, 21 Jul 2017 02:09:22 +0200 Subject: PM / core: Split dpm_suspend_noirq() and dpm_resume_noirq() Put the device interrupts disabling and enabling as well as cpuidle_pause() and cpuidle_resume() called during the "noirq" stages of system suspend into separate functions to allow the core suspend-to-idle code to be optimized (later). The only functional difference this makes is that debug facilities and diagnostic tools will not include the above operations into the "noirq" device suspend/resume duration measurements. Signed-off-by: Rafael J. Wysocki --- drivers/base/power/main.c | 67 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 46 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 4f5fc212d684..9b194e9f49a9 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -598,14 +598,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie) put_device(dev); } -/** - * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. - * @state: PM transition of the system being carried out. - * - * Call the "noirq" resume handlers for all devices in dpm_noirq_list and - * enable device drivers to receive interrupts. - */ -void dpm_resume_noirq(pm_message_t state) +void dpm_noirq_resume_devices(pm_message_t state) { struct device *dev; ktime_t starttime = ktime_get(); @@ -651,10 +644,27 @@ void dpm_resume_noirq(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, "noirq"); + trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); +} + +void dpm_noirq_end(void) +{ resume_device_irqs(); device_wakeup_disarm_wake_irqs(); cpuidle_resume(); - trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); +} + +/** + * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. + * @state: PM transition of the system being carried out. + * + * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and + * allow device drivers' interrupt handlers to be called. + */ +void dpm_resume_noirq(pm_message_t state) +{ + dpm_noirq_resume_devices(state); + dpm_noirq_end(); } /** @@ -1154,22 +1164,19 @@ static int device_suspend_noirq(struct device *dev) return __device_suspend_noirq(dev, pm_transition, false); } -/** - * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. - * @state: PM transition of the system being carried out. - * - * Prevent device drivers from receiving interrupts and call the "noirq" suspend - * handlers for all non-sysdev devices. - */ -int dpm_suspend_noirq(pm_message_t state) +void dpm_noirq_begin(void) +{ + cpuidle_pause(); + device_wakeup_arm_wake_irqs(); + suspend_device_irqs(); +} + +int dpm_noirq_suspend_devices(pm_message_t state) { ktime_t starttime = ktime_get(); int error = 0; trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); - cpuidle_pause(); - device_wakeup_arm_wake_irqs(); - suspend_device_irqs(); mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; @@ -1204,7 +1211,6 @@ int dpm_suspend_noirq(pm_message_t state) if (error) { suspend_stats.failed_suspend_noirq++; dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); - dpm_resume_noirq(resume_event(state)); } else { dpm_show_time(starttime, state, "noirq"); } @@ -1212,6 +1218,25 @@ int dpm_suspend_noirq(pm_message_t state) return error; } +/** + * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. 
+ * @state: PM transition of the system being carried out. + * + * Prevent device drivers' interrupt handlers from being called and invoke + * "noirq" suspend callbacks for all non-sysdev devices. + */ +int dpm_suspend_noirq(pm_message_t state) +{ + int ret; + + dpm_noirq_begin(); + ret = dpm_noirq_suspend_devices(state); + if (ret) + dpm_resume_noirq(resume_event(state)); + + return ret; +} + /** * device_suspend_late - Execute a "late suspend" callback for given device. * @dev: Device to handle. -- cgit v1.2.3 From 48059c095bafc2ba23da1b3fa26b6ba142cb854f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 21 Jul 2017 02:10:22 +0200 Subject: PM / core: Add error argument to dpm_show_time() Make the core device suspend/resume code also call dpm_show_time() on failures and add an error argument to this function so that the message printed by it can reflect the success or failure condition. This makes the debug messages in question look less confusing in the failing cases. Signed-off-by: Rafael J. Wysocki --- drivers/base/power/main.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 9b194e9f49a9..f64e5b819e5c 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -418,7 +418,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, dev_name(dev), pm_verb(state.event), info, error); } -static void dpm_show_time(ktime_t starttime, pm_message_t state, +static void dpm_show_time(ktime_t starttime, pm_message_t state, int error, const char *info) { ktime_t calltime; @@ -432,8 +432,9 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, if (usecs == 0) usecs = 1; - pm_pr_dbg("%s%s%s of devices complete after %ld.%03ld msecs\n", + pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n", info ?: "", info ? " " : "", pm_verb(state.event), + error ? 
"aborted" : "complete", usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); } @@ -643,7 +644,7 @@ void dpm_noirq_resume_devices(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - dpm_show_time(starttime, state, "noirq"); + dpm_show_time(starttime, state, 0, "noirq"); trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); } @@ -782,7 +783,7 @@ void dpm_resume_early(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - dpm_show_time(starttime, state, "early"); + dpm_show_time(starttime, state, 0, "early"); trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); } @@ -954,7 +955,7 @@ void dpm_resume(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - dpm_show_time(starttime, state, NULL); + dpm_show_time(starttime, state, 0, NULL); cpufreq_resume(); trace_suspend_resume(TPS("dpm_resume"), state.event, false); @@ -1211,9 +1212,8 @@ int dpm_noirq_suspend_devices(pm_message_t state) if (error) { suspend_stats.failed_suspend_noirq++; dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); - } else { - dpm_show_time(starttime, state, "noirq"); } + dpm_show_time(starttime, state, error, "noirq"); trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); return error; } @@ -1371,9 +1371,8 @@ int dpm_suspend_late(pm_message_t state) suspend_stats.failed_suspend_late++; dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_resume_early(resume_event(state)); - } else { - dpm_show_time(starttime, state, "late"); } + dpm_show_time(starttime, state, error, "late"); trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); return error; } @@ -1639,8 +1638,8 @@ int dpm_suspend(pm_message_t state) if (error) { suspend_stats.failed_suspend++; dpm_save_failed_step(SUSPEND_SUSPEND); - } else - dpm_show_time(starttime, state, NULL); + } + dpm_show_time(starttime, state, error, NULL); trace_suspend_resume(TPS("dpm_suspend"), state.event, false); return error; } -- cgit v1.2.3 From 9a3ebe3523cc8297301d5d95332536ad123856bf Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 21 Jul 2017 02:12:10 +0200 Subject: PM / sleep: Check pm_wakeup_pending() in __device_suspend_noirq() Restore the pm_wakeup_pending() check in __device_suspend_noirq() removed by commit eed4d47efe95 (ACPI / sleep: Ignore spurious SCI wakeups from suspend-to-idle) as that allows the function to return earlier if there's a wakeup event pending already (so that it may spend less time on carrying out operations that will be reversed shortly anyway) and rework the main suspend-to-idle loop to take that optimization into account. Signed-off-by: Rafael J. Wysocki --- drivers/base/power/main.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index f64e5b819e5c..ea1732ed7a9d 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1105,6 +1105,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a if (async_error) goto Complete; + if (pm_wakeup_pending()) { + async_error = -EBUSY; + goto Complete; + } + if (dev->power.syscore || dev->power.direct_complete) goto Complete; -- cgit v1.2.3 From b8b78825a2d9b423d5e7cc2943c07c3b8140941e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 19 Jul 2017 15:42:44 +0530 Subject: cpufreq: Don't set transition_latency for setpolicy drivers The transition_latency field isn't used for drivers with ->setpolicy() callback present and there is no point setting it from the drivers. 
Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 1 - drivers/cpufreq/longrun.c | 1 - 2 files changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b7fb8b7c980d..c1100a3e3325 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2138,7 +2138,6 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) if (ret) return ret; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE)) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c index 074971b12635..542aa9adba1a 100644 --- a/drivers/cpufreq/longrun.c +++ b/drivers/cpufreq/longrun.c @@ -270,7 +270,6 @@ static int longrun_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = longrun_low_freq; policy->cpuinfo.max_freq = longrun_high_freq; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; longrun_get_policy(policy); return 0; -- cgit v1.2.3 From 768608a578255c0add26bfdcb720d320eca1556b Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 19 Jul 2017 15:42:45 +0530 Subject: cpufreq: arm_big_little: Make ->get_transition_latency() mandatory All users of arm_big_little driver are defining it and there is no need to keep it optional. Make it mandatory to remove the always true conditional statement. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/arm_big_little.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index ea6d62547b10..17504129fd77 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -483,11 +483,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) return ret; } - if (arm_bL_ops->get_transition_latency) - policy->cpuinfo.transition_latency = - arm_bL_ops->get_transition_latency(cpu_dev); - else - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + policy->cpuinfo.transition_latency = + arm_bL_ops->get_transition_latency(cpu_dev); if (is_bL_switching_enabled()) per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); @@ -622,7 +619,8 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) return -EBUSY; } - if (!ops || !strlen(ops->name) || !ops->init_opp_table) { + if (!ops || !strlen(ops->name) || !ops->init_opp_table || + !ops->get_transition_latency) { pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__); return -ENODEV; } -- cgit v1.2.3 From ed4676e254630c1f00a4fc8f3821890fff7e3643 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 19 Jul 2017 15:42:46 +0530 Subject: cpufreq: Replace "max_transition_latency" with "dynamic_switching" There is no limitation in the ondemand or conservative governors which disallow the transition_latency to be greater than 10 ms. The max_transition_latency field is rather used to disallow automatic dynamic frequency switching for platforms which didn't wanted these governors to run. Replace max_transition_latency with a boolean (dynamic_switching) and check for transition_latency == CPUFREQ_ETERNAL along with that. This makes it pretty straight forward to read/understand now. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/cpufreq.c | 8 ++++---- drivers/cpufreq/cpufreq_governor.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index c426d21822f7..88139e5e87da 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2003,13 +2003,13 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy) if (!policy->governor) return -EINVAL; - if (policy->governor->max_transition_latency && - policy->cpuinfo.transition_latency > - policy->governor->max_transition_latency) { + /* Platform doesn't want dynamic frequency switching ? */ + if (policy->governor->dynamic_switching && + policy->cpuinfo.transition_latency == CPUFREQ_ETERNAL) { struct cpufreq_governor *gov = cpufreq_fallback_governor(); if (gov) { - pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n", + pr_warn("Transition latency set to CPUFREQ_ETERNAL, can't use %s governor. Fallback to %s governor\n", policy->governor->name, gov->name); policy->governor = gov; } else { diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 95f207eb820e..8463f5def0f5 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -159,7 +159,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy); #define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \ { \ .name = _name_, \ - .max_transition_latency = TRANSITION_LATENCY_LIMIT, \ + .dynamic_switching = true, \ .owner = THIS_MODULE, \ .init = cpufreq_dbs_governor_init, \ .exit = cpufreq_dbs_governor_exit, \ -- cgit v1.2.3 From fe829ed8ef1f3c7ac22843bd594ef2f6c4044288 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 19 Jul 2017 15:42:48 +0530 Subject: cpufreq: Add CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING cpufreq driver flag The policy->transition_latency field is used for multiple purposes today and its not straight forward at all. This is how it is used: A. Set the correct transition_latency value. B. Set it to CPUFREQ_ETERNAL because: 1. We don't want automatic dynamic switching (with ondemand/conservative) to happen at all. 2. We don't know the transition latency. This patch handles the B.1. case in a more readable way. A new flag for the cpufreq drivers is added to disallow use of cpufreq governors which have dynamic_switching flag set. All the current cpufreq drivers which are setting transition_latency unconditionally to CPUFREQ_ETERNAL are updated to use it. They don't need to set transition_latency anymore. There shouldn't be any functional change after this patch. Signed-off-by: Viresh Kumar Reviewed-by: Dominik Brodowski Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/cpufreq-nforce2.c | 2 +- drivers/cpufreq/cpufreq.c | 5 +++-- drivers/cpufreq/elanfreq.c | 4 +--- drivers/cpufreq/gx-suspmod.c | 2 +- drivers/cpufreq/pmac32-cpufreq.c | 7 +++++-- drivers/cpufreq/sa1100-cpufreq.c | 5 +++-- drivers/cpufreq/sa1110-cpufreq.c | 5 +++-- drivers/cpufreq/sh-cpufreq.c | 3 +-- drivers/cpufreq/speedstep-smi.c | 2 +- drivers/cpufreq/unicore2-cpufreq.c | 3 +-- 10 files changed, 20 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index 5503d491b016..dbf82f36d270 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -357,7 +357,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100; policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; return 0; } @@ -369,6 +368,7 @@ static int nforce2_cpu_exit(struct cpufreq_policy *policy) static struct cpufreq_driver nforce2_driver = { .name = "nforce2", + .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = nforce2_verify, .target = nforce2_target, .get = nforce2_get, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 88139e5e87da..6ec589c048b2 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2005,11 +2005,12 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy) /* Platform doesn't want dynamic frequency switching ? */ if (policy->governor->dynamic_switching && - policy->cpuinfo.transition_latency == CPUFREQ_ETERNAL) { + (cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING || + policy->cpuinfo.transition_latency == CPUFREQ_ETERNAL)) { struct cpufreq_governor *gov = cpufreq_fallback_governor(); if (gov) { - pr_warn("Transition latency set to CPUFREQ_ETERNAL, can't use %s governor. Fallback to %s governor\n", + pr_warn("Can't use %s governor as dynamic switching is disallowed. 
Fallback to %s governor\n", policy->governor->name, gov->name); policy->governor = gov; } else { diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index bfce11cba1df..45e2ca62515e 100644 --- a/drivers/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c @@ -165,9 +165,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) if (pos->frequency > max_freq) pos->frequency = CPUFREQ_ENTRY_INVALID; - /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - return cpufreq_table_validate_and_show(policy, elanfreq_table); } @@ -196,6 +193,7 @@ __setup("elanfreq=", elanfreq_setup); static struct cpufreq_driver elanfreq_driver = { .get = elanfreq_get_cpu_frequency, + .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = cpufreq_generic_frequency_table_verify, .target_index = elanfreq_target, .init = elanfreq_cpu_init, diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c index 3488c9c175eb..8f52a06664e3 100644 --- a/drivers/cpufreq/gx-suspmod.c +++ b/drivers/cpufreq/gx-suspmod.c @@ -428,7 +428,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) policy->max = maxfreq; policy->cpuinfo.min_freq = maxfreq / max_duration; policy->cpuinfo.max_freq = maxfreq; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; return 0; } @@ -438,6 +437,7 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) * MediaGX/Geode GX initialize cpufreq driver */ static struct cpufreq_driver gx_suspmod_driver = { + .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .get = gx_get_cpuspeed, .verify = cpufreq_gx_verify, .target = cpufreq_gx_target, diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index ff44016ea031..61ae06ca008e 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c @@ -442,7 +442,8 @@ static struct cpufreq_driver pmac_cpufreq_driver = { .init = pmac_cpufreq_cpu_init, .suspend = pmac_cpufreq_suspend, .resume = pmac_cpufreq_resume, - .flags = CPUFREQ_PM_NO_WARN, + .flags = CPUFREQ_PM_NO_WARN | + CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .attr = cpufreq_generic_attr, .name = "powermac", }; @@ -626,14 +627,16 @@ static int __init pmac_cpufreq_setup(void) if (!value) goto out; cur_freq = (*value) / 1000; - transition_latency = CPUFREQ_ETERNAL; /* Check for 7447A based MacRISC3 */ if (of_machine_is_compatible("MacRISC3") && of_get_property(cpunode, "dynamic-power-step", NULL) && PVR_VER(mfspr(SPRN_PVR)) == 0x8003) { pmac_cpufreq_init_7447A(cpunode); + + /* Allow dynamic switching */ transition_latency = 8000000; + pmac_cpufreq_driver.flags &= ~CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING; /* Check for other MacRISC3 machines */ } else if (of_machine_is_compatible("PowerBook3,4") || of_machine_is_compatible("PowerBook3,5") || diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c index 728eab77e8e0..e2d8a77c36d5 100644 --- a/drivers/cpufreq/sa1100-cpufreq.c +++ b/drivers/cpufreq/sa1100-cpufreq.c @@ -197,11 +197,12 @@ static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr) static int __init sa1100_cpu_init(struct cpufreq_policy *policy) { - return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); + return cpufreq_generic_init(policy, sa11x0_freq_table, 0); } static struct cpufreq_driver sa1100_driver __refdata = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = 
cpufreq_generic_frequency_table_verify, .target_index = sa1100_target, .get = sa11x0_getspeed, diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c index 2bac9b6cfeea..66e5fb088ecc 100644 --- a/drivers/cpufreq/sa1110-cpufreq.c +++ b/drivers/cpufreq/sa1110-cpufreq.c @@ -306,13 +306,14 @@ static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr) static int __init sa1110_cpu_init(struct cpufreq_policy *policy) { - return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); + return cpufreq_generic_init(policy, sa11x0_freq_table, 0); } /* sa1110_driver needs __refdata because it must remain after init registers * it with cpufreq_register_driver() */ static struct cpufreq_driver sa1110_driver __refdata = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = cpufreq_generic_frequency_table_verify, .target_index = sa1110_target, .get = sa11x0_getspeed, diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index 719c3d9f07fb..28893d435cf5 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c @@ -137,8 +137,6 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; } - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, " "Maximum %u.%03u MHz.\n", policy->min / 1000, policy->min % 1000, @@ -159,6 +157,7 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) static struct cpufreq_driver sh_cpufreq_driver = { .name = "sh", + .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .get = sh_cpufreq_get, .target = sh_cpufreq_target, .verify = sh_cpufreq_verify, diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index 37b30071c220..d23f24ccff38 100644 --- a/drivers/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c @@ -266,7 +266,6 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) pr_debug("workaround worked.\n"); } - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; return cpufreq_table_validate_and_show(policy, speedstep_freqs); } @@ -290,6 +289,7 @@ static int speedstep_resume(struct cpufreq_policy *policy) static struct cpufreq_driver speedstep_driver = { .name = "speedstep-smi", + .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = cpufreq_generic_frequency_table_verify, .target_index = speedstep_target, .init = speedstep_cpu_init, diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c index 6f9dfa80563a..db62d9844751 100644 --- a/drivers/cpufreq/unicore2-cpufreq.c +++ b/drivers/cpufreq/unicore2-cpufreq.c @@ -58,13 +58,12 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy) policy->min = policy->cpuinfo.min_freq = 250000; policy->max = policy->cpuinfo.max_freq = 1000000; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; policy->clk = clk_get(NULL, "MAIN_CLK"); return PTR_ERR_OR_ZERO(policy->clk); } static struct cpufreq_driver ucv2_driver = { - .flags = CPUFREQ_STICKY, + .flags = CPUFREQ_STICKY | CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = ucv2_verify_speed, .target = ucv2_target, .get = cpufreq_generic_get, -- cgit v1.2.3 From fc4c709fc88d5603b235ba18d11f6dba811e1664 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 19 Jul 2017 15:42:49 +0530 Subject: cpufreq: Allow dynamic switching with CPUFREQ_ETERNAL latency With the recent updates, CPUFREQ_ETERNAL is only used 
by the drivers which don't know their transition latency but want to use dynamic switching. Anyway, the routine cpufreq_policy_transition_delay_us() caps the value of transition latency to 10 ms now and that can be used safely with such platforms. Remove the check from cpufreq_init_governor() and allow dynamic switching for such configurations as well. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 6ec589c048b2..b2cc98551fc3 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2005,8 +2005,7 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy) /* Platform doesn't want dynamic frequency switching ? */ if (policy->governor->dynamic_switching && - (cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING || - policy->cpuinfo.transition_latency == CPUFREQ_ETERNAL)) { + cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) { struct cpufreq_governor *gov = cpufreq_fallback_governor(); if (gov) { -- cgit v1.2.3 From 9d0ef7af1f2dfd813ddadc17b768f096104e5077 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 25 Jul 2017 00:10:59 +0200 Subject: cpufreq: intel_pstate: Do not use PID-based P-state selection All systems with a defined ACPI preferred profile that are not "servers" have been using the load-based P-state selection algorithm in intel_pstate since 4.12-rc1 (mobile systems and laptops have been using it since 4.10-rc1) and no problems with it have been reported to date. In particular, no regressions with respect to the PID-based P-state selection have been reported. Also testing indicates that the P-state selection algorithm based on CPU load is generally on par with the PID-based algorithm performance-wise, and for some workloads it turns out to be better than the other one, while being more straightforward and easier to understand at the same time. Moreover, the PID-based P-state selection algorithm in intel_pstate is known to be unstable in some situation and generally problematic, the issues with it are hard to address and it has become a significant maintenance burden. For these reasons, make intel_pstate use the "powersave" P-state selection algorithm based on CPU load in the active mode on all systems and drop the PID-based P-state selection code along with all things related to it from the driver. Also update the documentation accordingly. Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 276 +---------------------------------------- 1 file changed, 2 insertions(+), 274 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 6cd503525638..0eedc4f04a44 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -172,28 +172,6 @@ struct vid_data { int32_t ratio; }; -/** - * struct _pid - Stores PID data - * @setpoint: Target set point for busyness or performance - * @integral: Storage for accumulated error values - * @p_gain: PID proportional gain - * @i_gain: PID integral gain - * @d_gain: PID derivative gain - * @deadband: PID deadband - * @last_err: Last error storage for integral part of PID calculation - * - * Stores PID coefficients and last error for PID controller. 
- */ -struct _pid { - int setpoint; - int32_t integral; - int32_t p_gain; - int32_t i_gain; - int32_t d_gain; - int deadband; - int32_t last_err; -}; - /** * struct global_params - Global parameters, mostly tunable via sysfs. * @no_turbo: Whether or not to use turbo P-states. @@ -223,7 +201,6 @@ struct global_params { * @last_update: Time of the last update. * @pstate: Stores P state limits for this CPU * @vid: Stores VID limits for this CPU - * @pid: Stores PID parameters for this CPU * @last_sample_time: Last Sample time * @aperf_mperf_shift: Number of clock cycles after aperf, merf is incremented * This shift is a multiplier to mperf delta to @@ -258,7 +235,6 @@ struct cpudata { struct pstate_data pstate; struct vid_data vid; - struct _pid pid; u64 last_update; u64 last_sample_time; @@ -283,28 +259,6 @@ struct cpudata { static struct cpudata **all_cpu_data; -/** - * struct pstate_adjust_policy - Stores static PID configuration data - * @sample_rate_ms: PID calculation sample rate in ms - * @sample_rate_ns: Sample rate calculation in ns - * @deadband: PID deadband - * @setpoint: PID Setpoint - * @p_gain_pct: PID proportional gain - * @i_gain_pct: PID integral gain - * @d_gain_pct: PID derivative gain - * - * Stores per CPU model static PID configuration data. - */ -struct pstate_adjust_policy { - int sample_rate_ms; - s64 sample_rate_ns; - int deadband; - int setpoint; - int p_gain_pct; - int d_gain_pct; - int i_gain_pct; -}; - /** * struct pstate_funcs - Per CPU model specific callbacks * @get_max: Callback to get maximum non turbo effective P state @@ -333,15 +287,6 @@ struct pstate_funcs { }; static struct pstate_funcs pstate_funcs __read_mostly; -static struct pstate_adjust_policy pid_params __read_mostly = { - .sample_rate_ms = 10, - .sample_rate_ns = 10 * NSEC_PER_MSEC, - .deadband = 0, - .setpoint = 97, - .p_gain_pct = 20, - .d_gain_pct = 0, - .i_gain_pct = 0, -}; static int hwp_active __read_mostly; static bool per_cpu_limits __read_mostly; @@ -509,56 +454,6 @@ static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) } #endif -static signed int pid_calc(struct _pid *pid, int32_t busy) -{ - signed int result; - int32_t pterm, dterm, fp_error; - int32_t integral_limit; - - fp_error = pid->setpoint - busy; - - if (abs(fp_error) <= pid->deadband) - return 0; - - pterm = mul_fp(pid->p_gain, fp_error); - - pid->integral += fp_error; - - /* - * We limit the integral here so that it will never - * get higher than 30. This prevents it from becoming - * too large an input over long periods of time and allows - * it to get factored out sooner. - * - * The value of 30 was chosen through experimentation. 
- */ - integral_limit = int_tofp(30); - if (pid->integral > integral_limit) - pid->integral = integral_limit; - if (pid->integral < -integral_limit) - pid->integral = -integral_limit; - - dterm = mul_fp(pid->d_gain, fp_error - pid->last_err); - pid->last_err = fp_error; - - result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; - result = result + (1 << (FRAC_BITS-1)); - return (signed int)fp_toint(result); -} - -static inline void intel_pstate_pid_reset(struct cpudata *cpu) -{ - struct _pid *pid = &cpu->pid; - - pid->p_gain = percent_fp(pid_params.p_gain_pct); - pid->d_gain = percent_fp(pid_params.d_gain_pct); - pid->i_gain = percent_fp(pid_params.i_gain_pct); - pid->setpoint = int_tofp(pid_params.setpoint); - pid->last_err = pid->setpoint - int_tofp(100); - pid->deadband = int_tofp(pid_params.deadband); - pid->integral = 0; -} - static inline void update_turbo_state(void) { u64 misc_en; @@ -911,82 +806,6 @@ static void intel_pstate_update_policies(void) cpufreq_update_policy(cpu); } -/************************** debugfs begin ************************/ -static int pid_param_set(void *data, u64 val) -{ - unsigned int cpu; - - *(u32 *)data = val; - pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; - for_each_possible_cpu(cpu) - if (all_cpu_data[cpu]) - intel_pstate_pid_reset(all_cpu_data[cpu]); - - return 0; -} - -static int pid_param_get(void *data, u64 *val) -{ - *val = *(u32 *)data; - return 0; -} -DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n"); - -static struct dentry *debugfs_parent; - -struct pid_param { - char *name; - void *value; - struct dentry *dentry; -}; - -static struct pid_param pid_files[] = { - {"sample_rate_ms", &pid_params.sample_rate_ms, }, - {"d_gain_pct", &pid_params.d_gain_pct, }, - {"i_gain_pct", &pid_params.i_gain_pct, }, - {"deadband", &pid_params.deadband, }, - {"setpoint", &pid_params.setpoint, }, - {"p_gain_pct", &pid_params.p_gain_pct, }, - {NULL, NULL, } -}; - -static void intel_pstate_debug_expose_params(void) -{ - int i; - - debugfs_parent = debugfs_create_dir("pstate_snb", NULL); - if (IS_ERR_OR_NULL(debugfs_parent)) - return; - - for (i = 0; pid_files[i].name; i++) { - struct dentry *dentry; - - dentry = debugfs_create_file(pid_files[i].name, 0660, - debugfs_parent, pid_files[i].value, - &fops_pid_param); - if (!IS_ERR(dentry)) - pid_files[i].dentry = dentry; - } -} - -static void intel_pstate_debug_hide_params(void) -{ - int i; - - if (IS_ERR_OR_NULL(debugfs_parent)) - return; - - for (i = 0; pid_files[i].name; i++) { - debugfs_remove(pid_files[i].dentry); - pid_files[i].dentry = NULL; - } - - debugfs_remove(debugfs_parent); - debugfs_parent = NULL; -} - -/************************** debugfs end ************************/ - /************************** sysfs begin ************************/ #define show_one(file_name, object) \ static ssize_t show_##file_name \ @@ -1661,44 +1480,6 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) return target; } -static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) -{ - int32_t perf_scaled, max_pstate, current_pstate, sample_ratio; - u64 duration_ns; - - /* - * perf_scaled is the ratio of the average P-state during the last - * sampling period to the P-state requested last time (in percent). - * - * That measures the system's response to the previous P-state - * selection. 
- */ - max_pstate = cpu->pstate.max_pstate_physical; - current_pstate = cpu->pstate.current_pstate; - perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf, - div_fp(100 * max_pstate, current_pstate)); - - /* - * Since our utilization update callback will not run unless we are - * in C0, check if the actual elapsed time is significantly greater (3x) - * than our sample interval. If it is, then we were idle for a long - * enough period of time to adjust our performance metric. - */ - duration_ns = cpu->sample.time - cpu->last_sample_time; - if ((s64)duration_ns > pid_params.sample_rate_ns * 3) { - sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns); - perf_scaled = mul_fp(perf_scaled, sample_ratio); - } else { - sample_ratio = div_fp(100 * (cpu->sample.mperf << cpu->aperf_mperf_shift), - cpu->sample.tsc); - if (sample_ratio < int_tofp(1)) - perf_scaled = 0; - } - - cpu->sample.busy_scaled = perf_scaled; - return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled); -} - static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) { int max_pstate = intel_pstate_get_base_pstate(cpu); @@ -1741,23 +1522,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate) fp_toint(cpu->iowait_boost * 100)); } -static void intel_pstate_update_util_pid(struct update_util_data *data, - u64 time, unsigned int flags) -{ - struct cpudata *cpu = container_of(data, struct cpudata, update_util); - u64 delta_ns = time - cpu->sample.time; - - if ((s64)delta_ns < pid_params.sample_rate_ns) - return; - - if (intel_pstate_sample(cpu, time)) { - int target_pstate; - - target_pstate = get_target_pstate_use_performance(cpu); - intel_pstate_adjust_pstate(cpu, target_pstate); - } -} - static void intel_pstate_update_util(struct update_util_data *data, u64 time, unsigned int flags) { @@ -1792,7 +1556,7 @@ static struct pstate_funcs core_funcs = { .get_turbo = core_get_turbo_pstate, .get_scaling = core_get_scaling, .get_val = core_get_val, - .update_util = intel_pstate_update_util_pid, + .update_util = intel_pstate_update_util, }; static const struct pstate_funcs silvermont_funcs = { @@ -1825,7 +1589,7 @@ static const struct pstate_funcs knl_funcs = { .get_aperf_mperf_shift = knl_get_aperf_mperf_shift, .get_scaling = core_get_scaling, .get_val = core_get_val, - .update_util = intel_pstate_update_util_pid, + .update_util = intel_pstate_update_util, }; static const struct pstate_funcs bxt_funcs = { @@ -1879,8 +1643,6 @@ static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { {} }; -static bool pid_in_use(void); - static int intel_pstate_init_cpu(unsigned int cpunum) { struct cpudata *cpu; @@ -1911,8 +1673,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) intel_pstate_disable_ee(cpunum); intel_pstate_hwp_enable(cpu); - } else if (pid_in_use()) { - intel_pstate_pid_reset(cpu); } intel_pstate_get_cpu_pstates(cpu); @@ -2270,12 +2030,6 @@ static struct cpufreq_driver intel_cpufreq = { static struct cpufreq_driver *default_driver = &intel_pstate; -static bool pid_in_use(void) -{ - return intel_pstate_driver == &intel_pstate && - pstate_funcs.update_util == intel_pstate_update_util_pid; -} - static void intel_pstate_driver_cleanup(void) { unsigned int cpu; @@ -2310,9 +2064,6 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) global.min_perf_pct = min_perf_pct_min(); - if (pid_in_use()) - intel_pstate_debug_expose_params(); - return 0; } @@ -2321,9 +2072,6 @@ static int intel_pstate_unregister_driver(void) if (hwp_active) return -EBUSY; - 
if (pid_in_use()) - intel_pstate_debug_hide_params(); - cpufreq_unregister_driver(intel_pstate_driver); intel_pstate_driver_cleanup(); @@ -2391,24 +2139,6 @@ static int __init intel_pstate_msrs_not_valid(void) return 0; } -#ifdef CONFIG_ACPI -static void intel_pstate_use_acpi_profile(void) -{ - switch (acpi_gbl_FADT.preferred_profile) { - case PM_MOBILE: - case PM_TABLET: - case PM_APPLIANCE_PC: - case PM_DESKTOP: - case PM_WORKSTATION: - pstate_funcs.update_util = intel_pstate_update_util; - } -} -#else -static void intel_pstate_use_acpi_profile(void) -{ -} -#endif - static void __init copy_cpu_funcs(struct pstate_funcs *funcs) { pstate_funcs.get_max = funcs->get_max; @@ -2420,8 +2150,6 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs) pstate_funcs.get_vid = funcs->get_vid; pstate_funcs.update_util = funcs->update_util; pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift; - - intel_pstate_use_acpi_profile(); } #ifdef CONFIG_ACPI -- cgit v1.2.3 From c4f3f70cacba2fa19545389a12d09b606d2ad1cf Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 25 Jul 2017 00:12:20 +0200 Subject: cpufreq: intel_pstate: Drop ->update_util from pstate_funcs All systems use the same P-state selection "powersave" algorithm in the active mode if HWP is not used, so there's no need to provide a pointer for it in struct pstate_funcs any more. Drop ->update_util from struct pstate_funcs and make intel_pstate_set_update_util_hook() use intel_pstate_update_util() directly. Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 0eedc4f04a44..f562ca74bd3b 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -268,7 +268,6 @@ static struct cpudata **all_cpu_data; * @get_scaling: Callback to get frequency scaling factor * @get_val: Callback to convert P state to actual MSR write value * @get_vid: Callback to get VID data for Atom platforms - * @update_util: Active mode utilization update callback. * * Core and Atom CPU models have different way to get P State limits. This * structure is used to store those callbacks. 
@@ -282,8 +281,6 @@ struct pstate_funcs { int (*get_aperf_mperf_shift)(void); u64 (*get_val)(struct cpudata*, int pstate); void (*get_vid)(struct cpudata *); - void (*update_util)(struct update_util_data *data, u64 time, - unsigned int flags); }; static struct pstate_funcs pstate_funcs __read_mostly; @@ -1556,7 +1553,6 @@ static struct pstate_funcs core_funcs = { .get_turbo = core_get_turbo_pstate, .get_scaling = core_get_scaling, .get_val = core_get_val, - .update_util = intel_pstate_update_util, }; static const struct pstate_funcs silvermont_funcs = { @@ -1567,7 +1563,6 @@ static const struct pstate_funcs silvermont_funcs = { .get_val = atom_get_val, .get_scaling = silvermont_get_scaling, .get_vid = atom_get_vid, - .update_util = intel_pstate_update_util, }; static const struct pstate_funcs airmont_funcs = { @@ -1578,7 +1573,6 @@ static const struct pstate_funcs airmont_funcs = { .get_val = atom_get_val, .get_scaling = airmont_get_scaling, .get_vid = atom_get_vid, - .update_util = intel_pstate_update_util, }; static const struct pstate_funcs knl_funcs = { @@ -1589,7 +1583,6 @@ static const struct pstate_funcs knl_funcs = { .get_aperf_mperf_shift = knl_get_aperf_mperf_shift, .get_scaling = core_get_scaling, .get_val = core_get_val, - .update_util = intel_pstate_update_util, }; static const struct pstate_funcs bxt_funcs = { @@ -1599,7 +1592,6 @@ static const struct pstate_funcs bxt_funcs = { .get_turbo = core_get_turbo_pstate, .get_scaling = core_get_scaling, .get_val = core_get_val, - .update_util = intel_pstate_update_util, }; #define ICPU(model, policy) \ @@ -1702,7 +1694,7 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num) /* Prevent intel_pstate_update_util() from using stale data. */ cpu->sample.time = 0; cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, - pstate_funcs.update_util); + intel_pstate_update_util); cpu->update_util_set = true; } @@ -2148,7 +2140,6 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs) pstate_funcs.get_scaling = funcs->get_scaling; pstate_funcs.get_val = funcs->get_val; pstate_funcs.get_vid = funcs->get_vid; - pstate_funcs.update_util = funcs->update_util; pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift; } @@ -2293,9 +2284,7 @@ static int __init intel_pstate_init(void) if (x86_match_cpu(hwp_support_ids)) { copy_cpu_funcs(&core_funcs); - if (no_hwp) { - pstate_funcs.update_util = intel_pstate_update_util; - } else { + if (!no_hwp) { hwp_active++; intel_pstate.attr = hwp_cpufreq_attrs; goto hwp_cpu_matched; -- cgit v1.2.3 From 38c1c6a9f3c4c919c82cc20d4c70814c64574887 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 21 Jul 2017 21:53:30 +0200 Subject: cpufreq: s5pv210: add missing of_node_put() for_each_compatible_node performs an of_node_get on each iteration, so a return from the loop requires an of_node_put. The semantic patch that fixes this problem is as follows (http://coccinelle.lip6.fr): // @@ local idexpression n; expression e,e1,e2; statement S; iterator i1; iterator name for_each_compatible_node; @@ for_each_compatible_node(n,e1,e2) { ... ( of_node_put(n); | e = n | return n; | i1(...,n,...) S | + of_node_put(n); ? return ...; ) ... } // Additionally, call of_node_put on the previous value of np, obtained from of_find_compatible_node, that is no longer accessible at the point of the for_each_compatible_node. Signed-off-by: Julia Lawall Acked-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/s5pv210-cpufreq.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index f82074eea779..5d31c2db12a3 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -602,6 +602,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) } clk_base = of_iomap(np, 0); + of_node_put(np); if (!clk_base) { pr_err("%s: failed to map clock registers\n", __func__); return -EFAULT; @@ -612,6 +613,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) if (id < 0 || id >= ARRAY_SIZE(dmc_base)) { pr_err("%s: failed to get alias of dmc node '%s'\n", __func__, np->name); + of_node_put(np); return id; } @@ -619,6 +621,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) if (!dmc_base[id]) { pr_err("%s: failed to map dmc%d registers\n", __func__, id); + of_node_put(np); return -EFAULT; } } -- cgit v1.2.3 From 762792913f8c71fb4807b25540347e4ace1b5b95 Mon Sep 17 00:00:00 2001 From: Waldemar Rymarkiewicz Date: Thu, 27 Jul 2017 12:01:17 +0200 Subject: PM / OPP: Fix get sharing CPUs when hotplug is used We fail dev_pm_opp_of_get_sharing_cpus() when possible CPU device does not exist. This can happen on platforms where not all possible CPUs are available at start up ie. hotplugged out. The CPU device is not registered in the system so we are not able to check struct device to set the sharing CPUs bitmask properly. Example (real use case): 2 physical MIPS cores, 4 VPE, cpu0/2 run Linux and cpu1/3 are not available for Linux at boot up. cpufreq-dt driver + OPP v2 fail to register opp_table due to the fact there is no struct device for cpu1 (remains offline at bootup). To solve the bug, stop using device struct to check device_node. Instead get CPU device_node directly from device tree with of_get_cpu_node(). Signed-off-by: Waldemar Rymarkiewicz Acked-by: Viresh Kumar Reviewed-by: Stephen Boyd Signed-off-by: Rafael J. Wysocki --- drivers/base/power/opp/of.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 6f0497b377c7..0b718886479b 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c @@ -248,15 +248,22 @@ void dev_pm_opp_of_remove_table(struct device *dev) } EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); -/* Returns opp descriptor node for a device, caller must do of_node_put() */ -struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) +/* Returns opp descriptor node for a device node, caller must + * do of_node_put() */ +static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np) { /* * There should be only ONE phandle present in "operating-points-v2" * property. 
*/ - return of_parse_phandle(dev->of_node, "operating-points-v2", 0); + return of_parse_phandle(np, "operating-points-v2", 0); +} + +/* Returns opp descriptor node for a device, caller must do of_node_put() */ +struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) +{ + return _opp_of_get_opp_desc_node(dev->of_node); } EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node); @@ -576,8 +583,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - struct device_node *np, *tmp_np; - struct device *tcpu_dev; + struct device_node *np, *tmp_np, *cpu_np; int cpu, ret = 0; /* Get OPP descriptor node */ @@ -597,19 +603,18 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, if (cpu == cpu_dev->id) continue; - tcpu_dev = get_cpu_device(cpu); - if (!tcpu_dev) { - dev_err(cpu_dev, "%s: failed to get cpu%d device\n", + cpu_np = of_get_cpu_node(cpu, NULL); + if (!cpu_np) { + dev_err(cpu_dev, "%s: failed to get cpu%d node\n", __func__, cpu); - ret = -ENODEV; + ret = -ENOENT; goto put_cpu_node; } /* Get OPP descriptor node */ - tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev); + tmp_np = _opp_of_get_opp_desc_node(cpu_np); if (!tmp_np) { - dev_err(tcpu_dev, "%s: Couldn't find opp node.\n", - __func__); + pr_err("%pOF: Couldn't find opp node\n", cpu_np); ret = -ENOENT; goto put_cpu_node; } -- cgit v1.2.3 From f5c13f44c7ad4fc0ce78e5da996762c2aebf510b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 31 Jul 2017 14:27:37 +0200 Subject: cpufreq: intel_pstate: Drop INTEL_PSTATE_HWP_SAMPLING_INTERVAL After commit 62611cb912f7 (intel_pstate: delete scheduler hook in HWP mode) the INTEL_PSTATE_HWP_SAMPLING_INTERVAL is not used anywhere in the code, so drop it. Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index f562ca74bd3b..2eac2adff11b 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -38,7 +38,6 @@ #include #define INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) -#define INTEL_PSTATE_HWP_SAMPLING_INTERVAL (50 * NSEC_PER_MSEC) #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 #define INTEL_CPUFREQ_TRANSITION_DELAY 500 -- cgit v1.2.3 From 674e75411fc260b0d4532701228cfe12fc090da8 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 28 Jul 2017 12:16:38 +0530 Subject: sched: cpufreq: Allow remote cpufreq callbacks With Android UI and benchmarks the latency of cpufreq response to certain scheduling events can become very critical. Currently, callbacks into cpufreq governors are only made from the scheduler if the target CPU of the event is the same as the current CPU. This means there are certain situations where a target CPU may not run the cpufreq governor for some time. One testcase to show this behavior is where a task starts running on CPU0, then a new task is also spawned on CPU0 by a task on CPU1. If the system is configured such that the new tasks should receive maximum demand initially, this should result in CPU0 increasing frequency immediately. But because of the above mentioned limitation though, this does not occur. This patch updates the scheduler core to call the cpufreq callbacks for remote CPUs as well. 
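Whether a remote callback may actually be acted upon is then decided per policy; the cpufreq_can_do_remote_dvfs() check used in the governor hunk below lives in include/linux/cpufreq.h and is therefore not part of the drivers/ diff shown here. As a rough sketch, assuming the shape the helper takes once the follow-up "from any CPU" patch later in this series is applied as well:

/* Sketch only; the real helper is added to include/linux/cpufreq.h. */
static inline bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy)
{
	/*
	 * A remote callback is acceptable if the platform allows DVFS from
	 * any CPU, or if the CPU running the callback shares the policy.
	 */
	return policy->dvfs_possible_from_any_cpu ||
	       cpumask_test_cpu(smp_processor_id(), policy->cpus);
}
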
The schedutil, ondemand and conservative governors are updated to process cpufreq utilization update hooks called for remote CPUs where the remote CPU is managed by the cpufreq policy of the local CPU. The intel_pstate driver is updated to always reject remote callbacks. This is tested with couple of usecases (Android: hackbench, recentfling, galleryfling, vellamo, Ubuntu: hackbench) on ARM hikey board (64 bit octa-core, single policy). Only galleryfling showed minor improvements, while others didn't had much deviation. The reason being that this patch only targets a corner case, where following are required to be true to improve performance and that doesn't happen too often with these tests: - Task is migrated to another CPU. - The task has high demand, and should take the target CPU to higher OPPs. - And the target CPU doesn't call into the cpufreq governor until the next tick. Based on initial work from Steve Muckle. Signed-off-by: Viresh Kumar Acked-by: Saravana Kannan Acked-by: Peter Zijlstra (Intel) Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq_governor.c | 3 +++ drivers/cpufreq/intel_pstate.c | 8 ++++++++ 2 files changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 47e24b5384b3..ce5f3ec7ce71 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -275,6 +275,9 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time, struct policy_dbs_info *policy_dbs = cdbs->policy_dbs; u64 delta_ns, lst; + if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy)) + return; + /* * The work may not be allowed to be queued up right now. * Possible reasons: diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 6cd503525638..d299b86a5a00 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1747,6 +1747,10 @@ static void intel_pstate_update_util_pid(struct update_util_data *data, struct cpudata *cpu = container_of(data, struct cpudata, update_util); u64 delta_ns = time - cpu->sample.time; + /* Don't allow remote callbacks */ + if (smp_processor_id() != cpu->cpu) + return; + if ((s64)delta_ns < pid_params.sample_rate_ns) return; @@ -1764,6 +1768,10 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time, struct cpudata *cpu = container_of(data, struct cpudata, update_util); u64 delta_ns; + /* Don't allow remote callbacks */ + if (smp_processor_id() != cpu->cpu) + return; + if (flags & SCHED_CPUFREQ_IOWAIT) { cpu->iowait_boost = int_tofp(1); } else if (cpu->iowait_boost) { -- cgit v1.2.3 From 99d14d0e16fadb4de001bacc0ac0786a9ebecf2a Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 28 Jul 2017 12:16:39 +0530 Subject: cpufreq: Process remote callbacks from any CPU if the platform permits On many platforms, CPUs can do DVFS across cpufreq policies. i.e CPU from policy-A can change frequency of CPUs belonging to policy-B. This is quite common in case of ARM platforms where we don't configure any per-cpu register. Add a flag to identify such platforms and update cpufreq_can_do_remote_dvfs() to allow remote callbacks if this flag is set. Also enable the flag for cpufreq-dt driver which is used only on ARM platforms currently. Signed-off-by: Viresh Kumar Acked-by: Saravana Kannan Acked-by: Peter Zijlstra (Intel) Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/cpufreq-dt.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index fef3c2160691..d83ab94d041a 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -274,6 +274,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) transition_latency = CPUFREQ_ETERNAL; policy->cpuinfo.transition_latency = transition_latency; + policy->dvfs_possible_from_any_cpu = true; return 0; -- cgit v1.2.3 From 635173a17b0323c28a39fb06fa36d876035cd2b9 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 28 Jul 2017 02:06:36 +0200 Subject: platform/x86: intel-hid: Wake up Dell Latitude 7275 from suspend-to-idle MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On Dell Latitude 7275 the 5-button array is not exposed in the ACPI tables, but still notifies are sent to the Intel HID device object (device ID INT33D5) in response to power button actions while suspended to idle. However, they are currently ignored as the intel-hid driver is not prepared to take care of them. As a result, power button wakeup from suspend-to-idle doesn't work on this platform, but suspend-to-idle is the only reliable suspend variant on it (the S3 implementation in the platform firmware turns out to be broken), so it would be good to handle it properly. For this reason, add an upfront check against the power button press event (0xCE) to notify_handler() in the wakeup mode which allows it to catch the power button wakeup notification on the affected platform (even though priv->array is NULL on it) and should not change the behavior on platforms with priv->array present (because priv->array contains the event in question in those cases). Link: https://bugzilla.kernel.org/show_bug.cgi?id=196115 Tested-by: Jérôme de Bretagne Signed-off-by: Rafael J. Wysocki Acked-by: Andy Shevchenko Acked-By: Mario Limonciello --- drivers/platform/x86/intel-hid.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 8519e0f97bdd..a782c78e7c63 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -203,15 +203,26 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) acpi_status status; if (priv->wakeup_mode) { + /* + * Needed for wakeup from suspend-to-idle to work on some + * platforms that don't expose the 5-button array, but still + * send notifies with the power button event code to this + * device object on power button actions while suspended. + */ + if (event == 0xce) + goto wakeup; + /* Wake up on 5-button array events only. */ if (event == 0xc0 || !priv->array) return; - if (sparse_keymap_entry_from_scancode(priv->array, event)) - pm_wakeup_hard_event(&device->dev); - else + if (!sparse_keymap_entry_from_scancode(priv->array, event)) { dev_info(&device->dev, "unknown event 0x%x\n", event); + return; + } +wakeup: + pm_wakeup_hard_event(&device->dev); return; } -- cgit v1.2.3 From 7bde2d50014d3c5110b1db9a8e2659b6fa5f6b4a Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Thu, 3 Aug 2017 19:03:14 -0700 Subject: cpufreq: intel_pstate: Improve IO performance with per-core P-states In the current implementation, the response latency between seeing SCHED_CPUFREQ_IOWAIT set and the actual P-state adjustment can be up to 10ms. 
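The 10 ms figure comes from the sampling-interval rate limit that intel_pstate_update_util() applies before acting on any scheduler hint; paraphrasing the existing code:

	/* Paraphrased from intel_pstate_update_util(): */
	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns < INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL)
		return;	/* no re-evaluation for up to 10 ms (10 * NSEC_PER_MSEC) */
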
It can be reduced by bumping up the P-state to the max at the time SCHED_CPUFREQ_IOWAIT is passed to intel_pstate_update_util(). With this change, the IO performance improves significantly. For a simple "grep -r . linux" (Here linux is the kernel source folder) with caches dropped every time on a Broadwell Xeon workstation with per-core P-states, the user and system time is shorter by as much as 30% - 40%. The same performance difference was not observed on clients that don't support per-core P-state. Signed-off-by: Srinivas Pandruvada [ rjw: Changelog ] Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 2eac2adff11b..532e261b2cab 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1526,6 +1526,15 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time, if (flags & SCHED_CPUFREQ_IOWAIT) { cpu->iowait_boost = int_tofp(1); + cpu->last_update = time; + /* + * The last time the busy was 100% so P-state was max anyway + * so avoid overhead of computation. + */ + if (fp_toint(cpu->sample.busy_scaled) == 100) + return; + + goto set_pstate; } else if (cpu->iowait_boost) { /* Clear iowait_boost if the CPU may have been idle. */ delta_ns = time - cpu->last_update; @@ -1537,6 +1546,7 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time, if ((s64)delta_ns < INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL) return; +set_pstate: if (intel_pstate_sample(cpu, time)) { int target_pstate; -- cgit v1.2.3 From 319af40a0053fea2ecadcf10bef7e796c91ea8c0 Mon Sep 17 00:00:00 2001 From: Finley Xiao Date: Fri, 4 Aug 2017 09:52:31 +0800 Subject: cpufreq: dt: Add rk3328 compatible to use generic cpufreq driver This patch adds the rk3328 compatible string for supporting the generic cpufreq driver on RK3328. Signed-off-by: Finley Xiao Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq-dt-platdev.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 096aea7fcb67..13c0c47b607e 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -76,6 +76,7 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "rockchip,rk3188", }, { .compatible = "rockchip,rk3228", }, { .compatible = "rockchip,rk3288", }, + { .compatible = "rockchip,rk3328", }, { .compatible = "rockchip,rk3366", }, { .compatible = "rockchip,rk3368", }, { .compatible = "rockchip,rk3399", }, -- cgit v1.2.3 From e870c6c87cf9484090d28f2a68aa29e008960c93 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 31 Jul 2017 23:43:18 +0200 Subject: ACPI / PM: Prefer suspend-to-idle over S3 on some systems Modify the ACPI system sleep support setup code to select suspend-to-idle as the default system sleep state if (1) the ACPI_FADT_LOW_POWER_S0 flag is set in the FADT and (2) the Low Power Idle S0 _DSM interface has been discovered and (3) the default sleep state was not selected from the kernel command line. The main motivation for this change is that systems where the (1) and (2) conditions are met typically ship with OSes that don't exercise the S3 path in the platform firmware which remains untested and turns out to be non-functional at least in some cases. Signed-off-by: Rafael J. 
Wysocki Tested-by: Mario Limonciello --- drivers/acpi/sleep.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index be17664736b2..b363283dfcd9 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -714,6 +714,12 @@ static int lps0_device_attach(struct acpi_device *adev, if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) { lps0_dsm_func_mask = bitmask; lps0_device_handle = adev->handle; + /* + * Use suspend-to-idle by default if the default + * suspend mode was not set from the command line. + */ + if (mem_sleep_default > PM_SUSPEND_MEM) + mem_sleep_current = PM_SUSPEND_FREEZE; } acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n", -- cgit v1.2.3 From 034def597bb73cbf29ffade7d8aec8408af8c743 Mon Sep 17 00:00:00 2001 From: Khiem Nguyen Date: Fri, 4 Aug 2017 15:18:00 +0200 Subject: cpufreq: rcar: Add support for R8A7795 SoC Since the commit "a399dc9fc50 cpufreq: shmobile: Use generic platdev driver", the cpufreq-dt-platdev driver is used to enable cpufreq-dt support. Hence, follow that implementation to support the new R8A7795 SoC. Signed-off-by: Khiem Nguyen Signed-off-by: Simon Horman Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq-dt-platdev.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 13c0c47b607e..bcee384b3251 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -67,6 +67,7 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "renesas,r8a7792", }, { .compatible = "renesas,r8a7793", }, { .compatible = "renesas,r8a7794", }, + { .compatible = "renesas,r8a7795", }, { .compatible = "renesas,sh73a0", }, { .compatible = "rockchip,rk2928", }, -- cgit v1.2.3 From 820b9b0c093ebd40c4b8e44c0e84613c877f4948 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 2 Aug 2017 01:32:44 +0200 Subject: PM / wakeup: Set power.can_wakeup if wakeup_sysfs_add() fails Currently, an error from wakeup_sysfs_add() in device_set_wakeup_capable() causes the device's power.can_wakeup flag to remain unset even though the device technically is capable of signaling wakeup. If wakeup_sysfs_add() fails, user space may not be able to enable the device to wake up the system from sleep states, but at least for some devices that does not matter. For this reason, set or clear power.can_wakeup upfront and, if wakeup_sysfs_add() returns an error, print a message to the log. Signed-off-by: Rafael J. Wysocki --- drivers/base/power/wakeup.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 144e6d8fafc8..b49efe33099e 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -412,15 +412,17 @@ void device_set_wakeup_capable(struct device *dev, bool capable) if (!!dev->power.can_wakeup == !!capable) return; + dev->power.can_wakeup = capable; if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { if (capable) { - if (wakeup_sysfs_add(dev)) - return; + int ret = wakeup_sysfs_add(dev); + + if (ret) + dev_info(dev, "Wakeup sysfs attributes not added\n"); } else { wakeup_sysfs_remove(dev); } } - dev->power.can_wakeup = capable; } EXPORT_SYMBOL_GPL(device_set_wakeup_capable); -- cgit v1.2.3 From a891283e56362543d1d276e192266069ef52075b Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Thu, 10 Aug 2017 01:08:56 +0200 Subject: cpufreq: intel_pstate: Simplify intel_pstate_adjust_pstate() Since there is only one P-state selection routine in intel_pstate now, make intel_pstate_adjust_pstate() call it directly and drop the target_pstate argument from that function. Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 942cd6888b54..e519940fa57c 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1495,13 +1495,15 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); } -static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate) +static void intel_pstate_adjust_pstate(struct cpudata *cpu) { int from = cpu->pstate.current_pstate; struct sample *sample; + int target_pstate; update_turbo_state(); + target_pstate = get_target_pstate_use_cpu_load(cpu); target_pstate = intel_pstate_prepare_request(cpu, target_pstate); trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); intel_pstate_update_pstate(cpu, target_pstate); @@ -1547,12 +1549,8 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time, return; set_pstate: - if (intel_pstate_sample(cpu, time)) { - int target_pstate; - - target_pstate = get_target_pstate_use_cpu_load(cpu); - intel_pstate_adjust_pstate(cpu, target_pstate); - } + if (intel_pstate_sample(cpu, time)) + intel_pstate_adjust_pstate(cpu); } static struct pstate_funcs core_funcs = { -- cgit v1.2.3 From d77d4888cb8458b098accd4d7555c0f7f6399c4e Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 10 Aug 2017 01:09:16 +0200 Subject: cpufreq: intel_pstate: Shorten a couple of long names The names of the INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL symbol and the get_target_pstate_use_cpu_load() function don't need to be so long any more, so make them shorter. Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/intel_pstate.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index e519940fa57c..ed69b86f0569 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -37,7 +37,7 @@ #include #include -#define INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) +#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 #define INTEL_CPUFREQ_TRANSITION_DELAY 500 @@ -1438,7 +1438,7 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu) cpu->sample.core_avg_perf); } -static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) +static inline int32_t get_target_pstate(struct cpudata *cpu) { struct sample *sample = &cpu->sample; int32_t busy_frac, boost; @@ -1503,7 +1503,7 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu) update_turbo_state(); - target_pstate = get_target_pstate_use_cpu_load(cpu); + target_pstate = get_target_pstate(cpu); target_pstate = intel_pstate_prepare_request(cpu, target_pstate); trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); intel_pstate_update_pstate(cpu, target_pstate); @@ -1545,7 +1545,7 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time, } cpu->last_update = time; delta_ns = time - cpu->sample.time; - if ((s64)delta_ns < INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL) + if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) return; set_pstate: -- cgit v1.2.3 From 209887e6b974c22328487b55d0f390522b014b03 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 9 Aug 2017 10:21:46 +0530 Subject: cpufreq: Return 0 from ->fast_switch() on errors CPUFREQ_ENTRY_INVALID is a special symbol which is used to specify that an entry in the cpufreq table is invalid. But using it outside of the scope of the cpufreq table looks a bit incorrect. We can represent an invalid frequency by writing it as 0 instead if we need. Note that it is already done that way for the return value of the ->get() callback. Lets do the same for ->fast_switch() and not use CPUFREQ_ENTRY_INVALID outside of the scope of cpufreq table. Also update the comment over cpufreq_driver_fast_switch() to clearly mention what this returns. None of the drivers return CPUFREQ_ENTRY_INVALID as of now from ->fast_switch() callback and so we don't need to update any of those. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 9bf97a366029..0c728190e444 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1817,9 +1817,10 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); * twice in parallel for the same policy and that it will never be called in * parallel with either ->target() or ->target_index() for the same policy. * - * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch() - * callback to indicate an error condition, the hardware configuration must be - * preserved. + * Returns the actual frequency set for the CPU. + * + * If 0 is returned by the driver's ->fast_switch() callback to indicate an + * error condition, the hardware configuration must be preserved. 
*/ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) -- cgit v1.2.3 From 862e0104f4d1a489517cb1075ce974c0a1fd5a97 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 9 Aug 2017 18:12:38 +0800 Subject: cpufreq: mediatek: add cleanups with the more generic naming Since more MediaTek SoCs can be supported with the cpufreq driver and not limited to MT8173, a couple of cleanups are done here with renaming those functions and related structures with "mtk" instead of "mt8173". Signed-off-by: Sean Wang Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/mediatek-cpufreq.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 008e088fc246..9fa9291b907c 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -507,7 +507,7 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy) return 0; } -static struct cpufreq_driver mt8173_cpufreq_driver = { +static struct cpufreq_driver mtk_cpufreq_driver = { .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_HAVE_GOVERNOR_PER_POLICY, .verify = cpufreq_generic_frequency_table_verify, @@ -520,7 +520,7 @@ static struct cpufreq_driver mt8173_cpufreq_driver = { .attr = cpufreq_generic_attr, }; -static int mt8173_cpufreq_probe(struct platform_device *pdev) +static int mtk_cpufreq_probe(struct platform_device *pdev) { struct mtk_cpu_dvfs_info *info, *tmp; int cpu, ret; @@ -547,7 +547,7 @@ static int mt8173_cpufreq_probe(struct platform_device *pdev) list_add(&info->list_head, &dvfs_info_list); } - ret = cpufreq_register_driver(&mt8173_cpufreq_driver); + ret = cpufreq_register_driver(&mtk_cpufreq_driver); if (ret) { dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n"); goto release_dvfs_info_list; @@ -564,15 +564,15 @@ release_dvfs_info_list: return ret; } -static struct platform_driver mt8173_cpufreq_platdrv = { +static struct platform_driver mtk_cpufreq_platdrv = { .driver = { - .name = "mt8173-cpufreq", + .name = "mtk-cpufreq", }, - .probe = mt8173_cpufreq_probe, + .probe = mtk_cpufreq_probe, }; /* List of machines supported by this driver */ -static const struct of_device_id mt8173_cpufreq_machines[] __initconst = { +static const struct of_device_id mtk_cpufreq_machines[] __initconst = { { .compatible = "mediatek,mt2701", }, { .compatible = "mediatek,mt7623", }, { .compatible = "mediatek,mt817x", }, @@ -582,7 +582,7 @@ static const struct of_device_id mt8173_cpufreq_machines[] __initconst = { { } }; -static int __init mt8173_cpufreq_driver_init(void) +static int __init mtk_cpufreq_driver_init(void) { struct device_node *np; const struct of_device_id *match; @@ -593,14 +593,14 @@ static int __init mt8173_cpufreq_driver_init(void) if (!np) return -ENODEV; - match = of_match_node(mt8173_cpufreq_machines, np); + match = of_match_node(mtk_cpufreq_machines, np); of_node_put(np); if (!match) { - pr_warn("Machine is not compatible with mt8173-cpufreq\n"); + pr_warn("Machine is not compatible with mtk-cpufreq\n"); return -ENODEV; } - err = platform_driver_register(&mt8173_cpufreq_platdrv); + err = platform_driver_register(&mtk_cpufreq_platdrv); if (err) return err; @@ -610,7 +610,7 @@ static int __init mt8173_cpufreq_driver_init(void) * and the device registration codes are put here to handle defer * probing. 
*/ - pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0); + pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0); if (IS_ERR(pdev)) { pr_err("failed to register mtk-cpufreq platform device\n"); return PTR_ERR(pdev); @@ -618,4 +618,4 @@ static int __init mt8173_cpufreq_driver_init(void) return 0; } -device_initcall(mt8173_cpufreq_driver_init); +device_initcall(mtk_cpufreq_driver_init); -- cgit v1.2.3 From ccc03d86e2c81667664f805f4e1b84c80aa7b3bf Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 9 Aug 2017 18:12:39 +0800 Subject: cpufreq: mediatek: add support of cpufreq to MT7622 SoC MT7622 is a 64-bit ARMv8 based dual-core SoC (2 * Cortex-A53) with a single cluster. The hardware is also compatible with the current driver, so add MT7622 as one of the compatible string list. Signed-off-by: Sean Wang Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/mediatek-cpufreq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 9fa9291b907c..18c4bd9a5c65 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -574,6 +574,7 @@ static struct platform_driver mtk_cpufreq_platdrv = { /* List of machines supported by this driver */ static const struct of_device_id mtk_cpufreq_machines[] __initconst = { { .compatible = "mediatek,mt2701", }, + { .compatible = "mediatek,mt7622", }, { .compatible = "mediatek,mt7623", }, { .compatible = "mediatek,mt817x", }, { .compatible = "mediatek,mt8173", }, -- cgit v1.2.3 From 690cbb90a709c1b9389c6cb8e1978e77553ce0fb Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 10 Aug 2017 00:13:07 +0200 Subject: PM / s2idle: Rename PM_SUSPEND_FREEZE to PM_SUSPEND_TO_IDLE To make it clear that the symbol in question refers to suspend-to-idle, rename it from PM_SUSPEND_FREEZE to PM_SUSPEND_TO_IDLE. Signed-off-by: Rafael J. Wysocki --- drivers/acpi/sleep.c | 2 +- drivers/regulator/of_regulator.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index b363283dfcd9..a0a6fd10fb5f 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -719,7 +719,7 @@ static int lps0_device_attach(struct acpi_device *adev, * suspend mode was not set from the command line. */ if (mem_sleep_default > PM_SUSPEND_MEM) - mem_sleep_current = PM_SUSPEND_FREEZE; + mem_sleep_current = PM_SUSPEND_TO_IDLE; } acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n", diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 96bf75458da5..860480ecf2be 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -150,7 +150,7 @@ static void of_get_regulation_constraints(struct device_node *np, suspend_state = &constraints->state_disk; break; case PM_SUSPEND_ON: - case PM_SUSPEND_FREEZE: + case PM_SUSPEND_TO_IDLE: case PM_SUSPEND_STANDBY: default: continue; -- cgit v1.2.3 From f02f4f9d826590f1ef1b087374d3e3cfb7949eac Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 10 Aug 2017 00:13:56 +0200 Subject: PM / s2idle: Rename freeze_state enum and related items Rename the freeze_state enum representing the suspend-to-idle state machine states to s2idle_states and rename the related variables and functions accordingly. Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/wakeup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 144e6d8fafc8..34aebf3e460b 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -863,7 +863,7 @@ bool pm_wakeup_pending(void) void pm_system_wakeup(void) { atomic_inc(&pm_abort_suspend); - freeze_wake(); + s2idle_wake(); } EXPORT_SYMBOL_GPL(pm_system_wakeup); -- cgit v1.2.3 From 28ba086ed30fb3fb714598aa029b894c3754fa7b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 10 Aug 2017 00:14:45 +0200 Subject: PM / s2idle: Rename ->enter_freeze to ->enter_s2idle Rename the ->enter_freeze cpuidle driver callback to ->enter_s2idle to make it clear that it is used for entering suspend-to-idle and rename the related functions, variables and so on accordingly. Signed-off-by: Rafael J. Wysocki --- drivers/acpi/processor_idle.c | 6 +- drivers/cpuidle/cpuidle.c | 18 ++-- drivers/cpuidle/dt_idle_states.c | 4 +- drivers/idle/intel_idle.c | 180 +++++++++++++++++++-------------------- 4 files changed, 104 insertions(+), 104 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 5c8aa9cf62d7..a9eb9d654b7a 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -789,7 +789,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev, return index; } -static void acpi_idle_enter_freeze(struct cpuidle_device *dev, +static void acpi_idle_enter_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); @@ -867,14 +867,14 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) drv->safe_state_index = count; } /* - * Halt-induced C1 is not good for ->enter_freeze, because it + * Halt-induced C1 is not good for ->enter_s2idle, because it * re-enables interrupts on exit. Moreover, C1 is generally not * particularly interesting from the suspend-to-idle angle, so * avoid C1 and the situations in which we may need to fall back * to it altogether. */ if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr)) - state->enter_freeze = acpi_idle_enter_freeze; + state->enter_s2idle = acpi_idle_enter_s2idle; count++; if (count == CPUIDLE_STATE_MAX) diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 60bb64f4329d..484cc8909d5c 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -77,7 +77,7 @@ static int find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev, unsigned int max_latency, unsigned int forbidden_flags, - bool freeze) + bool s2idle) { unsigned int latency_req = 0; int i, ret = 0; @@ -89,7 +89,7 @@ static int find_deepest_state(struct cpuidle_driver *drv, if (s->disabled || su->disable || s->exit_latency <= latency_req || s->exit_latency > max_latency || (s->flags & forbidden_flags) - || (freeze && !s->enter_freeze)) + || (s2idle && !s->enter_s2idle)) continue; latency_req = s->exit_latency; @@ -128,7 +128,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv, } #ifdef CONFIG_SUSPEND -static void enter_freeze_proper(struct cpuidle_driver *drv, +static void enter_s2idle_proper(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) { /* @@ -143,7 +143,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv, * suspended is generally unsafe. 
*/ stop_critical_timings(); - drv->states[index].enter_freeze(dev, drv, index); + drv->states[index].enter_s2idle(dev, drv, index); WARN_ON(!irqs_disabled()); /* * timekeeping_resume() that will be called by tick_unfreeze() for the @@ -155,25 +155,25 @@ static void enter_freeze_proper(struct cpuidle_driver *drv, } /** - * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle. + * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle. * @drv: cpuidle driver for the given CPU. * @dev: cpuidle device for the given CPU. * - * If there are states with the ->enter_freeze callback, find the deepest of + * If there are states with the ->enter_s2idle callback, find the deepest of * them and enter it with frozen tick. */ -int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev) +int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev) { int index; /* - * Find the deepest state with ->enter_freeze present, which guarantees + * Find the deepest state with ->enter_s2idle present, which guarantees * that interrupts won't be enabled when it exits and allows the tick to * be frozen safely. */ index = find_deepest_state(drv, dev, UINT_MAX, 0, true); if (index > 0) - enter_freeze_proper(drv, dev, index); + enter_s2idle_proper(drv, dev, index); return index; } diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index ae8eb0359889..5ba9fab64e7d 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -41,9 +41,9 @@ static int init_state_node(struct cpuidle_state *idle_state, /* * Since this is not a "coupled" state, it's safe to assume interrupts * won't be enabled when it exits allowing the tick to be frozen - * safely. So enter() can be also enter_freeze() callback. + * safely. So enter() can be also enter_s2idle() callback. 
*/ - idle_state->enter_freeze = match_id->data; + idle_state->enter_s2idle = match_id->data; err = of_property_read_u32(state_node, "wakeup-latency-us", &idle_state->exit_latency); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index c2ae819a871c..474cc4330613 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -97,7 +97,7 @@ static const struct idle_cpu *icpu; static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; static int intel_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); -static void intel_idle_freeze(struct cpuidle_device *dev, +static void intel_idle_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); static struct cpuidle_state *cpuidle_state_table; @@ -132,7 +132,7 @@ static struct cpuidle_state nehalem_cstates[] = { .exit_latency = 3, .target_residency = 6, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -140,7 +140,7 @@ static struct cpuidle_state nehalem_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -148,7 +148,7 @@ static struct cpuidle_state nehalem_cstates[] = { .exit_latency = 20, .target_residency = 80, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] = { .exit_latency = 200, .target_residency = 800, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -169,7 +169,7 @@ static struct cpuidle_state snb_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -177,7 +177,7 @@ static struct cpuidle_state snb_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -185,7 +185,7 @@ static struct cpuidle_state snb_cstates[] = { .exit_latency = 80, .target_residency = 211, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -193,7 +193,7 @@ static struct cpuidle_state snb_cstates[] = { .exit_latency = 104, .target_residency = 345, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", .desc = "MWAIT 0x30", @@ -201,7 +201,7 @@ static struct cpuidle_state snb_cstates[] = { .exit_latency = 109, .target_residency = 345, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -214,7 +214,7 @@ static struct cpuidle_state byt_cstates[] = { .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6N", .desc = "MWAIT 0x58", @@ -222,7 +222,7 @@ static struct cpuidle_state byt_cstates[] = { .exit_latency = 300, .target_residency = 275, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6S", .desc = "MWAIT 0x52", @@ -230,7 +230,7 @@ static struct 
cpuidle_state byt_cstates[] = { .exit_latency = 500, .target_residency = 560, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", .desc = "MWAIT 0x60", @@ -238,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = { .exit_latency = 1200, .target_residency = 4000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7S", .desc = "MWAIT 0x64", @@ -246,7 +246,7 @@ static struct cpuidle_state byt_cstates[] = { .exit_latency = 10000, .target_residency = 20000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -259,7 +259,7 @@ static struct cpuidle_state cht_cstates[] = { .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6N", .desc = "MWAIT 0x58", @@ -267,7 +267,7 @@ static struct cpuidle_state cht_cstates[] = { .exit_latency = 80, .target_residency = 275, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6S", .desc = "MWAIT 0x52", @@ -275,7 +275,7 @@ static struct cpuidle_state cht_cstates[] = { .exit_latency = 200, .target_residency = 560, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", .desc = "MWAIT 0x60", @@ -283,7 +283,7 @@ static struct cpuidle_state cht_cstates[] = { .exit_latency = 1200, .target_residency = 4000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7S", .desc = "MWAIT 0x64", @@ -291,7 +291,7 @@ static struct cpuidle_state cht_cstates[] = { .exit_latency = 10000, .target_residency = 20000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -304,7 +304,7 @@ static struct cpuidle_state ivb_cstates[] = { .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -312,7 +312,7 @@ static struct cpuidle_state ivb_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -320,7 +320,7 @@ static struct cpuidle_state ivb_cstates[] = { .exit_latency = 59, .target_residency = 156, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -328,7 +328,7 @@ static struct cpuidle_state ivb_cstates[] = { .exit_latency = 80, .target_residency = 300, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", .desc = "MWAIT 0x30", @@ -336,7 +336,7 @@ static struct cpuidle_state ivb_cstates[] = { .exit_latency = 87, .target_residency = 300, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -349,7 +349,7 @@ static struct cpuidle_state ivt_cstates[] = { .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -357,7 +357,7 @@ static struct cpuidle_state ivt_cstates[] = { .exit_latency = 10, .target_residency = 80, 
.enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -365,7 +365,7 @@ static struct cpuidle_state ivt_cstates[] = { .exit_latency = 59, .target_residency = 156, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -373,7 +373,7 @@ static struct cpuidle_state ivt_cstates[] = { .exit_latency = 82, .target_residency = 300, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -386,7 +386,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -394,7 +394,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .exit_latency = 10, .target_residency = 250, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -402,7 +402,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .exit_latency = 59, .target_residency = 300, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -410,7 +410,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .exit_latency = 84, .target_residency = 400, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -423,7 +423,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -431,7 +431,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .exit_latency = 10, .target_residency = 500, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -439,7 +439,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .exit_latency = 59, .target_residency = 600, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -447,7 +447,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .exit_latency = 88, .target_residency = 700, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -460,7 +460,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -468,7 +468,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -476,7 +476,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 33, .target_residency = 100, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -484,7 +484,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 133, .target_residency = 400, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + 
.enter_s2idle = intel_idle_s2idle, }, { .name = "C7s", .desc = "MWAIT 0x32", @@ -492,7 +492,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 166, .target_residency = 500, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", .desc = "MWAIT 0x40", @@ -500,7 +500,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 300, .target_residency = 900, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", .desc = "MWAIT 0x50", @@ -508,7 +508,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 600, .target_residency = 1800, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", .desc = "MWAIT 0x60", @@ -516,7 +516,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 2600, .target_residency = 7700, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -528,7 +528,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -536,7 +536,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -544,7 +544,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 40, .target_residency = 100, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -552,7 +552,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 133, .target_residency = 400, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7s", .desc = "MWAIT 0x32", @@ -560,7 +560,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 166, .target_residency = 500, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", .desc = "MWAIT 0x40", @@ -568,7 +568,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 300, .target_residency = 900, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", .desc = "MWAIT 0x50", @@ -576,7 +576,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 600, .target_residency = 1800, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", .desc = "MWAIT 0x60", @@ -584,7 +584,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 2600, .target_residency = 7700, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -597,7 +597,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -605,7 +605,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = 
"MWAIT 0x10", @@ -613,7 +613,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 70, .target_residency = 100, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -621,7 +621,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 85, .target_residency = 200, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7s", .desc = "MWAIT 0x33", @@ -629,7 +629,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 124, .target_residency = 800, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", .desc = "MWAIT 0x40", @@ -637,7 +637,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 200, .target_residency = 800, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", .desc = "MWAIT 0x50", @@ -645,7 +645,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 480, .target_residency = 5000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", .desc = "MWAIT 0x60", @@ -653,7 +653,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 890, .target_residency = 5000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -666,7 +666,7 @@ static struct cpuidle_state skx_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -674,7 +674,7 @@ static struct cpuidle_state skx_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -682,7 +682,7 @@ static struct cpuidle_state skx_cstates[] = { .exit_latency = 133, .target_residency = 600, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -695,7 +695,7 @@ static struct cpuidle_state atom_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C2", .desc = "MWAIT 0x10", @@ -703,7 +703,7 @@ static struct cpuidle_state atom_cstates[] = { .exit_latency = 20, .target_residency = 80, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C4", .desc = "MWAIT 0x30", @@ -711,7 +711,7 @@ static struct cpuidle_state atom_cstates[] = { .exit_latency = 100, .target_residency = 400, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x52", @@ -719,7 +719,7 @@ static struct cpuidle_state atom_cstates[] = { .exit_latency = 140, .target_residency = 560, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -731,7 +731,7 @@ static struct cpuidle_state tangier_cstates[] = { .exit_latency = 1, .target_residency = 4, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C4", .desc = "MWAIT 0x30", @@ -739,7 +739,7 @@ static struct cpuidle_state tangier_cstates[] 
= { .exit_latency = 100, .target_residency = 400, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x52", @@ -747,7 +747,7 @@ static struct cpuidle_state tangier_cstates[] = { .exit_latency = 140, .target_residency = 560, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", .desc = "MWAIT 0x60", @@ -755,7 +755,7 @@ static struct cpuidle_state tangier_cstates[] = { .exit_latency = 1200, .target_residency = 4000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", .desc = "MWAIT 0x64", @@ -763,7 +763,7 @@ static struct cpuidle_state tangier_cstates[] = { .exit_latency = 10000, .target_residency = 20000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -775,7 +775,7 @@ static struct cpuidle_state avn_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x51", @@ -783,7 +783,7 @@ static struct cpuidle_state avn_cstates[] = { .exit_latency = 15, .target_residency = 45, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -795,7 +795,7 @@ static struct cpuidle_state knl_cstates[] = { .exit_latency = 1, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze }, + .enter_s2idle = intel_idle_s2idle }, { .name = "C6", .desc = "MWAIT 0x10", @@ -803,7 +803,7 @@ static struct cpuidle_state knl_cstates[] = { .exit_latency = 120, .target_residency = 500, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze }, + .enter_s2idle = intel_idle_s2idle }, { .enter = NULL } }; @@ -816,7 +816,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -824,7 +824,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -832,7 +832,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 133, .target_residency = 133, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7s", .desc = "MWAIT 0x31", @@ -840,7 +840,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 155, .target_residency = 155, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", .desc = "MWAIT 0x40", @@ -848,7 +848,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 1000, .target_residency = 1000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", .desc = "MWAIT 0x50", @@ -856,7 +856,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 2000, .target_residency = 2000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", .desc = "MWAIT 0x60", @@ -864,7 +864,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 10000, .target_residency = 10000, .enter = 
&intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -877,7 +877,7 @@ static struct cpuidle_state dnv_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -885,7 +885,7 @@ static struct cpuidle_state dnv_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -893,7 +893,7 @@ static struct cpuidle_state dnv_cstates[] = { .exit_latency = 50, .target_residency = 500, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -936,12 +936,12 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev, } /** - * intel_idle_freeze - simplified "enter" callback routine for suspend-to-idle + * intel_idle_s2idle - simplified "enter" callback routine for suspend-to-idle * @dev: cpuidle_device * @drv: cpuidle driver * @index: state index */ -static void intel_idle_freeze(struct cpuidle_device *dev, +static void intel_idle_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { unsigned long ecx = 1; /* break on interrupt flag */ @@ -1337,7 +1337,7 @@ static void __init intel_idle_cpuidle_driver_init(void) int num_substates, mwait_hint, mwait_cstate; if ((cpuidle_state_table[cstate].enter == NULL) && - (cpuidle_state_table[cstate].enter_freeze == NULL)) + (cpuidle_state_table[cstate].enter_s2idle == NULL)) break; if (cstate + 1 > max_cstate) { -- cgit v1.2.3 From 23d5855f4774f4f7c246a67057ecacc904696d8a Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 10 Aug 2017 00:15:30 +0200 Subject: PM / s2idle: Rename platform operations structure Rename struct platform_freeze_ops to platform_s2idle_ops to make it clear that the callbacks in it are used during suspend-to-idle suspend/resume transitions and rename the related functions, variables and so on accordingly. Signed-off-by: Rafael J. Wysocki --- drivers/acpi/sleep.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index a0a6fd10fb5f..f7a8abbeac6e 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -737,14 +737,14 @@ static struct acpi_scan_handler lps0_handler = { .attach = lps0_device_attach, }; -static int acpi_freeze_begin(void) +static int acpi_s2idle_begin(void) { acpi_scan_lock_acquire(); s2idle_in_progress = true; return 0; } -static int acpi_freeze_prepare(void) +static int acpi_s2idle_prepare(void) { if (lps0_device_handle) { acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF); @@ -764,7 +764,7 @@ static int acpi_freeze_prepare(void) return 0; } -static void acpi_freeze_wake(void) +static void acpi_s2idle_wake(void) { /* * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means @@ -778,7 +778,7 @@ static void acpi_freeze_wake(void) } } -static void acpi_freeze_sync(void) +static void acpi_s2idle_sync(void) { /* * Process all pending events in case there are any wakeup ones. 
@@ -791,7 +791,7 @@ static void acpi_freeze_sync(void) s2idle_wakeup = false; } -static void acpi_freeze_restore(void) +static void acpi_s2idle_restore(void) { if (acpi_sci_irq_valid()) disable_irq_wake(acpi_sci_irq); @@ -804,19 +804,19 @@ static void acpi_freeze_restore(void) } } -static void acpi_freeze_end(void) +static void acpi_s2idle_end(void) { s2idle_in_progress = false; acpi_scan_lock_release(); } -static const struct platform_freeze_ops acpi_freeze_ops = { - .begin = acpi_freeze_begin, - .prepare = acpi_freeze_prepare, - .wake = acpi_freeze_wake, - .sync = acpi_freeze_sync, - .restore = acpi_freeze_restore, - .end = acpi_freeze_end, +static const struct platform_s2idle_ops acpi_s2idle_ops = { + .begin = acpi_s2idle_begin, + .prepare = acpi_s2idle_prepare, + .wake = acpi_s2idle_wake, + .sync = acpi_s2idle_sync, + .restore = acpi_s2idle_restore, + .end = acpi_s2idle_end, }; static void acpi_sleep_suspend_setup(void) @@ -831,7 +831,7 @@ static void acpi_sleep_suspend_setup(void) &acpi_suspend_ops_old : &acpi_suspend_ops); acpi_scan_add_handler(&lps0_handler); - freeze_set_ops(&acpi_freeze_ops); + s2idle_set_ops(&acpi_s2idle_ops); } #else /* !CONFIG_SUSPEND */ -- cgit v1.2.3 From b20a3f3d8a3516dcde2d1fb130169f336bc93023 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 10 Aug 2017 15:58:57 +0100 Subject: cpufreq: remove setting of policy->cpu in policy->cpus during init policy->cpu is copied into policy->cpus in cpufreq_online() before calling into cpufreq_driver->init(). So there's no need to set the same in the individual driver init() functions again. This patch removes the redundant setting of policy->cpu in policy->cpus in the intel_pstate and cppc drivers. Reported-by: Viresh Kumar Signed-off-by: Sudeep Holla Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cppc_cpufreq.c | 1 - drivers/cpufreq/intel_pstate.c | 1 - 2 files changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 10be285c9055..a1c3025f9df7 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -172,7 +172,6 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) return -EFAULT; } - cpumask_set_cpu(policy->cpu, policy->cpus); cpu->cur_policy = policy; /* Set policy->cur to max now. The governors will adjust later. */ diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index c1100a3e3325..5c20d2c56bd7 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2124,7 +2124,6 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.max_freq *= cpu->pstate.scaling; intel_pstate_init_acpi_perf_limits(policy); - cpumask_set_cpu(policy->cpu, policy->cpus); policy->fast_switch_possible = true; -- cgit v1.2.3 From bea2ebca6b917e46d0c585f416f1326fdf41e69b Mon Sep 17 00:00:00 2001 From: Khiem Nguyen Date: Fri, 11 Aug 2017 17:36:57 +0200 Subject: cpufreq: dt: Add r8a7796 support to use generic cpufreq driver This patch adds r8a7796 support to the generic cpufreq driver by adding an appropriate compat string. This is in keeping with support for other Renesas ARM and arm64 based SoCs. Signed-off-by: Khiem Nguyen [simon: new changelog] Signed-off-by: Simon Horman Acked-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/cpufreq-dt-platdev.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index bcee384b3251..233e18ad3948 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -68,6 +68,7 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "renesas,r8a7793", }, { .compatible = "renesas,r8a7794", }, { .compatible = "renesas,r8a7795", }, + { .compatible = "renesas,r8a7796", }, { .compatible = "renesas,sh73a0", }, { .compatible = "rockchip,rk2928", }, -- cgit v1.2.3 From a804d51004ef69ff362509ae813b3f455c3ab7b4 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 13 Aug 2017 15:10:06 +0530 Subject: cpufreq: Loongson2: constify platform_device_id platform_device_id structures are not supposed to change at runtime. All functions working with platform_device_id work with const platform_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/loongson2_cpufreq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index 9ac27b22476c..da344696beed 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -114,7 +114,7 @@ static struct cpufreq_driver loongson2_cpufreq_driver = { .attr = cpufreq_generic_attr, }; -static struct platform_device_id platform_device_ids[] = { +static const struct platform_device_id platform_device_ids[] = { { .name = "loongson2_cpufreq", }, -- cgit v1.2.3 From ff6c349f74218eb03a676f46a2cffe69fc5b3d4d Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 16 Aug 2017 10:19:12 +0200 Subject: cpufreq: enable the DT cpufreq driver on the Ux500 This enables the generic DT and OPP-based cpufreq driver on the ST-Ericsson Ux500 series. Acked-by: Viresh Kumar Signed-off-by: Linus Walleij Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq-dt-platdev.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 233e18ad3948..8b88768b565d 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -89,6 +89,11 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "socionext,uniphier-ld11", }, { .compatible = "socionext,uniphier-ld20", }, + { .compatible = "st-ericsson,u8500", }, + { .compatible = "st-ericsson,u8540", }, + { .compatible = "st-ericsson,u9500", }, + { .compatible = "st-ericsson,u9540", }, + { .compatible = "ti,omap2", }, { .compatible = "ti,omap3", }, { .compatible = "ti,omap4", }, -- cgit v1.2.3 From 726fb6b4f2a82a14a906f39bdabac4863b87c01a Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 15 Aug 2017 18:16:59 -0700 Subject: ACPI / PM: Check low power idle constraints for debug only For the SoC to achieve its lowest power platform idle state, a set of hardware preconditions must be met. These preconditions or constraints can be obtained by issuing a device specific method (_DSM) with function "1". Refer to the document provided in the link below. Here, during initialization (from the attach() callback of the LPS0 device), function 1 is invoked to get the device constraints. Each enabled constraint is stored in a table. 
The devices in this table are used to check whether they were in required minimum state, while entering suspend. This check is done from platform freeze wake() callback, only when /sys/power/pm_debug_messages attribute is non zero. If any constraint is not met and device is ACPI power managed then it prints the device information to kernel logs. Also if debug is enabled in acpi/sleep.c, the constraint table and state of each device on wake is dumped in kernel logs. Since pm_debug_messages_on setting is used as condition to check constraints outside kernel/power/main.c, pm_debug_messages_on is changed to a global variable. Link: http://www.uefi.org/sites/default/files/resources/Intel_ACPI_Low_Power_S0_Idle.pdf Signed-off-by: Srinivas Pandruvada Signed-off-by: Rafael J. Wysocki --- drivers/acpi/sleep.c | 168 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 168 insertions(+) (limited to 'drivers') diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index f7a8abbeac6e..61b2e9f17c4e 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -669,6 +669,7 @@ static const struct acpi_device_id lps0_device_ids[] = { #define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66" +#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1 #define ACPI_LPS0_SCREEN_OFF 3 #define ACPI_LPS0_SCREEN_ON 4 #define ACPI_LPS0_ENTRY 5 @@ -680,6 +681,166 @@ static acpi_handle lps0_device_handle; static guid_t lps0_dsm_guid; static char lps0_dsm_func_mask; +/* Device constraint entry structure */ +struct lpi_device_info { + char *name; + int enabled; + union acpi_object *package; +}; + +/* Constraint package structure */ +struct lpi_device_constraint { + int uid; + int min_dstate; + int function_states; +}; + +struct lpi_constraints { + acpi_handle handle; + int min_dstate; +}; + +static struct lpi_constraints *lpi_constraints_table; +static int lpi_constraints_table_size; + +static void lpi_device_get_constraints(void) +{ + union acpi_object *out_obj; + int i; + + out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid, + 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS, + NULL, ACPI_TYPE_PACKAGE); + + acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n", + out_obj ? 
"successful" : "failed"); + + if (!out_obj) + return; + + lpi_constraints_table = kcalloc(out_obj->package.count, + sizeof(*lpi_constraints_table), + GFP_KERNEL); + if (!lpi_constraints_table) + goto free_acpi_buffer; + + acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n"); + + for (i = 0; i < out_obj->package.count; i++) { + struct lpi_constraints *constraint; + acpi_status status; + union acpi_object *package = &out_obj->package.elements[i]; + struct lpi_device_info info = { }; + int package_count = 0, j; + + if (!package) + continue; + + for (j = 0; j < package->package.count; ++j) { + union acpi_object *element = + &(package->package.elements[j]); + + switch (element->type) { + case ACPI_TYPE_INTEGER: + info.enabled = element->integer.value; + break; + case ACPI_TYPE_STRING: + info.name = element->string.pointer; + break; + case ACPI_TYPE_PACKAGE: + package_count = element->package.count; + info.package = element->package.elements; + break; + } + } + + if (!info.enabled || !info.package || !info.name) + continue; + + constraint = &lpi_constraints_table[lpi_constraints_table_size]; + + status = acpi_get_handle(NULL, info.name, &constraint->handle); + if (ACPI_FAILURE(status)) + continue; + + acpi_handle_debug(lps0_device_handle, + "index:%d Name:%s\n", i, info.name); + + constraint->min_dstate = -1; + + for (j = 0; j < package_count; ++j) { + union acpi_object *info_obj = &info.package[j]; + union acpi_object *cnstr_pkg; + union acpi_object *obj; + struct lpi_device_constraint dev_info; + + switch (info_obj->type) { + case ACPI_TYPE_INTEGER: + /* version */ + break; + case ACPI_TYPE_PACKAGE: + if (info_obj->package.count < 2) + break; + + cnstr_pkg = info_obj->package.elements; + obj = &cnstr_pkg[0]; + dev_info.uid = obj->integer.value; + obj = &cnstr_pkg[1]; + dev_info.min_dstate = obj->integer.value; + + acpi_handle_debug(lps0_device_handle, + "uid:%d min_dstate:%s\n", + dev_info.uid, + acpi_power_state_string(dev_info.min_dstate)); + + constraint->min_dstate = dev_info.min_dstate; + break; + } + } + + if (constraint->min_dstate < 0) { + acpi_handle_debug(lps0_device_handle, + "Incomplete constraint defined\n"); + continue; + } + + lpi_constraints_table_size++; + } + + acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n"); + +free_acpi_buffer: + ACPI_FREE(out_obj); +} + +static void lpi_check_constraints(void) +{ + int i; + + for (i = 0; i < lpi_constraints_table_size; ++i) { + struct acpi_device *adev; + + if (acpi_bus_get_device(lpi_constraints_table[i].handle, &adev)) + continue; + + acpi_handle_debug(adev->handle, + "LPI: required min power state:%s current power state:%s\n", + acpi_power_state_string(lpi_constraints_table[i].min_dstate), + acpi_power_state_string(adev->power.state)); + + if (!adev->flags.power_manageable) { + acpi_handle_info(adev->handle, "LPI: Device not power manageble\n"); + continue; + } + + if (adev->power.state < lpi_constraints_table[i].min_dstate) + acpi_handle_info(adev->handle, + "LPI: Constraint not met; min power state:%s current power state:%s\n", + acpi_power_state_string(lpi_constraints_table[i].min_dstate), + acpi_power_state_string(adev->power.state)); + } +} + static void acpi_sleep_run_lps0_dsm(unsigned int func) { union acpi_object *out_obj; @@ -729,6 +890,9 @@ static int lps0_device_attach(struct acpi_device *adev, "_DSM function 0 evaluation failed\n"); } ACPI_FREE(out_obj); + + lpi_device_get_constraints(); + return 0; } @@ -766,6 +930,10 @@ static int acpi_s2idle_prepare(void) static void 
acpi_s2idle_wake(void) { + + if (pm_debug_messages_on) + lpi_check_constraints(); + /* * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means * that the SCI has triggered while suspended, so cancel the wakeup in -- cgit v1.2.3 From 836a1e25dff8b23d5d54022cf84f841dae2387c7 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 22 Aug 2017 15:17:40 +0200 Subject: mfd: db8500-prcmu: Get rid of cpufreq dependency The ARMSS clock, also known as the operating point of the CPU, should not cross-depend on cpufreq like this. Move the code to use just frequencies and remove the false frequency (1GHz) and put in the actual frequency provided by the ARMSS clock (998400000 Hz) as part of the process. After this and the related cpufreq patch, the DB8500 will simply use the standard DT cpufreq driver to change the operating points through the common clock framework using the ARMSS clock. Acked-by: Lee Jones Signed-off-by: Linus Walleij Signed-off-by: Rafael J. Wysocki --- drivers/mfd/db8500-prcmu.c | 62 ++++++++++++++++------------------------------ 1 file changed, 21 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 5c739ac752e8..5970b8def548 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include "dbx500-prcmu-regs.h" @@ -1692,32 +1691,27 @@ static long round_clock_rate(u8 clock, unsigned long rate) return rounded_rate; } -/* CPU FREQ table, may be changed due to if MAX_OPP is supported. */ -static struct cpufreq_frequency_table db8500_cpufreq_table[] = { - { .frequency = 200000, .driver_data = ARM_EXTCLK,}, - { .frequency = 400000, .driver_data = ARM_50_OPP,}, - { .frequency = 800000, .driver_data = ARM_100_OPP,}, - { .frequency = CPUFREQ_TABLE_END,}, /* To be used for MAX_OPP. */ - { .frequency = CPUFREQ_TABLE_END,}, +static const unsigned long armss_freqs[] = { + 200000000, + 400000000, + 800000000, + 998400000 }; static long round_armss_rate(unsigned long rate) { - struct cpufreq_frequency_table *pos; - long freq = 0; - - /* cpufreq table frequencies is in KHz. */ - rate = rate / 1000; + unsigned long freq = 0; + int i; /* Find the corresponding arm opp from the cpufreq table. */ - cpufreq_for_each_entry(pos, db8500_cpufreq_table) { - freq = pos->frequency; - if (freq == rate) + for (i = 0; i < ARRAY_SIZE(armss_freqs); i++) { + freq = armss_freqs[i]; + if (rate <= freq) break; } /* Return the last valid value, even if a match was not found. */ - return freq * 1000; + return freq; } #define MIN_PLL_VCO_RATE 600000000ULL @@ -1854,21 +1848,23 @@ static void set_clock_rate(u8 clock, unsigned long rate) static int set_armss_rate(unsigned long rate) { - struct cpufreq_frequency_table *pos; - - /* cpufreq table frequencies is in KHz. */ - rate = rate / 1000; + unsigned long freq; + u8 opps[] = { ARM_EXTCLK, ARM_50_OPP, ARM_100_OPP, ARM_MAX_OPP }; + int i; /* Find the corresponding arm opp from the cpufreq table. */ - cpufreq_for_each_entry(pos, db8500_cpufreq_table) - if (pos->frequency == rate) + for (i = 0; i < ARRAY_SIZE(armss_freqs); i++) { + freq = armss_freqs[i]; + if (rate == freq) break; + } - if (pos->frequency != rate) + if (rate != freq) return -EINVAL; /* Set the new arm opp. 
*/ - return db8500_prcmu_set_arm_opp(pos->driver_data); + pr_debug("SET ARM OPP 0x%02x\n", opps[i]); + return db8500_prcmu_set_arm_opp(opps[i]); } static int set_plldsi_rate(unsigned long rate) @@ -3048,12 +3044,6 @@ static const struct mfd_cell db8500_prcmu_devs[] = { .platform_data = &db8500_regulators, .pdata_size = sizeof(db8500_regulators), }, - { - .name = "cpufreq-ux500", - .of_compatible = "stericsson,cpufreq-ux500", - .platform_data = &db8500_cpufreq_table, - .pdata_size = sizeof(db8500_cpufreq_table), - }, { .name = "cpuidle-dbx500", .of_compatible = "stericsson,cpuidle-dbx500", @@ -3067,14 +3057,6 @@ static const struct mfd_cell db8500_prcmu_devs[] = { }, }; -static void db8500_prcmu_update_cpufreq(void) -{ - if (prcmu_has_arm_maxopp()) { - db8500_cpufreq_table[3].frequency = 1000000; - db8500_cpufreq_table[3].driver_data = ARM_MAX_OPP; - } -} - static int db8500_prcmu_register_ab8500(struct device *parent) { struct device_node *np; @@ -3160,8 +3142,6 @@ static int db8500_prcmu_probe(struct platform_device *pdev) prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); - db8500_prcmu_update_cpufreq(); - err = mfd_add_devices(&pdev->dev, 0, common_prcmu_devs, ARRAY_SIZE(common_prcmu_devs), NULL, 0, db8500_irq_domain); if (err) { -- cgit v1.2.3 From 919096f7f3e55d6bb783e6b42851d720121d6fa7 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 16 Aug 2017 10:19:14 +0200 Subject: cpufreq: dbx500: Delete obsolete driver We have moved the Ux500 over to use the generic DT based cpufreq driver, so delete the old custom driver. At the same time select CPUFREQ_DT from the machine's Kconfig in order to satisfy the "default ARCH_U8500" selection on the old driver. Acked-by: Viresh Kumar Signed-off-by: Linus Walleij Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/Kconfig.arm | 9 ---- drivers/cpufreq/Makefile | 1 - drivers/cpufreq/dbx500-cpufreq.c | 103 --------------------------------------- 3 files changed, 113 deletions(-) delete mode 100644 drivers/cpufreq/dbx500-cpufreq.c (limited to 'drivers') diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 4c277d11831b..bdce4488ded1 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -71,15 +71,6 @@ config ARM_HIGHBANK_CPUFREQ If in doubt, say N. -config ARM_DB8500_CPUFREQ - tristate "ST-Ericsson DB8500 cpufreq" if COMPILE_TEST && !ARCH_U8500 - default ARCH_U8500 - depends on HAS_IOMEM - depends on !CPU_THERMAL || THERMAL - help - This adds the CPUFreq driver for ST-Ericsson Ux500 (DB8500) SoC - series. 
- config ARM_IMX6Q_CPUFREQ tristate "Freescale i.MX6 cpufreq support" depends on ARCH_MXC diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 227922b0d42c..c7af9b2a255e 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -53,7 +53,6 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o -obj-$(CONFIG_ARM_DB8500_CPUFREQ) += dbx500-cpufreq.o obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c deleted file mode 100644 index 4ee0431579c1..000000000000 --- a/drivers/cpufreq/dbx500-cpufreq.c +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (C) STMicroelectronics 2009 - * Copyright (C) ST-Ericsson SA 2010-2012 - * - * License Terms: GNU General Public License v2 - * Author: Sundar Iyer - * Author: Martin Persson - * Author: Jonas Aaberg - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -static struct cpufreq_frequency_table *freq_table; -static struct clk *armss_clk; -static struct thermal_cooling_device *cdev; - -static int dbx500_cpufreq_target(struct cpufreq_policy *policy, - unsigned int index) -{ - /* update armss clk frequency */ - return clk_set_rate(armss_clk, freq_table[index].frequency * 1000); -} - -static int dbx500_cpufreq_init(struct cpufreq_policy *policy) -{ - policy->clk = armss_clk; - return cpufreq_generic_init(policy, freq_table, 20 * 1000); -} - -static int dbx500_cpufreq_exit(struct cpufreq_policy *policy) -{ - if (!IS_ERR(cdev)) - cpufreq_cooling_unregister(cdev); - return 0; -} - -static void dbx500_cpufreq_ready(struct cpufreq_policy *policy) -{ - cdev = cpufreq_cooling_register(policy); - if (IS_ERR(cdev)) - pr_err("Failed to register cooling device %ld\n", PTR_ERR(cdev)); - else - pr_info("Cooling device registered: %s\n", cdev->type); -} - -static struct cpufreq_driver dbx500_cpufreq_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | - CPUFREQ_NEED_INITIAL_FREQ_CHECK, - .verify = cpufreq_generic_frequency_table_verify, - .target_index = dbx500_cpufreq_target, - .get = cpufreq_generic_get, - .init = dbx500_cpufreq_init, - .exit = dbx500_cpufreq_exit, - .ready = dbx500_cpufreq_ready, - .name = "DBX500", - .attr = cpufreq_generic_attr, -}; - -static int dbx500_cpufreq_probe(struct platform_device *pdev) -{ - struct cpufreq_frequency_table *pos; - - freq_table = dev_get_platdata(&pdev->dev); - if (!freq_table) { - pr_err("dbx500-cpufreq: Failed to fetch cpufreq table\n"); - return -ENODEV; - } - - armss_clk = clk_get(&pdev->dev, "armss"); - if (IS_ERR(armss_clk)) { - pr_err("dbx500-cpufreq: Failed to get armss clk\n"); - return PTR_ERR(armss_clk); - } - - pr_info("dbx500-cpufreq: Available frequencies:\n"); - cpufreq_for_each_entry(pos, freq_table) - pr_info(" %d Mhz\n", pos->frequency / 1000); - - return cpufreq_register_driver(&dbx500_cpufreq_driver); -} - -static struct platform_driver dbx500_cpufreq_plat_driver = { - .driver = { - .name = "cpufreq-ux500", - }, - .probe = dbx500_cpufreq_probe, -}; - -static int __init dbx500_cpufreq_register(void) -{ - return platform_driver_register(&dbx500_cpufreq_plat_driver); -} -device_initcall(dbx500_cpufreq_register); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("cpufreq driver for DBX500"); -- cgit v1.2.3 From 
e948bc8fbee077735c2b71b991a5ca5e573f3506 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 17 Aug 2017 09:12:27 +0530 Subject: cpufreq: Cap the default transition delay value to 10 ms If transition_delay_us isn't defined by the cpufreq driver, the default value of transition delay (time after which the cpufreq governor will try updating the frequency again) is currently calculated by multiplying transition_latency (nsec) with LATENCY_MULTIPLIER (1000) and then converting this time to usec. That gives the exact same value as transition_latency, just that the time unit is usec instead of nsec. With acpi-cpufreq for example, transition_latency is set to around 10 usec and we get transition delay as 10 ms. Which seems to be a reasonable amount of time to reevaluate the frequency again. But for platforms where frequency switching isn't that fast (like ARM), the transition_latency varies from 500 usec to 3 ms, and the transition delay becomes 500 ms to 3 seconds. Of course, that is a pretty bad default value to start with. We can try to come across a better formula (instead of multiplying with LATENCY_MULTIPLIER) to solve this problem, but will that be worth it ? This patch tries a simple approach and caps the maximum value of default transition delay to 10 ms. Of course, userspace can still come in and change this value anytime or individual drivers can rather provide transition_delay_us instead. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index b2cc98551fc3..c7ae67d6886d 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -532,8 +532,19 @@ unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy) return policy->transition_delay_us; latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC; - if (latency) - return latency * LATENCY_MULTIPLIER; + if (latency) { + /* + * For platforms that can change the frequency very fast (< 10 + * us), the above formula gives a decent transition delay. But + * for platforms where transition_latency is in milliseconds, it + * ends up giving unrealistic values. + * + * Cap the default transition delay to 10 ms, which seems to be + * a reasonable amount of time after which we should reevaluate + * the frequency. + */ + return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000); + } return LATENCY_MULTIPLIER; } -- cgit v1.2.3 From ea11e94badab2d4d58f9415d0211bc5f85d9d0a9 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 18 Jul 2017 16:42:50 -0500 Subject: PM / Domains: Convert to using %pOF instead of full_name Now that we have a custom printf format specifier, convert users of full_name to use %pOF instead. This is preparation to remove storing of the full path string for each node. Signed-off-by: Rob Herring Reviewed-by: Geert Uytterhoeven Acked-by: Ulf Hansson Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/domain.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 43fd08e50ae9..e8ca5e2cf1e5 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1775,7 +1775,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, mutex_lock(&of_genpd_mutex); list_add(&cp->link, &of_genpd_providers); mutex_unlock(&of_genpd_mutex); - pr_debug("Added domain provider from %s\n", np->full_name); + pr_debug("Added domain provider from %pOF\n", np); return 0; } @@ -2181,16 +2181,16 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, err = of_property_read_u32(state_node, "entry-latency-us", &entry_latency); if (err) { - pr_debug(" * %s missing entry-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing entry-latency-us property\n", + state_node); return -EINVAL; } err = of_property_read_u32(state_node, "exit-latency-us", &exit_latency); if (err) { - pr_debug(" * %s missing exit-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing exit-latency-us property\n", + state_node); return -EINVAL; } @@ -2244,8 +2244,8 @@ int of_genpd_parse_idle_states(struct device_node *dn, ret = genpd_parse_state(&st[i++], np); if (ret) { pr_err - ("Parsing idle state node %s failed with err %d\n", - np->full_name, ret); + ("Parsing idle state node %pOF failed with err %d\n", + np, ret); of_node_put(np); kfree(st); return ret; -- cgit v1.2.3 From cc5a7a74943d5c4a8c0ce5ba7ab5284358d4a8d8 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 18 Jul 2017 16:42:54 -0500 Subject: cpufreq: Convert to using %pOF instead of full_name Now that we have a custom printf format specifier, convert users of full_name to use %pOF instead. This is preparation to remove storing of the full path string for each node. Signed-off-by: Rob Herring Acked-by: Viresh Kumar Acked-by: Patrice Chotard Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/pmac64-cpufreq.c | 2 +- drivers/cpufreq/sti-cpufreq.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c index 267e0894c62d..be623dd7b9f2 100644 --- a/drivers/cpufreq/pmac64-cpufreq.c +++ b/drivers/cpufreq/pmac64-cpufreq.c @@ -516,7 +516,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode) goto bail; } - DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name); + DBG("cpufreq: i2c clock chip found: %pOF\n", hwclock); /* Now get all the platform functions */ pfunc_cpu_getfreq = diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index d2d0430d09d4..47105735df12 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c @@ -65,8 +65,8 @@ static int sti_cpufreq_fetch_major(void) { ret = of_property_read_u32_index(np, "st,syscfg", MAJOR_ID_INDEX, &major_offset); if (ret) { - dev_err(dev, "No major number offset provided in %s [%d]\n", - np->full_name, ret); + dev_err(dev, "No major number offset provided in %pOF [%d]\n", + np, ret); return ret; } @@ -92,8 +92,8 @@ static int sti_cpufreq_fetch_minor(void) MINOR_ID_INDEX, &minor_offset); if (ret) { dev_err(dev, - "No minor number offset provided %s [%d]\n", - np->full_name, ret); + "No minor number offset provided %pOF [%d]\n", + np, ret); return ret; } -- cgit v1.2.3 From 84dc4141f0353bada6af53d32730f3e47e579bc0 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 18 Jul 2017 16:42:55 -0500 Subject: cpuidle: Convert to using %pOF instead of full_name Now that we have a custom printf format specifier, convert users of full_name to use %pOF instead. This is preparation to remove storing of the full path string for each node. Signed-off-by: Rob Herring Signed-off-by: Rafael J. 
Wysocki --- drivers/cpuidle/dt_idle_states.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index ae8eb0359889..bafd4dbf55d4 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -53,16 +53,16 @@ static int init_state_node(struct cpuidle_state *idle_state, err = of_property_read_u32(state_node, "entry-latency-us", &entry_latency); if (err) { - pr_debug(" * %s missing entry-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing entry-latency-us property\n", + state_node); return -EINVAL; } err = of_property_read_u32(state_node, "exit-latency-us", &exit_latency); if (err) { - pr_debug(" * %s missing exit-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing exit-latency-us property\n", + state_node); return -EINVAL; } /* @@ -75,8 +75,8 @@ static int init_state_node(struct cpuidle_state *idle_state, err = of_property_read_u32(state_node, "min-residency-us", &idle_state->target_residency); if (err) { - pr_debug(" * %s missing min-residency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing min-residency-us property\n", + state_node); return -EINVAL; } @@ -186,8 +186,8 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, } if (!idle_state_valid(state_node, i, cpumask)) { - pr_warn("%s idle state not valid, bailing out\n", - state_node->full_name); + pr_warn("%pOF idle state not valid, bailing out\n", + state_node); err = -EINVAL; break; } @@ -200,8 +200,8 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, idle_state = &drv->states[state_idx++]; err = init_state_node(idle_state, matches, state_node); if (err) { - pr_err("Parsing idle state node %s failed with err %d\n", - state_node->full_name, err); + pr_err("Parsing idle state node %pOF failed with err %d\n", + state_node, err); err = -EINVAL; break; } -- cgit v1.2.3 From edeec420de2407618de097977e76b572d9de1bcf Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 16 Aug 2017 11:07:27 +0530 Subject: cpufreq: dt-platdev: Automatically create cpufreq device with OPP v2 The initial idea of creating the cpufreq-dt-platdev.c file was to keep a list of platforms that use the "operating-points" (V1) bindings and create cpufreq device for them only, as we weren't sure which platforms would want the device to get created automatically as some had their own cpufreq drivers as well, or wanted to initialize cpufreq after doing some stuff from platform code. But that wasn't the case with platforms using "operating-points-v2" property. We wanted the device to get created automatically without the need of adding them to the whitelist. Though, we will still have some exceptions where we don't want to create the device automatically. Rename the earlier platform list as *whitelist* and create a new *blacklist* as well. The cpufreq-dt device will get created if: - The platform is there in the whitelist OR - The platform has "operating-points-v2" property in CPU0's DT node and isn't part of the blacklist . Reported-by: Geert Uytterhoeven Signed-off-by: Viresh Kumar Tested-by: Simon Horman Reviewed-by: Masahiro Yamada Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/cpufreq-dt-platdev.c | 45 ++++++++++++++++++++++++++++++++---- 1 file changed, 40 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 8b88768b565d..7c67a6c74fdc 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -9,11 +9,16 @@ #include #include +#include #include #include "cpufreq-dt.h" -static const struct of_device_id machines[] __initconst = { +/* + * Machines for which the cpufreq device is *always* created, mostly used for + * platforms using "operating-points" (V1) property. + */ +static const struct of_device_id whitelist[] __initconst = { { .compatible = "allwinner,sun4i-a10", }, { .compatible = "allwinner,sun5i-a10s", }, { .compatible = "allwinner,sun5i-a13", }, @@ -107,21 +112,51 @@ static const struct of_device_id machines[] __initconst = { { } }; +/* + * Machines for which the cpufreq device is *not* created, mostly used for + * platforms using "operating-points-v2" property. + */ +static const struct of_device_id blacklist[] __initconst = { + { } +}; + +static bool __init cpu0_node_has_opp_v2_prop(void) +{ + struct device_node *np = of_cpu_device_node_get(0); + bool ret = false; + + if (of_get_property(np, "operating-points-v2", NULL)) + ret = true; + + of_node_put(np); + return ret; +} + static int __init cpufreq_dt_platdev_init(void) { struct device_node *np = of_find_node_by_path("/"); const struct of_device_id *match; + const void *data = NULL; if (!np) return -ENODEV; - match = of_match_node(machines, np); + match = of_match_node(whitelist, np); + if (match) { + data = match->data; + goto create_pdev; + } + + if (cpu0_node_has_opp_v2_prop() && !of_match_node(blacklist, np)) + goto create_pdev; + of_node_put(np); - if (!match) - return -ENODEV; + return -ENODEV; +create_pdev: + of_node_put(np); return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt", - -1, match->data, + -1, data, sizeof(struct cpufreq_dt_platform_data))); } device_initcall(cpufreq_dt_platdev_init); -- cgit v1.2.3 From d79d148b6451307fd46e30673ce9de28690c8ced Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 16 Aug 2017 11:07:28 +0530 Subject: cpufreq: dt-platdev: Drop few entries from whitelist Drop few ARM (32 and 64 bit) platforms from the whitelist which always use "operating-points-v2" property from their DT. They should continue to work after this patch. Tested on Hikey platform (only the "hisilicon,hi6220" entry). Signed-off-by: Viresh Kumar Acked-by: Chen-Yu Tsai Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/cpufreq-dt-platdev.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 7c67a6c74fdc..a020da7940d6 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -27,7 +27,6 @@ static const struct of_device_id whitelist[] __initconst = { { .compatible = "allwinner,sun6i-a31s", }, { .compatible = "allwinner,sun7i-a20", }, { .compatible = "allwinner,sun8i-a23", }, - { .compatible = "allwinner,sun8i-a33", }, { .compatible = "allwinner,sun8i-a83t", }, { .compatible = "allwinner,sun8i-h3", }, @@ -37,7 +36,6 @@ static const struct of_device_id whitelist[] __initconst = { { .compatible = "arm,integrator-cp", }, { .compatible = "hisilicon,hi3660", }, - { .compatible = "hisilicon,hi6220", }, { .compatible = "fsl,imx27", }, { .compatible = "fsl,imx51", }, @@ -51,11 +49,8 @@ static const struct of_device_id whitelist[] __initconst = { { .compatible = "samsung,exynos3250", }, { .compatible = "samsung,exynos4210", }, { .compatible = "samsung,exynos4212", }, - { .compatible = "samsung,exynos4412", }, { .compatible = "samsung,exynos5250", }, #ifndef CONFIG_BL_SWITCHER - { .compatible = "samsung,exynos5420", }, - { .compatible = "samsung,exynos5433", }, { .compatible = "samsung,exynos5800", }, #endif @@ -88,11 +83,7 @@ static const struct of_device_id whitelist[] __initconst = { { .compatible = "rockchip,rk3368", }, { .compatible = "rockchip,rk3399", }, - { .compatible = "socionext,uniphier-pro5", }, - { .compatible = "socionext,uniphier-pxs2", }, { .compatible = "socionext,uniphier-ld6b", }, - { .compatible = "socionext,uniphier-ld11", }, - { .compatible = "socionext,uniphier-ld20", }, { .compatible = "st-ericsson,u8500", }, { .compatible = "st-ericsson,u8540", }, @@ -107,8 +98,6 @@ static const struct of_device_id whitelist[] __initconst = { { .compatible = "xlnx,zynq-7000", }, { .compatible = "xlnx,zynqmp", }, - { .compatible = "zte,zx296718", }, - { } }; -- cgit v1.2.3 From 9a6e91d08e7707baa2f824257342ca7efcb199bc Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Sat, 19 Aug 2017 22:22:46 +0200 Subject: cpufreq: ti: Fix 'of_node_put' being called twice in error handling path If 'dev_pm_opp_set_supported_hw()' fails, 'opp_data->opp_node' refcount will be decremented 2 times. One, just a few lines above, and another one in the error handling path. Fix it by simply moving the 'of_node_put' call of the normal path. Signed-off-by: Christophe JAILLET Acked-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/ti-cpufreq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index a7b5658c0460..b29cd3398463 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -245,8 +245,6 @@ static int ti_cpufreq_init(void) if (ret) goto fail_put_node; - of_node_put(opp_data->opp_node); - ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev, version, VERSION_COUNT)); if (ret) { @@ -255,6 +253,8 @@ static int ti_cpufreq_init(void) goto fail_put_node; } + of_node_put(opp_data->opp_node); + register_cpufreq_dt: platform_device_register_simple("cpufreq-dt", -1, NULL, 0); -- cgit v1.2.3 From 9d913e43436c840020478463bd72098d8bf1556d Mon Sep 17 00:00:00 2001 From: David Wu Date: Mon, 21 Aug 2017 18:58:33 +0800 Subject: PM / AVS: rockchip-io: add io selectors and supplies for RV1108 This adds the necessary data for handling io voltage domains on the RV1108. Signed-off-by: David Wu Reviewed-by: Heiko Stuebner Signed-off-by: Rafael J. Wysocki --- drivers/power/avs/rockchip-io-domain.c | 38 ++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) (limited to 'drivers') diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c index 031a34372191..75f63e38a8d1 100644 --- a/drivers/power/avs/rockchip-io-domain.c +++ b/drivers/power/avs/rockchip-io-domain.c @@ -349,6 +349,36 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3399_pmu = { .init = rk3399_pmu_iodomain_init, }; +static const struct rockchip_iodomain_soc_data soc_data_rv1108 = { + .grf_offset = 0x404, + .supply_names = { + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + "vccio1", + "vccio2", + "vccio3", + "vccio5", + "vccio6", + }, + +}; + +static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = { + .grf_offset = 0x104, + .supply_names = { + "pmu", + }, +}; + static const struct of_device_id rockchip_iodomain_match[] = { { .compatible = "rockchip,rk3188-io-voltage-domain", @@ -382,6 +412,14 @@ static const struct of_device_id rockchip_iodomain_match[] = { .compatible = "rockchip,rk3399-pmu-io-voltage-domain", .data = (void *)&soc_data_rk3399_pmu }, + { + .compatible = "rockchip,rv1108-io-voltage-domain", + .data = (void *)&soc_data_rv1108 + }, + { + .compatible = "rockchip,rv1108-pmu-io-voltage-domain", + .data = (void *)&soc_data_rv1108_pmu + }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, rockchip_iodomain_match); -- cgit v1.2.3 From e8305d408f7873f5a640c74f09d5665771acc369 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 18 Jul 2017 16:42:57 -0500 Subject: PM / devfreq: Convert to using %pOF instead of full_name Now that we have a custom printf format specifier, convert users of full_name to use %pOF instead. This is preparation to remove storing of the full path string for each node. 
Signed-off-by: Rob Herring Reviewed-by: Chanwoo Choi Signed-off-by: MyungJoo Ham Cc: Kyungmin Park Cc: linux-pm@vger.kernel.org --- drivers/devfreq/devfreq-event.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c index 8648b32ebc89..d67242d87744 100644 --- a/drivers/devfreq/devfreq-event.c +++ b/drivers/devfreq/devfreq-event.c @@ -277,8 +277,8 @@ int devfreq_event_get_edev_count(struct device *dev) sizeof(u32)); if (count < 0) { dev_err(dev, - "failed to get the count of devfreq-event in %s node\n", - dev->of_node->full_name); + "failed to get the count of devfreq-event in %pOF node\n", + dev->of_node); return count; } -- cgit v1.2.3 From f75b0afa190d041c142bdc3f0c6022f61340b8a3 Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Thu, 24 Aug 2017 10:42:50 +0900 Subject: PM / devfreq: Move private devfreq_update_stats() into devfreq The devfreq_update_stats() function updates 'struct devfreq_dev_status' in order to get the current status of the devfreq device. It is only used by the governors. This patch moves devfreq_update_stats() into the devfreq directory. Signed-off-by: Chanwoo Choi Signed-off-by: MyungJoo Ham --- drivers/devfreq/governor.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h index a4f2fa1091e4..cfc50a61a90d 100644 --- a/drivers/devfreq/governor.h +++ b/drivers/devfreq/governor.h @@ -69,4 +69,8 @@ extern int devfreq_remove_governor(struct devfreq_governor *governor); extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq); +static inline int devfreq_update_stats(struct devfreq *df) +{ + return df->profile->get_dev_status(df->dev.parent, &df->last_status); +} #endif /* _GOVERNOR_H */ -- cgit v1.2.3 From b9c69e043266db96f21024d819be3f7a842fc8eb Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Thu, 24 Aug 2017 10:42:51 +0900 Subject: PM / devfreq: Add dependency on PM_OPP The devfreq framework basically uses the OPP library to handle the voltage and frequency for the device. This patch adds the dependency on CONFIG_PM_OPP in order to prevent either a build break or unknown behavior. Signed-off-by: Chanwoo Choi Signed-off-by: MyungJoo Ham --- drivers/devfreq/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 41254e702f1e..6a172d338f6d 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -1,6 +1,7 @@ menuconfig PM_DEVFREQ bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" select SRCU + select PM_OPP help A device may have a list of frequencies and voltages available. devfreq, a generic DVFS framework can be registered for a device -- cgit v1.2.3 From 9e14de1077e9c34f141cf98bdba60cdd5193d962 Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Thu, 24 Aug 2017 10:42:48 +0900 Subject: PM / devfreq: Fix memory leak when fail to register device When devfreq_add_device() fails to register the device, a memory leak of the devfreq instance happens. So, this patch fixes the memory leak issue. Before freeing, check whether the devfreq instance is NULL or not, because device_unregister() frees the devfreq instance when jumping to 'err_init'. This is to prevent a duplicate kfree(devfreq).
Cc: stable@vger.kernel.org Fixes: ac4b281176a5 ("PM / devfreq: fix duplicated kfree on devfreq pointer") Signed-off-by: Chanwoo Choi Signed-off-by: MyungJoo Ham --- drivers/devfreq/devfreq.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index dea04871b50d..a1c4ee818614 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -564,7 +564,7 @@ struct devfreq *devfreq_add_device(struct device *dev, err = device_register(&devfreq->dev); if (err) { mutex_unlock(&devfreq->lock); - goto err_out; + goto err_dev; } devfreq->trans_table = devm_kzalloc(&devfreq->dev, @@ -610,6 +610,9 @@ err_init: mutex_unlock(&devfreq_list_lock); device_unregister(&devfreq->dev); +err_dev: + if (devfreq) + kfree(devfreq); err_out: return ERR_PTR(err); } -- cgit v1.2.3 From 843791bb6c76b1f808951b284be5c10ee47fd245 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 25 Aug 2017 18:00:16 +0100 Subject: cpufreq: speedstep-lib: make several arrays static, makes code smaller Don't populate arrays on the stack; instead, make them static. This makes the object code smaller by over 860 bytes: Before: text data bss dec hex filename 10716 5196 0 15912 3e28 drivers/cpufreq/speedstep-lib.o After: text data bss dec hex filename 9690 5356 0 15046 3ac6 drivers/cpufreq/speedstep-lib.o Signed-off-by: Colin Ian King Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/speedstep-lib.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index 1b8062182c81..ccab452a4ef5 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c @@ -35,7 +35,7 @@ static int relaxed_check; static unsigned int pentium3_get_frequency(enum speedstep_processor processor) { /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ - struct { + static const struct { unsigned int ratio; /* Frequency Multiplier (x10) */ u8 bitmap; /* power on configuration bits [27, 25:22] (in MSR 0x2a) */ @@ -58,7 +58,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) }; /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ - struct { + static const struct { unsigned int value; /* Front Side Bus speed in MHz */ u8 bitmap; /* power on configuration bits [18: 19] (in MSR 0x2a) */ -- cgit v1.2.3 From fded5fc8412a0bfadd2b130433109a5be7ffdf82 Mon Sep 17 00:00:00 2001 From: Leonard Crestez Date: Mon, 28 Aug 2017 14:05:18 +0300 Subject: cpufreq: imx6q: Fix imx6sx low frequency support This patch contains the minimal changes required to support the imx6sx OPP of 198 MHz. Without this patch, cpufreq still reports success but the frequency is not changed; the "arm" clock will still be at 396000000 in clk_summary. In order to do this, PLL1 still needs to be kept enabled while changing the ARM clock. This is a hardware requirement: when ARM_PODF is changed in the CCM, we need to check the busy bit of CCM_CDHIPR bit 16 (arm_podf_busy), and this bit is synchronous with the PLL1 clock, so if PLL1 is not enabled, this bit will never get cleared. Keep pll1_sys explicitly enabled until after the rate is changed to deal with this. Otherwise, from the clk framework's perspective, pll1_sys is unused and gets turned off. Signed-off-by: Leonard Crestez Reviewed-by: Lucas Stach Signed-off-by: Rafael J.
Wysocki --- drivers/cpufreq/imx6q-cpufreq.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index b6edd3ccaa55..14466a9b01c0 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -47,6 +47,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) struct dev_pm_opp *opp; unsigned long freq_hz, volt, volt_old; unsigned int old_freq, new_freq; + bool pll1_sys_temp_enabled = false; int ret; new_freq = freq_table[index].frequency; @@ -124,6 +125,10 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) { clk_set_rate(pll1_sys_clk, new_freq * 1000); clk_set_parent(pll1_sw_clk, pll1_sys_clk); + } else { + /* pll1_sys needs to be enabled for divider rate change to work. */ + pll1_sys_temp_enabled = true; + clk_prepare_enable(pll1_sys_clk); } } @@ -135,6 +140,10 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) return ret; } + /* PLL1 is only needed until after ARM-PODF is set. */ + if (pll1_sys_temp_enabled) + clk_disable_unprepare(pll1_sys_clk); + /* scaling down? scale voltage after frequency */ if (new_freq < old_freq) { ret = regulator_set_voltage_tol(arm_reg, volt, 0); -- cgit v1.2.3 From dc2251bf98c66db3f4e055b751968f0871037ae4 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 23 Aug 2017 23:19:57 +0200 Subject: cpuidle: Eliminate the CPUIDLE_DRIVER_STATE_START symbol On some architectures the first (index 0) idle state is a polling one and it doesn't really save energy, so there is the CPUIDLE_DRIVER_STATE_START symbol allowing some pieces of cpuidle code to avoid using that state. However, this makes the code rather hard to follow. It is better to explicitly avoid the polling state, so add a new cpuidle state flag CPUIDLE_FLAG_POLLING to mark it and make the relevant code check that flag for the first state instead of using the CPUIDLE_DRIVER_STATE_START symbol. In the ACPI processor driver that cannot always rely on the state flags (like before the states table has been set up) define a new internal symbol ACPI_IDLE_STATE_START equivalent to the CPUIDLE_DRIVER_STATE_START one and drop the latter. Signed-off-by: Rafael J. Wysocki Tested-by: Sudeep Holla Acked-by: Daniel Lezcano --- drivers/acpi/processor_idle.c | 10 ++++++---- drivers/cpuidle/driver.c | 1 + drivers/cpuidle/governors/ladder.c | 14 ++++++++------ drivers/cpuidle/governors/menu.c | 13 +++++-------- 4 files changed, 20 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 5c8aa9cf62d7..39a01ea7a46d 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -48,6 +48,8 @@ #define _COMPONENT ACPI_PROCESSOR_COMPONENT ACPI_MODULE_NAME("processor_idle"); +#define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 
1 : 0) + static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; module_param(max_cstate, uint, 0000); static unsigned int nocst __read_mostly; @@ -761,7 +763,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev, if (cx->type != ACPI_STATE_C1) { if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) { - index = CPUIDLE_DRIVER_STATE_START; + index = ACPI_IDLE_STATE_START; cx = per_cpu(acpi_cstate[index], dev->cpu); } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) { if (cx->bm_sts_skip || !acpi_idle_bm_check()) { @@ -813,7 +815,7 @@ static void acpi_idle_enter_freeze(struct cpuidle_device *dev, static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, struct cpuidle_device *dev) { - int i, count = CPUIDLE_DRIVER_STATE_START; + int i, count = ACPI_IDLE_STATE_START; struct acpi_processor_cx *cx; if (max_cstate == 0) @@ -840,7 +842,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, static int acpi_processor_setup_cstates(struct acpi_processor *pr) { - int i, count = CPUIDLE_DRIVER_STATE_START; + int i, count = ACPI_IDLE_STATE_START; struct acpi_processor_cx *cx; struct cpuidle_state *state; struct cpuidle_driver *drv = &acpi_idle_driver; @@ -1291,7 +1293,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) return -EINVAL; drv->safe_state_index = -1; - for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) { + for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) { drv->states[i].name[0] = '\0'; drv->states[i].desc[0] = '\0'; } diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index e53fb861beb0..e942f8ef4309 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -204,6 +204,7 @@ static void poll_idle_init(struct cpuidle_driver *drv) state->power_usage = -1; state->enter = poll_idle; state->disabled = false; + state->flags = CPUIDLE_FLAG_POLLING; } #else static void poll_idle_init(struct cpuidle_driver *drv) {} diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index ac321f09e717..ce1a2ffffb2a 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c @@ -69,6 +69,7 @@ static int ladder_select_state(struct cpuidle_driver *drv, struct ladder_device *ldev = this_cpu_ptr(&ladder_devices); struct ladder_device_state *last_state; int last_residency, last_idx = ldev->last_state_idx; + int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 
1 : 0; int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); /* Special case when user has set very strict latency requirement */ @@ -96,13 +97,13 @@ static int ladder_select_state(struct cpuidle_driver *drv, } /* consider demotion */ - if (last_idx > CPUIDLE_DRIVER_STATE_START && + if (last_idx > first_idx && (drv->states[last_idx].disabled || dev->states_usage[last_idx].disable || drv->states[last_idx].exit_latency > latency_req)) { int i; - for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { + for (i = last_idx - 1; i > first_idx; i--) { if (drv->states[i].exit_latency <= latency_req) break; } @@ -110,7 +111,7 @@ static int ladder_select_state(struct cpuidle_driver *drv, return i; } - if (last_idx > CPUIDLE_DRIVER_STATE_START && + if (last_idx > first_idx && last_residency < last_state->threshold.demotion_time) { last_state->stats.demotion_count++; last_state->stats.promotion_count = 0; @@ -133,13 +134,14 @@ static int ladder_enable_device(struct cpuidle_driver *drv, struct cpuidle_device *dev) { int i; + int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0; struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); struct ladder_device_state *lstate; struct cpuidle_state *state; - ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; + ldev->last_state_idx = first_idx; - for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { + for (i = first_idx; i < drv->state_count; i++) { state = &drv->states[i]; lstate = &ldev->states[i]; @@ -151,7 +153,7 @@ static int ladder_enable_device(struct cpuidle_driver *drv, if (i < drv->state_count - 1) lstate->threshold.promotion_time = state->exit_latency; - if (i > CPUIDLE_DRIVER_STATE_START) + if (i > first_idx) lstate->threshold.demotion_time = state->exit_latency; } diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 61b64c2b2cb8..48eaf2879228 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -324,8 +324,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) expected_interval = get_typical_interval(data); expected_interval = min(expected_interval, data->next_timer_us); - if (CPUIDLE_DRIVER_STATE_START > 0) { - struct cpuidle_state *s = &drv->states[CPUIDLE_DRIVER_STATE_START]; + first_idx = 0; + if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) { + struct cpuidle_state *s = &drv->states[1]; unsigned int polling_threshold; /* @@ -336,12 +337,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) polling_threshold = max_t(unsigned int, 20, s->target_residency); if (data->next_timer_us > polling_threshold && latency_req > s->exit_latency && !s->disabled && - !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable) - first_idx = CPUIDLE_DRIVER_STATE_START; - else - first_idx = CPUIDLE_DRIVER_STATE_START - 1; - } else { - first_idx = 0; + !dev->states_usage[1].disable) + first_idx = 1; } /* -- cgit v1.2.3 From 34c2f65b718d44ea7d7b3cc10777f410677455ce Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 23 Aug 2017 23:21:07 +0200 Subject: cpuidle: Move polling state initialization code to separate file Move the polling state initialization code to a separate file built conditionally on CONFIG_ARCH_HAS_CPU_RELAX to get rid of the #ifdef in driver.c. Signed-off-by: Rafael J. 
Wysocki Tested-by: Sudeep Holla Acked-by: Daniel Lezcano --- drivers/cpuidle/Makefile | 1 + drivers/cpuidle/driver.c | 31 ------------------------------- drivers/cpuidle/poll_state.c | 36 ++++++++++++++++++++++++++++++++++++ 3 files changed, 37 insertions(+), 31 deletions(-) create mode 100644 drivers/cpuidle/poll_state.c (limited to 'drivers') diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 3ba81b1dffad..0b67a05a7aae 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -5,6 +5,7 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o +obj-$(CONFIG_ARCH_HAS_CPU_RELAX) += poll_state.o ################################################################################## # ARM SoC drivers diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index e942f8ef4309..6f694c86f3fa 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -179,37 +179,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv) } } -#ifdef CONFIG_ARCH_HAS_CPU_RELAX -static int __cpuidle poll_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) -{ - local_irq_enable(); - if (!current_set_polling_and_test()) { - while (!need_resched()) - cpu_relax(); - } - current_clr_polling(); - - return index; -} - -static void poll_idle_init(struct cpuidle_driver *drv) -{ - struct cpuidle_state *state = &drv->states[0]; - - snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); - snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); - state->exit_latency = 0; - state->target_residency = 0; - state->power_usage = -1; - state->enter = poll_idle; - state->disabled = false; - state->flags = CPUIDLE_FLAG_POLLING; -} -#else -static void poll_idle_init(struct cpuidle_driver *drv) {} -#endif /* !CONFIG_ARCH_HAS_CPU_RELAX */ - /** * __cpuidle_register_driver: register the driver * @drv: a valid pointer to a struct cpuidle_driver diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c new file mode 100644 index 000000000000..0db4f7273952 --- /dev/null +++ b/drivers/cpuidle/poll_state.c @@ -0,0 +1,36 @@ +/* + * poll_state.c - Polling idle state + * + * This file is released under the GPLv2. + */ + +#include +#include +#include + +static int __cpuidle poll_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + local_irq_enable(); + if (!current_set_polling_and_test()) { + while (!need_resched()) + cpu_relax(); + } + current_clr_polling(); + + return index; +} + +void poll_idle_init(struct cpuidle_driver *drv) +{ + struct cpuidle_state *state = &drv->states[0]; + + snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); + snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); + state->exit_latency = 0; + state->target_residency = 0; + state->power_usage = -1; + state->enter = poll_idle; + state->disabled = false; + state->flags = CPUIDLE_FLAG_POLLING; +} -- cgit v1.2.3 From 1b39e3f813b4685c7a30ae964d5529a1b0e3a286 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 29 Aug 2017 03:14:37 +0200 Subject: cpuidle: Make drivers initialize polling state Make the drivers that want to include the polling state into their states table initialize it explicitly and drop the initialization of it (which in fact is conditional, but that is not obvious from the code) from the core. Signed-off-by: Rafael J. 
Wysocki Tested-by: Sudeep Holla Acked-by: Daniel Lezcano --- drivers/acpi/processor_idle.c | 9 ++++++++- drivers/cpuidle/driver.c | 2 -- drivers/cpuidle/poll_state.c | 3 ++- drivers/idle/intel_idle.c | 1 + 4 files changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 39a01ea7a46d..df38e81cc672 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -842,7 +842,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, static int acpi_processor_setup_cstates(struct acpi_processor *pr) { - int i, count = ACPI_IDLE_STATE_START; + int i, count; struct acpi_processor_cx *cx; struct cpuidle_state *state; struct cpuidle_driver *drv = &acpi_idle_driver; @@ -850,6 +850,13 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) if (max_cstate == 0) max_cstate = 1; + if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) { + cpuidle_poll_state_init(drv); + count = 1; + } else { + count = 0; + } + for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { cx = &pr->power.states[i]; diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 6f694c86f3fa..dc32f34e68d9 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -216,8 +216,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv) on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, (void *)1, 1); - poll_idle_init(drv); - return 0; } diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c index 0db4f7273952..7416b16287de 100644 --- a/drivers/cpuidle/poll_state.c +++ b/drivers/cpuidle/poll_state.c @@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev, return index; } -void poll_idle_init(struct cpuidle_driver *drv) +void cpuidle_poll_state_init(struct cpuidle_driver *drv) { struct cpuidle_state *state = &drv->states[0]; @@ -34,3 +34,4 @@ void poll_idle_init(struct cpuidle_driver *drv) state->disabled = false; state->flags = CPUIDLE_FLAG_POLLING; } +EXPORT_SYMBOL_GPL(cpuidle_poll_state_init); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index c2ae819a871c..7bf8739e33bc 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1331,6 +1331,7 @@ static void __init intel_idle_cpuidle_driver_init(void) intel_idle_state_table_update(); + cpuidle_poll_state_init(drv); drv->state_count = 1; for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { -- cgit v1.2.3
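For illustration only (not part of any patch in this series): after the three cpuidle changes above, the core no longer injects the polling state itself, so a driver that wants it calls cpuidle_poll_state_init() on state 0 before registering. The sketch below assumes the post-series API; cpuidle_poll_state_init() is exported by the last patch and declared in include/linux/cpuidle.h by the full commit (the header hunk is not shown in the drivers-only diffs here). The driver name, enter callback, and latency numbers are hypothetical.

#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/module.h>

/* Hypothetical low-power entry; a real driver would program the hardware here. */
static int example_idle_enter(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	/* Platform-specific idle entry would go here. */
	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",		/* hypothetical name */
	.owner = THIS_MODULE,
	.states[1] = {			/* state 0 is filled in by cpuidle_poll_state_init() */
		.name = "C1",
		.desc = "example low-power state",
		.exit_latency = 10,		/* us, illustrative */
		.target_residency = 20,		/* us, illustrative */
		.enter = example_idle_enter,
	},
	.state_count = 2,
};

static int __init example_idle_init(void)
{
	/*
	 * Opt in to the polling state explicitly: it becomes state 0 and is
	 * flagged CPUIDLE_FLAG_POLLING, mirroring what intel_idle and the
	 * ACPI processor driver do in the last patch above.
	 */
	cpuidle_poll_state_init(&example_idle_driver);
	return cpuidle_register(&example_idle_driver, NULL);
}
device_initcall(example_idle_init);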