Merge back earlier cpufreq material for 3.19-rc1.
Commit 8a497cfdc0
@@ -1,17 +1,28 @@
Intel P-state driver
--------------------

This driver implements a scaling driver with an internal governor for
Intel Core processors. The driver follows the same model as the
Transmeta scaling driver (longrun.c) and implements the setpolicy()
instead of target(). Scaling drivers that implement setpolicy() are
assumed to implement internal governors by the cpufreq core. All the
logic for selecting the current P state is contained within the
driver; no external governor is used by the cpufreq core.

This driver provides an interface to control the P state selection for
SandyBridge+ Intel processors. The driver can operate in two different
modes based on the processor model: legacy mode and Hardware P state
(HWP) mode.

Intel SandyBridge+ processors are supported.

In legacy mode the driver implements a scaling driver with an internal
governor for Intel Core processors. The driver follows the same model
as the Transmeta scaling driver (longrun.c) and implements the
setpolicy() instead of target(). Scaling drivers that implement
setpolicy() are assumed to implement internal governors by the cpufreq
core. All the logic for selecting the current P state is contained
within the driver; no external governor is used by the cpufreq core.

New sysfs files for controlling P state selection have been added to

In HWP mode P state selection is implemented in the processor
itself. The driver provides the interfaces between the cpufreq core and
the processor to control P state selection based on user preferences
and to report frequency to the cpufreq core. In this mode the
internal governor code is disabled.

In addition to the interfaces provided by the cpufreq core for
controlling frequency, the driver provides sysfs files for
controlling P state selection. These files have been added to
/sys/devices/system/cpu/intel_pstate/

max_perf_pct: limits the maximum P state that will be requested by

@@ -33,7 +44,9 @@ frequency is fiction for Intel Core processors. Even if the scaling
driver selects a single P state the actual frequency the processor
will run at is selected by the processor itself.

New debugfs files have also been added to /sys/kernel/debug/pstate_snb/
For legacy mode, debugfs files have also been added to allow tuning of
the internal governor algorithm. These files are located at
/sys/kernel/debug/pstate_snb/. These files are NOT present in HWP mode.

deadband
d_gain_pct
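
As a usage illustration (not part of the documentation patch), a minimal
userspace sketch that writes the new max_perf_pct sysfs file; the 80
percent cap is an arbitrary example value:

	#include <stdio.h>

	int main(void)
	{
		/* Assumes intel_pstate is the active scaling driver. */
		FILE *f = fopen("/sys/devices/system/cpu/intel_pstate/max_perf_pct", "w");

		if (!f) {
			perror("max_perf_pct");
			return 1;
		}
		fprintf(f, "%d\n", 80);	/* cap requested P states at 80% of available performance */
		fclose(f);
		return 0;
	}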
@@ -1446,6 +1446,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
	       disable
	         Do not enable intel_pstate as the default
	         scaling driver for the supported processors
	       no_hwp
	         Do not enable hardware P state control (HWP)
	         if available.
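
For example, booting with the following command-line option (illustrative
usage) keeps intel_pstate loaded but leaves HWP disabled, so the driver
stays in legacy mode even on HWP-capable processors:

	intel_pstate=no_hwp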

	intremap=	[X86-64, Intel-IOMMU]
			on	enable Interrupt Remapping (default)
@@ -189,6 +189,11 @@
#define X86_FEATURE_DTHERM	( 7*32+ 7) /* Digital Thermal Sensor */
#define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_HWP		( 7*32+ 10) /* "hwp" Intel HWP */
#define X86_FEATURE_HWP_NOITFY	( 7*32+ 11) /* Intel HWP_NOTIFY */
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
#define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ	( 7*32+14) /* Intel HWP_PKG_REQ */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW	( 8*32+ 0) /* Intel TPR Shadow */
@@ -152,6 +152,45 @@
#define MSR_CC6_DEMOTION_POLICY_CONFIG	0x00000668
#define MSR_MC6_DEMOTION_POLICY_CONFIG	0x00000669

/* Hardware P state interface */
#define MSR_PPERF			0x0000064e
#define MSR_PERF_LIMIT_REASONS		0x0000064f
#define MSR_PM_ENABLE			0x00000770
#define MSR_HWP_CAPABILITIES		0x00000771
#define MSR_HWP_REQUEST_PKG		0x00000772
#define MSR_HWP_INTERRUPT		0x00000773
#define MSR_HWP_REQUEST			0x00000774
#define MSR_HWP_STATUS			0x00000777

/* CPUID.6.EAX */
#define HWP_BASE_BIT			(1<<7)
#define HWP_NOTIFICATIONS_BIT		(1<<8)
#define HWP_ACTIVITY_WINDOW_BIT		(1<<9)
#define HWP_ENERGY_PERF_PREFERENCE_BIT	(1<<10)
#define HWP_PACKAGE_LEVEL_REQUEST_BIT	(1<<11)

/* IA32_HWP_CAPABILITIES */
#define HWP_HIGHEST_PERF(x)		(x & 0xff)
#define HWP_GUARANTEED_PERF(x)		((x & (0xff << 8)) >>8)
#define HWP_MOSTEFFICIENT_PERF(x)	((x & (0xff << 16)) >>16)
#define HWP_LOWEST_PERF(x)		((x & (0xff << 24)) >>24)

/* IA32_HWP_REQUEST */
#define HWP_MIN_PERF(x)			(x & 0xff)
#define HWP_MAX_PERF(x)			((x & 0xff) << 8)
#define HWP_DESIRED_PERF(x)		((x & 0xff) << 16)
#define HWP_ENERGY_PERF_PREFERENCE(x)	((x & 0xff) << 24)
#define HWP_ACTIVITY_WINDOW(x)		((x & 0xff3) << 32)
#define HWP_PACKAGE_CONTROL(x)		((x & 0x1) << 42)

/* IA32_HWP_STATUS */
#define HWP_GUARANTEED_CHANGE(x)	(x & 0x1)
#define HWP_EXCURSION_TO_MINIMUM(x)	(x & 0x4)

/* IA32_HWP_INTERRUPT */
#define HWP_CHANGE_TO_GUARANTEED_INT(x)	(x & 0x1)
#define HWP_EXCURSION_TO_MINIMUM_INT(x)	(x & 0x2)

#define MSR_AMD64_MC0_MASK		0xc0010044

#define MSR_IA32_MCx_CTL(x)		(MSR_IA32_MC0_CTL + 4*(x))
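
As an illustrative sketch (not part of this patch), the new request macros
compose a single IA32_HWP_REQUEST value; the 0x10/0xff performance levels
below are arbitrary example numbers and wrmsrl_on_cpu() is the existing
MSR helper from <asm/msr.h>:

	/* Hypothetical helper: clamp one CPU to an example HWP range. */
	static void example_hwp_request(unsigned int cpu)
	{
		u64 value = 0;

		value |= HWP_MIN_PERF(0x10);	/* lowest acceptable performance level */
		value |= HWP_MAX_PERF(0xff);	/* highest allowed performance level */
		value |= HWP_DESIRED_PERF(0);	/* 0 lets hardware choose within the range */
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}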

@@ -345,6 +384,8 @@

#define MSR_IA32_TEMPERATURE_TARGET	0x000001a2

#define MSR_MISC_PWR_MGMT		0x000001aa

#define MSR_IA32_ENERGY_PERF_BIAS	0x000001b0
#define ENERGY_PERF_BIAS_PERFORMANCE	0
#define ENERGY_PERF_BIAS_NORMAL		6
@ -36,6 +36,11 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
|
|||
{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
|
||||
{ X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
|
||||
{ X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 },
|
||||
{ X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 },
|
||||
{ X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 },
|
||||
{ X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 },
|
||||
{ X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 },
|
||||
{ X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },
|
||||
{ X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
|
||||
{ X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
|
||||
{ X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
|
||||
|
|
|
@ -63,7 +63,6 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
|
|||
|
||||
config CPU_FREQ_DEFAULT_GOV_POWERSAVE
|
||||
bool "powersave"
|
||||
depends on EXPERT
|
||||
select CPU_FREQ_GOV_POWERSAVE
|
||||
help
|
||||
Use the CPUFreq governor 'powersave' as default. This sets
|
||||
|
@ -183,6 +182,8 @@ config CPU_FREQ_GOV_CONSERVATIVE
|
|||
|
||||
If in doubt, say N.
|
||||
|
||||
comment "CPU frequency scaling drivers"
|
||||
|
||||
config CPUFREQ_DT
|
||||
tristate "Generic DT based cpufreq driver"
|
||||
depends on HAVE_CLK && OF
|
||||
|
@ -196,19 +197,19 @@ config CPUFREQ_DT
|
|||
|
||||
If in doubt, say N.
|
||||
|
||||
menu "x86 CPU frequency scaling drivers"
|
||||
depends on X86
|
||||
if X86
|
||||
source "drivers/cpufreq/Kconfig.x86"
|
||||
endmenu
|
||||
endif
|
||||
|
||||
menu "ARM CPU frequency scaling drivers"
|
||||
depends on ARM || ARM64
|
||||
if ARM || ARM64
|
||||
source "drivers/cpufreq/Kconfig.arm"
|
||||
endmenu
|
||||
endif
|
||||
|
||||
menu "AVR32 CPU frequency scaling drivers"
|
||||
depends on AVR32
|
||||
if PPC32 || PPC64
|
||||
source "drivers/cpufreq/Kconfig.powerpc"
|
||||
endif
|
||||
|
||||
if AVR32
|
||||
config AVR32_AT32AP_CPUFREQ
|
||||
bool "CPU frequency driver for AT32AP"
|
||||
depends on PLATFORM_AT32AP
|
||||
|
@ -216,12 +217,9 @@ config AVR32_AT32AP_CPUFREQ
|
|||
help
|
||||
This enables the CPU frequency driver for AT32AP processors.
|
||||
If in doubt, say N.
|
||||
endif
|
||||
|
||||
endmenu
|
||||
|
||||
menu "CPUFreq processor drivers"
|
||||
depends on IA64
|
||||
|
||||
if IA64
|
||||
config IA64_ACPI_CPUFREQ
|
||||
tristate "ACPI Processor P-States driver"
|
||||
depends on ACPI_PROCESSOR
|
||||
|
@ -232,12 +230,9 @@ config IA64_ACPI_CPUFREQ
|
|||
For details, take a look at <file:Documentation/cpu-freq/>.
|
||||
|
||||
If in doubt, say N.
|
||||
endif
|
||||
|
||||
endmenu
|
||||
|
||||
menu "MIPS CPUFreq processor drivers"
|
||||
depends on MIPS
|
||||
|
||||
if MIPS
|
||||
config LOONGSON2_CPUFREQ
|
||||
tristate "Loongson2 CPUFreq Driver"
|
||||
help
|
||||
|
@ -250,15 +245,18 @@ config LOONGSON2_CPUFREQ
|
|||
|
||||
If in doubt, say N.
|
||||
|
||||
endmenu
|
||||
config LOONGSON1_CPUFREQ
|
||||
tristate "Loongson1 CPUFreq Driver"
|
||||
help
|
||||
This option adds a CPUFreq driver for loongson1 processors which
|
||||
support software configurable cpu frequency.
|
||||
|
||||
menu "PowerPC CPU frequency scaling drivers"
|
||||
depends on PPC32 || PPC64
|
||||
source "drivers/cpufreq/Kconfig.powerpc"
|
||||
endmenu
|
||||
For details, take a look at <file:Documentation/cpu-freq/>.
|
||||
|
||||
menu "SPARC CPU frequency scaling drivers"
|
||||
depends on SPARC64
|
||||
If in doubt, say N.
|
||||
endif
|
||||
|
||||
if SPARC64
|
||||
config SPARC_US3_CPUFREQ
|
||||
tristate "UltraSPARC-III CPU Frequency driver"
|
||||
help
|
||||
|
@ -276,10 +274,9 @@ config SPARC_US2E_CPUFREQ
|
|||
For details, take a look at <file:Documentation/cpu-freq>.
|
||||
|
||||
If in doubt, say N.
|
||||
endmenu
|
||||
endif
|
||||
|
||||
menu "SH CPU Frequency scaling"
|
||||
depends on SUPERH
|
||||
if SUPERH
|
||||
config SH_CPU_FREQ
|
||||
tristate "SuperH CPU Frequency driver"
|
||||
help
|
||||
|
@ -293,7 +290,7 @@ config SH_CPU_FREQ
|
|||
For details, take a look at <file:Documentation/cpu-freq>.
|
||||
|
||||
If unsure, say N.
|
||||
endmenu
|
||||
endif
|
||||
|
||||
endif
|
||||
endmenu
|
||||
|
|
|
@ -247,3 +247,11 @@ config ARM_TEGRA_CPUFREQ
|
|||
default y
|
||||
help
|
||||
This adds the CPUFreq driver support for TEGRA SOCs.
|
||||
|
||||
config ARM_PXA2xx_CPUFREQ
|
||||
tristate "Intel PXA2xx CPUfreq driver"
|
||||
depends on PXA27x || PXA25x
|
||||
help
|
||||
This add the CPUFreq driver support for Intel PXA2xx SOCs.
|
||||
|
||||
If in doubt, say N.
|
||||
|
|
|
@ -61,8 +61,7 @@ obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
|
|||
obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
|
||||
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
|
||||
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
|
||||
obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o
|
||||
obj-$(CONFIG_PXA27x) += pxa2xx-cpufreq.o
|
||||
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
|
||||
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
|
||||
obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
|
||||
obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
|
||||
|
@ -98,6 +97,7 @@ obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
|
|||
obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
|
||||
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
|
||||
obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
|
||||
obj-$(CONFIG_LOONGSON1_CPUFREQ) += ls1x-cpufreq.o
|
||||
obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
|
||||
obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
|
||||
obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
|
||||
|
|
|
@ -58,6 +58,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
|
|||
old_freq = clk_get_rate(cpu_clk) / 1000;
|
||||
|
||||
if (!IS_ERR(cpu_reg)) {
|
||||
unsigned long opp_freq;
|
||||
|
||||
rcu_read_lock();
|
||||
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
|
||||
if (IS_ERR(opp)) {
|
||||
|
@ -67,13 +69,16 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
|
|||
return PTR_ERR(opp);
|
||||
}
|
||||
volt = dev_pm_opp_get_voltage(opp);
|
||||
opp_freq = dev_pm_opp_get_freq(opp);
|
||||
rcu_read_unlock();
|
||||
tol = volt * priv->voltage_tolerance / 100;
|
||||
volt_old = regulator_get_voltage(cpu_reg);
|
||||
dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
|
||||
opp_freq / 1000, volt);
|
||||
}
|
||||
|
||||
dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
|
||||
old_freq / 1000, volt_old ? volt_old / 1000 : -1,
|
||||
old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
|
||||
new_freq / 1000, volt ? volt / 1000 : -1);
|
||||
|
||||
/* scaling up? scale voltage before frequency */
|
||||
|
@ -89,7 +94,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
|
|||
ret = clk_set_rate(cpu_clk, freq_exact);
|
||||
if (ret) {
|
||||
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
|
||||
if (!IS_ERR(cpu_reg))
|
||||
if (!IS_ERR(cpu_reg) && volt_old > 0)
|
||||
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
|
||||
return ret;
|
||||
}
|
||||
|
@ -181,7 +186,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
|
|||
{
|
||||
struct cpufreq_dt_platform_data *pd;
|
||||
struct cpufreq_frequency_table *freq_table;
|
||||
struct thermal_cooling_device *cdev;
|
||||
struct device_node *np;
|
||||
struct private_data *priv;
|
||||
struct device *cpu_dev;
|
||||
|
@ -264,20 +268,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
|
|||
goto out_free_priv;
|
||||
}
|
||||
|
||||
/*
|
||||
* For now, just loading the cooling device;
|
||||
* thermal DT code takes care of matching them.
|
||||
*/
|
||||
if (of_find_property(np, "#cooling-cells", NULL)) {
|
||||
cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
|
||||
if (IS_ERR(cdev))
|
||||
dev_err(cpu_dev,
|
||||
"running cpufreq without cooling device: %ld\n",
|
||||
PTR_ERR(cdev));
|
||||
else
|
||||
priv->cdev = cdev;
|
||||
}
|
||||
|
||||
priv->cpu_dev = cpu_dev;
|
||||
priv->cpu_reg = cpu_reg;
|
||||
policy->driver_data = priv;
|
||||
|
@ -287,7 +277,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
|
|||
if (ret) {
|
||||
dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
|
||||
ret);
|
||||
goto out_cooling_unregister;
|
||||
goto out_free_cpufreq_table;
|
||||
}
|
||||
|
||||
policy->cpuinfo.transition_latency = transition_latency;
|
||||
|
@ -300,8 +290,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
|
|||
|
||||
return 0;
|
||||
|
||||
out_cooling_unregister:
|
||||
cpufreq_cooling_unregister(priv->cdev);
|
||||
out_free_cpufreq_table:
|
||||
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
|
||||
out_free_priv:
|
||||
kfree(priv);
|
||||
|
@ -319,7 +308,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
|
|||
{
|
||||
struct private_data *priv = policy->driver_data;
|
||||
|
||||
cpufreq_cooling_unregister(priv->cdev);
|
||||
if (priv->cdev)
|
||||
cpufreq_cooling_unregister(priv->cdev);
|
||||
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
|
||||
clk_put(policy->clk);
|
||||
if (!IS_ERR(priv->cpu_reg))
|
||||
|
@ -329,6 +319,33 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void cpufreq_ready(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct private_data *priv = policy->driver_data;
|
||||
struct device_node *np = of_node_get(priv->cpu_dev->of_node);
|
||||
|
||||
if (WARN_ON(!np))
|
||||
return;
|
||||
|
||||
/*
|
||||
* For now, just loading the cooling device;
|
||||
* thermal DT code takes care of matching them.
|
||||
*/
|
||||
if (of_find_property(np, "#cooling-cells", NULL)) {
|
||||
priv->cdev = of_cpufreq_cooling_register(np,
|
||||
policy->related_cpus);
|
||||
if (IS_ERR(priv->cdev)) {
|
||||
dev_err(priv->cpu_dev,
|
||||
"running cpufreq without cooling device: %ld\n",
|
||||
PTR_ERR(priv->cdev));
|
||||
|
||||
priv->cdev = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
of_node_put(np);
|
||||
}
|
||||
|
||||
static struct cpufreq_driver dt_cpufreq_driver = {
|
||||
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
|
||||
.verify = cpufreq_generic_frequency_table_verify,
|
||||
|
@ -336,6 +353,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
|
|||
.get = cpufreq_generic_get,
|
||||
.init = cpufreq_init,
|
||||
.exit = cpufreq_exit,
|
||||
.ready = cpufreq_ready,
|
||||
.name = "cpufreq-dt",
|
||||
.attr = cpufreq_generic_attr,
|
||||
};
|
||||
|
|
|
@ -535,7 +535,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
|
|||
static ssize_t store_##file_name \
|
||||
(struct cpufreq_policy *policy, const char *buf, size_t count) \
|
||||
{ \
|
||||
int ret; \
|
||||
int ret, temp; \
|
||||
struct cpufreq_policy new_policy; \
|
||||
\
|
||||
ret = cpufreq_get_policy(&new_policy, policy->cpu); \
|
||||
|
@ -546,8 +546,10 @@ static ssize_t store_##file_name \
|
|||
if (ret != 1) \
|
||||
return -EINVAL; \
|
||||
\
|
||||
temp = new_policy.object; \
|
||||
ret = cpufreq_set_policy(policy, &new_policy); \
|
||||
policy->user_policy.object = policy->object; \
|
||||
if (!ret) \
|
||||
policy->user_policy.object = temp; \
|
||||
\
|
||||
return ret ? ret : count; \
|
||||
}
|
||||
|
@ -898,46 +900,31 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
|
|||
struct freq_attr **drv_attr;
|
||||
int ret = 0;
|
||||
|
||||
/* prepare interface data */
|
||||
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
|
||||
&dev->kobj, "cpufreq");
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* set up files for this cpu device */
|
||||
drv_attr = cpufreq_driver->attr;
|
||||
while ((drv_attr) && (*drv_attr)) {
|
||||
ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
|
||||
if (ret)
|
||||
goto err_out_kobj_put;
|
||||
return ret;
|
||||
drv_attr++;
|
||||
}
|
||||
if (cpufreq_driver->get) {
|
||||
ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
|
||||
if (ret)
|
||||
goto err_out_kobj_put;
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
|
||||
if (ret)
|
||||
goto err_out_kobj_put;
|
||||
return ret;
|
||||
|
||||
if (cpufreq_driver->bios_limit) {
|
||||
ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
|
||||
if (ret)
|
||||
goto err_out_kobj_put;
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = cpufreq_add_dev_symlink(policy);
|
||||
if (ret)
|
||||
goto err_out_kobj_put;
|
||||
|
||||
return ret;
|
||||
|
||||
err_out_kobj_put:
|
||||
kobject_put(&policy->kobj);
|
||||
wait_for_completion(&policy->kobj_unregister);
|
||||
return ret;
|
||||
return cpufreq_add_dev_symlink(policy);
|
||||
}
|
||||
|
||||
static void cpufreq_init_policy(struct cpufreq_policy *policy)
|
||||
|
@ -1196,6 +1183,8 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
|
|||
goto err_set_policy_cpu;
|
||||
}
|
||||
|
||||
down_write(&policy->rwsem);
|
||||
|
||||
/* related cpus should atleast have policy->cpus */
|
||||
cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
|
||||
|
||||
|
@ -1208,9 +1197,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
|
|||
if (!recover_policy) {
|
||||
policy->user_policy.min = policy->min;
|
||||
policy->user_policy.max = policy->max;
|
||||
|
||||
/* prepare interface data */
|
||||
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
|
||||
&dev->kobj, "cpufreq");
|
||||
if (ret) {
|
||||
pr_err("%s: failed to init policy->kobj: %d\n",
|
||||
__func__, ret);
|
||||
goto err_init_policy_kobj;
|
||||
}
|
||||
}
|
||||
|
||||
down_write(&policy->rwsem);
|
||||
write_lock_irqsave(&cpufreq_driver_lock, flags);
|
||||
for_each_cpu(j, policy->cpus)
|
||||
per_cpu(cpufreq_cpu_data, j) = policy;
|
||||
|
@ -1288,8 +1285,13 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
|
|||
up_write(&policy->rwsem);
|
||||
|
||||
kobject_uevent(&policy->kobj, KOBJ_ADD);
|
||||
|
||||
up_read(&cpufreq_rwsem);
|
||||
|
||||
/* Callback for handling stuff after policy is ready */
|
||||
if (cpufreq_driver->ready)
|
||||
cpufreq_driver->ready(policy);
|
||||
|
||||
pr_debug("initialization complete\n");
|
||||
|
||||
return 0;
|
||||
|
@ -1301,6 +1303,11 @@ err_get_freq:
|
|||
per_cpu(cpufreq_cpu_data, j) = NULL;
|
||||
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
|
||||
|
||||
if (!recover_policy) {
|
||||
kobject_put(&policy->kobj);
|
||||
wait_for_completion(&policy->kobj_unregister);
|
||||
}
|
||||
err_init_policy_kobj:
|
||||
up_write(&policy->rwsem);
|
||||
|
||||
if (cpufreq_driver->exit)
|
||||
|
|
|
@ -137,6 +137,7 @@ struct cpu_defaults {
|
|||
|
||||
static struct pstate_adjust_policy pid_params;
|
||||
static struct pstate_funcs pstate_funcs;
|
||||
static int hwp_active;
|
||||
|
||||
struct perf_limits {
|
||||
int no_turbo;
|
||||
|
@ -244,6 +245,34 @@ static inline void update_turbo_state(void)
|
|||
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
|
||||
}
|
||||
|
||||
#define PCT_TO_HWP(x) (x * 255 / 100)
|
||||
static void intel_pstate_hwp_set(void)
|
||||
{
|
||||
int min, max, cpu;
|
||||
u64 value, freq;
|
||||
|
||||
get_online_cpus();
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
|
||||
min = PCT_TO_HWP(limits.min_perf_pct);
|
||||
value &= ~HWP_MIN_PERF(~0L);
|
||||
value |= HWP_MIN_PERF(min);
|
||||
|
||||
max = PCT_TO_HWP(limits.max_perf_pct);
|
||||
if (limits.no_turbo) {
|
||||
rdmsrl( MSR_HWP_CAPABILITIES, freq);
|
||||
max = HWP_GUARANTEED_PERF(freq);
|
||||
}
|
||||
|
||||
value &= ~HWP_MAX_PERF(~0L);
|
||||
value |= HWP_MAX_PERF(max);
|
||||
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
|
||||
}
|
||||
|
||||
put_online_cpus();
|
||||
}
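
As a quick arithmetic check on the conversion above (illustrative, assuming
the 0..255 HWP performance scale): with min_perf_pct = 50, PCT_TO_HWP(50) =
50 * 255 / 100 = 127 (0x7f), which HWP_MIN_PERF() places in bits 0-7 of
MSR_HWP_REQUEST.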
|
||||
|
||||
/************************** debugfs begin ************************/
|
||||
static int pid_param_set(void *data, u64 val)
|
||||
{
|
||||
|
@ -279,6 +308,8 @@ static void __init intel_pstate_debug_expose_params(void)
|
|||
struct dentry *debugfs_parent;
|
||||
int i = 0;
|
||||
|
||||
if (hwp_active)
|
||||
return;
|
||||
debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
|
||||
if (IS_ERR_OR_NULL(debugfs_parent))
|
||||
return;
|
||||
|
@ -329,8 +360,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
|
|||
pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
limits.no_turbo = clamp_t(int, input, 0, 1);
|
||||
|
||||
if (hwp_active)
|
||||
intel_pstate_hwp_set();
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
|
@ -348,6 +383,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
|
|||
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
|
||||
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
|
||||
|
||||
if (hwp_active)
|
||||
intel_pstate_hwp_set();
|
||||
return count;
|
||||
}
|
||||
|
||||
|
@ -363,6 +400,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
|
|||
limits.min_perf_pct = clamp_t(int, input, 0 , 100);
|
||||
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
|
||||
|
||||
if (hwp_active)
|
||||
intel_pstate_hwp_set();
|
||||
return count;
|
||||
}
|
||||
|
||||
|
@ -395,8 +434,16 @@ static void __init intel_pstate_sysfs_expose_params(void)
|
|||
rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
|
||||
BUG_ON(rc);
|
||||
}
|
||||
|
||||
/************************** sysfs end ************************/
|
||||
|
||||
static void intel_pstate_hwp_enable(void)
|
||||
{
|
||||
hwp_active++;
|
||||
pr_info("intel_pstate HWP enabled\n");
|
||||
|
||||
wrmsrl( MSR_PM_ENABLE, 0x1);
|
||||
}
|
||||
|
||||
static int byt_get_min_pstate(void)
|
||||
{
|
||||
u64 value;
|
||||
|
@ -648,6 +695,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
|
|||
cpu->prev_mperf = mperf;
|
||||
}
|
||||
|
||||
static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
|
||||
{
|
||||
int delay;
|
||||
|
||||
delay = msecs_to_jiffies(50);
|
||||
mod_timer_pinned(&cpu->timer, jiffies + delay);
|
||||
}
|
||||
|
||||
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
|
||||
{
|
||||
int delay;
|
||||
|
@ -694,6 +749,14 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
|
|||
intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
|
||||
}
|
||||
|
||||
static void intel_hwp_timer_func(unsigned long __data)
|
||||
{
|
||||
struct cpudata *cpu = (struct cpudata *) __data;
|
||||
|
||||
intel_pstate_sample(cpu);
|
||||
intel_hwp_set_sample_time(cpu);
|
||||
}
|
||||
|
||||
static void intel_pstate_timer_func(unsigned long __data)
|
||||
{
|
||||
struct cpudata *cpu = (struct cpudata *) __data;
|
||||
|
@ -730,6 +793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
|
|||
ICPU(0x3f, core_params),
|
||||
ICPU(0x45, core_params),
|
||||
ICPU(0x46, core_params),
|
||||
ICPU(0x47, core_params),
|
||||
ICPU(0x4c, byt_params),
|
||||
ICPU(0x4f, core_params),
|
||||
ICPU(0x56, core_params),
|
||||
|
@ -737,6 +801,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
|
|||
};
|
||||
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
|
||||
|
||||
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
|
||||
ICPU(0x56, core_params),
|
||||
{}
|
||||
};
|
||||
|
||||
static int intel_pstate_init_cpu(unsigned int cpunum)
|
||||
{
|
||||
struct cpudata *cpu;
|
||||
|
@ -753,9 +822,14 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
|
|||
intel_pstate_get_cpu_pstates(cpu);
|
||||
|
||||
init_timer_deferrable(&cpu->timer);
|
||||
cpu->timer.function = intel_pstate_timer_func;
|
||||
cpu->timer.data = (unsigned long)cpu;
|
||||
cpu->timer.expires = jiffies + HZ/100;
|
||||
|
||||
if (!hwp_active)
|
||||
cpu->timer.function = intel_pstate_timer_func;
|
||||
else
|
||||
cpu->timer.function = intel_hwp_timer_func;
|
||||
|
||||
intel_pstate_busy_pid_reset(cpu);
|
||||
intel_pstate_sample(cpu);
|
||||
|
||||
|
@ -792,6 +866,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
|
|||
limits.no_turbo = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
|
||||
limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
|
||||
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
|
||||
|
@ -801,6 +876,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
|
|||
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
|
||||
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
|
||||
|
||||
if (hwp_active)
|
||||
intel_pstate_hwp_set();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -823,6 +901,9 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
|
|||
pr_info("intel_pstate CPU %d exiting\n", cpu_num);
|
||||
|
||||
del_timer_sync(&all_cpu_data[cpu_num]->timer);
|
||||
if (hwp_active)
|
||||
return;
|
||||
|
||||
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
|
||||
}
|
||||
|
||||
|
@ -866,6 +947,7 @@ static struct cpufreq_driver intel_pstate_driver = {
|
|||
};
|
||||
|
||||
static int __initdata no_load;
|
||||
static int __initdata no_hwp;
|
||||
|
||||
static int intel_pstate_msrs_not_valid(void)
|
||||
{
|
||||
|
@ -959,6 +1041,15 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
|
|||
{
|
||||
struct acpi_table_header hdr;
|
||||
struct hw_vendor_info *v_info;
|
||||
const struct x86_cpu_id *id;
|
||||
u64 misc_pwr;
|
||||
|
||||
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
|
||||
if (id) {
|
||||
rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
|
||||
if ( misc_pwr & (1 << 8))
|
||||
return true;
|
||||
}
|
||||
|
||||
if (acpi_disabled ||
|
||||
ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
|
||||
|
@ -982,6 +1073,7 @@ static int __init intel_pstate_init(void)
|
|||
int cpu, rc = 0;
|
||||
const struct x86_cpu_id *id;
|
||||
struct cpu_defaults *cpu_info;
|
||||
struct cpuinfo_x86 *c = &boot_cpu_data;
|
||||
|
||||
if (no_load)
|
||||
return -ENODEV;
|
||||
|
@ -1011,6 +1103,9 @@ static int __init intel_pstate_init(void)
|
|||
if (!all_cpu_data)
|
||||
return -ENOMEM;
|
||||
|
||||
if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
|
||||
intel_pstate_hwp_enable();
|
||||
|
||||
rc = cpufreq_register_driver(&intel_pstate_driver);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
@ -1041,6 +1136,8 @@ static int __init intel_pstate_setup(char *str)
|
|||
|
||||
if (!strcmp(str, "disable"))
|
||||
no_load = 1;
|
||||
if (!strcmp(str, "no_hwp"))
|
||||
no_hwp = 1;
|
||||
return 0;
|
||||
}
|
||||
early_param("intel_pstate", intel_pstate_setup);
|
||||
|
|
|
@ -0,0 +1,223 @@
|
|||
/*
|
||||
* CPU Frequency Scaling for Loongson 1 SoC
|
||||
*
|
||||
* Copyright (C) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <asm/mach-loongson1/cpufreq.h>
|
||||
#include <asm/mach-loongson1/loongson1.h>
|
||||
|
||||
static struct {
|
||||
struct device *dev;
|
||||
struct clk *clk; /* CPU clk */
|
||||
struct clk *mux_clk; /* MUX of CPU clk */
|
||||
struct clk *pll_clk; /* PLL clk */
|
||||
struct clk *osc_clk; /* OSC clk */
|
||||
unsigned int max_freq;
|
||||
unsigned int min_freq;
|
||||
} ls1x_cpufreq;
|
||||
|
||||
static int ls1x_cpufreq_notifier(struct notifier_block *nb,
|
||||
unsigned long val, void *data)
|
||||
{
|
||||
if (val == CPUFREQ_POSTCHANGE)
|
||||
current_cpu_data.udelay_val = loops_per_jiffy;
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block ls1x_cpufreq_notifier_block = {
|
||||
.notifier_call = ls1x_cpufreq_notifier
|
||||
};
|
||||
|
||||
static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
|
||||
unsigned int index)
|
||||
{
|
||||
unsigned int old_freq, new_freq;
|
||||
|
||||
old_freq = policy->cur;
|
||||
new_freq = policy->freq_table[index].frequency;
|
||||
|
||||
/*
|
||||
* The procedure of reconfiguring CPU clk is as below.
|
||||
*
|
||||
* - Reparent CPU clk to OSC clk
|
||||
* - Reset CPU clock (very important)
|
||||
* - Reconfigure CPU DIV
|
||||
* - Reparent CPU clk back to CPU DIV clk
|
||||
*/
|
||||
|
||||
dev_dbg(ls1x_cpufreq.dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
|
||||
clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk);
|
||||
__raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
|
||||
LS1X_CLK_PLL_DIV);
|
||||
__raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
|
||||
LS1X_CLK_PLL_DIV);
|
||||
clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000);
|
||||
clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct cpufreq_frequency_table *freq_tbl;
|
||||
unsigned int pll_freq, freq;
|
||||
int steps, i, ret;
|
||||
|
||||
pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000;
|
||||
|
||||
steps = 1 << DIV_CPU_WIDTH;
|
||||
freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL);
|
||||
if (!freq_tbl) {
|
||||
dev_err(ls1x_cpufreq.dev,
|
||||
"failed to alloc cpufreq_frequency_table\n");
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < (steps - 1); i++) {
|
||||
freq = pll_freq / (i + 1);
|
||||
if ((freq < ls1x_cpufreq.min_freq) ||
|
||||
(freq > ls1x_cpufreq.max_freq))
|
||||
freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
|
||||
else
|
||||
freq_tbl[i].frequency = freq;
|
||||
dev_dbg(ls1x_cpufreq.dev,
|
||||
"cpufreq table: index %d: frequency %d\n", i,
|
||||
freq_tbl[i].frequency);
|
||||
}
|
||||
freq_tbl[i].frequency = CPUFREQ_TABLE_END;
|
||||
|
||||
policy->clk = ls1x_cpufreq.clk;
|
||||
ret = cpufreq_generic_init(policy, freq_tbl, 0);
|
||||
if (ret)
|
||||
kfree(freq_tbl);
|
||||
out:
|
||||
return ret;
|
||||
}
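
As a worked example (hypothetical numbers, not from the patch): with a
500000 kHz PLL and steps = 4, the loop above uses divisors 1..3 and produces
candidate entries of 500000, 250000 and 166666 kHz; any entry outside
[min_freq, max_freq] is marked CPUFREQ_ENTRY_INVALID, and the final slot is
terminated with CPUFREQ_TABLE_END.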
|
||||
|
||||
static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
|
||||
{
|
||||
kfree(policy->freq_table);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct cpufreq_driver ls1x_cpufreq_driver = {
|
||||
.name = "cpufreq-ls1x",
|
||||
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
|
||||
.verify = cpufreq_generic_frequency_table_verify,
|
||||
.target_index = ls1x_cpufreq_target,
|
||||
.get = cpufreq_generic_get,
|
||||
.init = ls1x_cpufreq_init,
|
||||
.exit = ls1x_cpufreq_exit,
|
||||
.attr = cpufreq_generic_attr,
|
||||
};
|
||||
|
||||
static int ls1x_cpufreq_remove(struct platform_device *pdev)
|
||||
{
|
||||
cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
|
||||
CPUFREQ_TRANSITION_NOTIFIER);
|
||||
cpufreq_unregister_driver(&ls1x_cpufreq_driver);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ls1x_cpufreq_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data;
|
||||
struct clk *clk;
|
||||
int ret;
|
||||
|
||||
if (!pdata || !pdata->clk_name || !pdata->osc_clk_name)
|
||||
return -EINVAL;
|
||||
|
||||
ls1x_cpufreq.dev = &pdev->dev;
|
||||
|
||||
clk = devm_clk_get(&pdev->dev, pdata->clk_name);
|
||||
if (IS_ERR(clk)) {
|
||||
dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
|
||||
pdata->clk_name);
|
||||
ret = PTR_ERR(clk);
|
||||
goto out;
|
||||
}
|
||||
ls1x_cpufreq.clk = clk;
|
||||
|
||||
clk = clk_get_parent(clk);
|
||||
if (IS_ERR(clk)) {
|
||||
dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
|
||||
__clk_get_name(ls1x_cpufreq.clk));
|
||||
ret = PTR_ERR(clk);
|
||||
goto out;
|
||||
}
|
||||
ls1x_cpufreq.mux_clk = clk;
|
||||
|
||||
clk = clk_get_parent(clk);
|
||||
if (IS_ERR(clk)) {
|
||||
dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
|
||||
__clk_get_name(ls1x_cpufreq.mux_clk));
|
||||
ret = PTR_ERR(clk);
|
||||
goto out;
|
||||
}
|
||||
ls1x_cpufreq.pll_clk = clk;
|
||||
|
||||
clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
|
||||
if (IS_ERR(clk)) {
|
||||
dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
|
||||
pdata->osc_clk_name);
|
||||
ret = PTR_ERR(clk);
|
||||
goto out;
|
||||
}
|
||||
ls1x_cpufreq.osc_clk = clk;
|
||||
|
||||
ls1x_cpufreq.max_freq = pdata->max_freq;
|
||||
ls1x_cpufreq.min_freq = pdata->min_freq;
|
||||
|
||||
ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
|
||||
if (ret) {
|
||||
dev_err(ls1x_cpufreq.dev,
|
||||
"failed to register cpufreq driver: %d\n", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
|
||||
CPUFREQ_TRANSITION_NOTIFIER);
|
||||
|
||||
if (!ret)
|
||||
goto out;
|
||||
|
||||
dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n",
|
||||
ret);
|
||||
|
||||
cpufreq_unregister_driver(&ls1x_cpufreq_driver);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct platform_driver ls1x_cpufreq_platdrv = {
|
||||
.driver = {
|
||||
.name = "ls1x-cpufreq",
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
.probe = ls1x_cpufreq_probe,
|
||||
.remove = ls1x_cpufreq_remove,
|
||||
};
|
||||
|
||||
module_platform_driver(ls1x_cpufreq_platdrv);
|
||||
|
||||
MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
|
||||
MODULE_DESCRIPTION("Loongson 1 CPUFreq driver");
|
||||
MODULE_LICENSE("GPL");
|
|
@ -603,6 +603,13 @@ static void __exit pcc_cpufreq_exit(void)
|
|||
free_percpu(pcc_cpu_info);
|
||||
}
|
||||
|
||||
static const struct acpi_device_id processor_device_ids[] = {
|
||||
{ACPI_PROCESSOR_OBJECT_HID, },
|
||||
{ACPI_PROCESSOR_DEVICE_HID, },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
|
||||
|
||||
MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
|
||||
MODULE_VERSION(PCC_VERSION);
|
||||
MODULE_DESCRIPTION("Processor Clocking Control interface driver");
|
||||
|
|
|
@ -217,26 +217,26 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
|
|||
|
||||
|
||||
struct cpufreq_driver {
|
||||
char name[CPUFREQ_NAME_LEN];
|
||||
u8 flags;
|
||||
void *driver_data;
|
||||
char name[CPUFREQ_NAME_LEN];
|
||||
u8 flags;
|
||||
void *driver_data;
|
||||
|
||||
/* needed by all drivers */
|
||||
int (*init) (struct cpufreq_policy *policy);
|
||||
int (*verify) (struct cpufreq_policy *policy);
|
||||
int (*init)(struct cpufreq_policy *policy);
|
||||
int (*verify)(struct cpufreq_policy *policy);
|
||||
|
||||
/* define one out of two */
|
||||
int (*setpolicy) (struct cpufreq_policy *policy);
|
||||
int (*setpolicy)(struct cpufreq_policy *policy);
|
||||
|
||||
/*
|
||||
* On failure, should always restore frequency to policy->restore_freq
|
||||
* (i.e. old freq).
|
||||
*/
|
||||
int (*target) (struct cpufreq_policy *policy, /* Deprecated */
|
||||
unsigned int target_freq,
|
||||
unsigned int relation);
|
||||
int (*target_index) (struct cpufreq_policy *policy,
|
||||
unsigned int index);
|
||||
int (*target)(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq,
|
||||
unsigned int relation); /* Deprecated */
|
||||
int (*target_index)(struct cpufreq_policy *policy,
|
||||
unsigned int index);
|
||||
/*
|
||||
* Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
|
||||
* unset.
|
||||
|
@ -252,27 +252,31 @@ struct cpufreq_driver {
|
|||
* wish to switch to intermediate frequency for some target frequency.
|
||||
* In that case core will directly call ->target_index().
|
||||
*/
|
||||
unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
|
||||
unsigned int index);
|
||||
int (*target_intermediate)(struct cpufreq_policy *policy,
|
||||
unsigned int index);
|
||||
unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
|
||||
unsigned int index);
|
||||
int (*target_intermediate)(struct cpufreq_policy *policy,
|
||||
unsigned int index);
|
||||
|
||||
/* should be defined, if possible */
|
||||
unsigned int (*get) (unsigned int cpu);
|
||||
unsigned int (*get)(unsigned int cpu);
|
||||
|
||||
/* optional */
|
||||
int (*bios_limit) (int cpu, unsigned int *limit);
|
||||
int (*bios_limit)(int cpu, unsigned int *limit);
|
||||
|
||||
int (*exit) (struct cpufreq_policy *policy);
|
||||
void (*stop_cpu) (struct cpufreq_policy *policy);
|
||||
int (*suspend) (struct cpufreq_policy *policy);
|
||||
int (*resume) (struct cpufreq_policy *policy);
|
||||
struct freq_attr **attr;
|
||||
int (*exit)(struct cpufreq_policy *policy);
|
||||
void (*stop_cpu)(struct cpufreq_policy *policy);
|
||||
int (*suspend)(struct cpufreq_policy *policy);
|
||||
int (*resume)(struct cpufreq_policy *policy);
|
||||
|
||||
/* Will be called after the driver is fully initialized */
|
||||
void (*ready)(struct cpufreq_policy *policy);
|
||||
|
||||
struct freq_attr **attr;
|
||||
|
||||
/* platform specific boost support code */
|
||||
bool boost_supported;
|
||||
bool boost_enabled;
|
||||
int (*set_boost) (int state);
|
||||
bool boost_supported;
|
||||
bool boost_enabled;
|
||||
int (*set_boost)(int state);
|
||||
};
|
||||
|
||||
/* flags */
|
||||
|