Merge back earlier cpufreq material.

Conflicts:
	arch/mips/loongson/lemote-2f/clock.c
	drivers/cpufreq/intel_pstate.c

This commit is contained in:
Commit 5ece239918
@@ -128,7 +128,7 @@ Description:	Discover cpuidle policy and mechanism

 What:		/sys/devices/system/cpu/cpu#/cpufreq/*
 Date:		pre-git history
-Contact:	cpufreq@vger.kernel.org
+Contact:	linux-pm@vger.kernel.org
 Description:	Discover and change clock speed of CPUs

 		Clock scaling allows you to change the clock speed of the
@@ -146,7 +146,7 @@ Description:	Discover and change clock speed of CPUs

 What:		/sys/devices/system/cpu/cpu#/cpufreq/freqdomain_cpus
 Date:		June 2013
-Contact:	cpufreq@vger.kernel.org
+Contact:	linux-pm@vger.kernel.org
 Description:	Discover CPUs in the same CPU frequency coordination domain

 		freqdomain_cpus is the list of CPUs (online+offline) that share
@@ -20,6 +20,7 @@ Contents:
 ---------
 1. CPUFreq core and interfaces
 2. CPUFreq notifiers
+3. CPUFreq Table Generation with Operating Performance Point (OPP)

 1. General Information
 =======================

@@ -92,3 +93,31 @@ values:
 cpu	- number of the affected CPU
 old	- old frequency
 new	- new frequency
+
+3. CPUFreq Table Generation with Operating Performance Point (OPP)
+==================================================================
+For details about OPP, see Documentation/power/opp.txt
+
+dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
+	cpufreq_frequency_table_cpuinfo which is provided with the list of
+	frequencies that are available for operation. This function provides
+	a ready to use conversion routine to translate the OPP layer's internal
+	information about the available frequencies into a format readily
+	providable to cpufreq.
+
+	WARNING: Do not use this function in interrupt context.
+
+	Example:
+	 soc_pm_init()
+	 {
+		/* Do things */
+		r = dev_pm_opp_init_cpufreq_table(dev, &freq_table);
+		if (!r)
+			cpufreq_frequency_table_cpuinfo(policy, freq_table);
+		/* Do other things */
+	 }
+
+	NOTE:   This function is available only if CONFIG_CPU_FREQ is enabled in
+	addition to CONFIG_PM_OPP.
+
+dev_pm_opp_free_cpufreq_table - Free up the table allocated by dev_pm_opp_init_cpufreq_table
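(For completeness, a minimal sketch of the matching teardown path -- not part of this patch. It assumes the same dev/freq_table pair as in the example above, and "soc_pm_exit" is a hypothetical helper name:)

	 soc_pm_exit()
	 {
		/* Do things */
		dev_pm_opp_free_cpufreq_table(dev, &freq_table);
		/* Do other things */
	 }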
@@ -228,3 +228,22 @@ is the corresponding frequency table helper for the ->target
 stage. Just pass the values to this function, and the unsigned int
 index returns the number of the frequency table entry which contains
 the frequency the CPU shall be set to.
+
+The following macros can be used as iterators over cpufreq_frequency_table:
+
+cpufreq_for_each_entry(pos, table) - iterates over all entries of frequency
+table.
+
+cpufreq_for_each_valid_entry(pos, table) - iterates over all entries,
+excluding CPUFREQ_ENTRY_INVALID frequencies.
+Use arguments "pos" - a cpufreq_frequency_table * as a loop cursor and
+"table" - the cpufreq_frequency_table * you want to iterate over.
+
+For example:
+
+	struct cpufreq_frequency_table *pos, *driver_freq_table;
+
+	cpufreq_for_each_entry(pos, driver_freq_table) {
+		/* Do something with pos */
+		pos->frequency = ...
+	}
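(Not part of this hunk -- a sketch of the valid-entry iterator under the same assumptions as the example above; pr_debug is used only for illustration:)

	struct cpufreq_frequency_table *pos, *driver_freq_table;

	cpufreq_for_each_valid_entry(pos, driver_freq_table) {
		/* only entries that are not CPUFREQ_ENTRY_INVALID reach here */
		pr_debug("usable frequency: %u kHz\n", pos->frequency);
	}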
@@ -35,8 +35,8 @@ Mailing List
 ------------
 There is a CPU frequency changing CVS commit and general list where
 you can report bugs, problems or submit patches. To post a message,
-send an email to cpufreq@vger.kernel.org, to subscribe go to
-http://vger.kernel.org/vger-lists.html#cpufreq and follow the
+send an email to linux-pm@vger.kernel.org, to subscribe go to
+http://vger.kernel.org/vger-lists.html#linux-pm and follow the
 instructions there.

 Links
@@ -10,8 +10,7 @@ Contents
 3. OPP Search Functions
 4. OPP Availability Control Functions
 5. OPP Data Retrieval Functions
-6. Cpufreq Table Generation
-7. Data Structures
+6. Data Structures

 1. Introduction
 ===============
@@ -72,7 +71,6 @@ operations until that OPP could be re-enabled if possible.
 OPP library facilitates this concept in it's implementation. The following
 operational functions operate only on available opps:
 opp_find_freq_{ceil, floor}, dev_pm_opp_get_voltage, dev_pm_opp_get_freq, dev_pm_opp_get_opp_count
-and dev_pm_opp_init_cpufreq_table

 dev_pm_opp_find_freq_exact is meant to be used to find the opp pointer which can then
 be used for dev_pm_opp_enable/disable functions to make an opp available as required.
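(A sketch of that find-then-enable pattern -- not from this patch; "dev" and "freq_hz" are assumed to exist in the caller:)

	struct dev_pm_opp *opp;

	/* look the OPP up even if it is currently disabled ... */
	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, false);
	/* ... and, if it exists, make it available again */
	if (!IS_ERR(opp))
		dev_pm_opp_enable(dev, freq_hz);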
@@ -96,10 +94,9 @@ using RCU read locks. The opp_find_freq_{exact,ceil,floor},
 opp_get_{voltage, freq, opp_count} fall into this category.

 opp_{add,enable,disable} are updaters which use mutex and implement it's own
-RCU locking mechanisms. dev_pm_opp_init_cpufreq_table acts as an updater and uses
-mutex to implment RCU updater strategy. These functions should *NOT* be called
-under RCU locks and other contexts that prevent blocking functions in RCU or
-mutex operations from working.
+RCU locking mechanisms. These functions should *NOT* be called under RCU locks
+and other contexts that prevent blocking functions in RCU or mutex operations
+from working.

 2. Initial OPP List Registration
 ================================
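(Reader-side illustration -- a sketch, not part of this patch. "dev" and "target_hz" are assumed to exist in the caller, and dev_pm_opp_get_voltage() is assumed to take the opp pointer returned by the search:)

	struct dev_pm_opp *opp;
	unsigned long freq = target_hz;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (!IS_ERR(opp))
		pr_debug("ceil OPP: %lu Hz, %lu uV\n", freq,
			 dev_pm_opp_get_voltage(opp));
	rcu_read_unlock();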
@@ -311,34 +308,7 @@ dev_pm_opp_get_opp_count - Retrieve the number of available opps for a device
		/* Do other things */
	 }

-6. Cpufreq Table Generation
-===========================
-dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
-	cpufreq_frequency_table_cpuinfo which is provided with the list of
-	frequencies that are available for operation. This function provides
-	a ready to use conversion routine to translate the OPP layer's internal
-	information about the available frequencies into a format readily
-	providable to cpufreq.
-
-	WARNING: Do not use this function in interrupt context.
-
-	Example:
-	 soc_pm_init()
-	 {
-		/* Do things */
-		r = dev_pm_opp_init_cpufreq_table(dev, &freq_table);
-		if (!r)
-			cpufreq_frequency_table_cpuinfo(policy, freq_table);
-		/* Do other things */
-	 }
-
-	NOTE:   This function is available only if CONFIG_CPU_FREQ is enabled in
-	addition to CONFIG_PM as power management feature is required to
-	dynamically scale voltage and frequency in a system.
-
-dev_pm_opp_free_cpufreq_table - Free up the table allocated by dev_pm_opp_init_cpufreq_table
-
-7. Data Structures
+6. Data Structures
 ==================
 Typically an SoC contains multiple voltage domains which are variable. Each
 domain is represented by a device pointer. The relationship to OPP can be
@@ -2410,7 +2410,6 @@ F:	drivers/net/ethernet/ti/cpmac.c
 CPU FREQUENCY DRIVERS
 M:	Rafael J. Wysocki <rjw@rjwysocki.net>
 M:	Viresh Kumar <viresh.kumar@linaro.org>
-L:	cpufreq@vger.kernel.org
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git

@@ -2421,7 +2420,6 @@ F:	include/linux/cpufreq.h
 CPU FREQUENCY DRIVERS - ARM BIG LITTLE
 M:	Viresh Kumar <viresh.kumar@linaro.org>
 M:	Sudeep Holla <sudeep.holla@arm.com>
-L:	cpufreq@vger.kernel.org
 L:	linux-pm@vger.kernel.org
 W:	http://www.arm.com/products/processors/technologies/biglittleprocessing.php
 S:	Maintained
@@ -1092,20 +1092,21 @@ int da850_register_cpufreq(char *async_clk)

 static int da850_round_armrate(struct clk *clk, unsigned long rate)
 {
-	int i, ret = 0, diff;
+	int ret = 0, diff;
 	unsigned int best = (unsigned int) -1;
 	struct cpufreq_frequency_table *table = cpufreq_info.freq_table;
+	struct cpufreq_frequency_table *pos;

 	rate /= 1000; /* convert to kHz */

-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		diff = table[i].frequency - rate;
+	cpufreq_for_each_entry(pos, table) {
+		diff = pos->frequency - rate;
 		if (diff < 0)
 			diff = -diff;

 		if (diff < best) {
 			best = diff;
-			ret = table[i].frequency;
+			ret = pos->frequency;
 		}
 	}

@@ -91,10 +91,9 @@ EXPORT_SYMBOL(clk_put);

 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
-	unsigned int rate_khz = rate / 1000;
+	struct cpufreq_frequency_table *pos;
 	int ret = 0;
 	int regval;
-	int i;

 	if (likely(clk->ops && clk->ops->set_rate)) {
 		unsigned long flags;

@@ -107,22 +106,16 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
 	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
 		propagate_rate(clk);

-	for (i = 0; loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END;
-	     i++) {
-		if (loongson2_clockmod_table[i].frequency ==
-		    CPUFREQ_ENTRY_INVALID)
-			continue;
-		if (rate_khz == loongson2_clockmod_table[i].frequency)
+	cpufreq_for_each_valid_entry(pos, loongson2_clockmod_table)
+		if (rate == pos->frequency)
 			break;
-	}
-	if (rate_khz != loongson2_clockmod_table[i].frequency)
+	if (rate != pos->frequency)
 		return -ENOTSUPP;

 	clk->rate = rate;

 	regval = LOONGSON_CHIPCFG0;
-	regval = (regval & ~0x7) |
-		(loongson2_clockmod_table[i].driver_data - 1);
+	regval = (regval & ~0x7) | (pos->driver_data - 1);
 	LOONGSON_CHIPCFG0 = regval;

 	return ret;
@@ -15,7 +15,6 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/slab.h>
-#include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/list.h>
 #include <linux/rculist.h>
@ -596,96 +595,6 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
|
||||
|
||||
#ifdef CONFIG_CPU_FREQ
|
||||
/**
|
||||
* dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
|
||||
* @dev: device for which we do this operation
|
||||
* @table: Cpufreq table returned back to caller
|
||||
*
|
||||
* Generate a cpufreq table for a provided device- this assumes that the
|
||||
* opp list is already initialized and ready for usage.
|
||||
*
|
||||
* This function allocates required memory for the cpufreq table. It is
|
||||
* expected that the caller does the required maintenance such as freeing
|
||||
* the table as required.
|
||||
*
|
||||
* Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
|
||||
* if no memory available for the operation (table is not populated), returns 0
|
||||
* if successful and table is populated.
|
||||
*
|
||||
* WARNING: It is important for the callers to ensure refreshing their copy of
|
||||
* the table if any of the mentioned functions have been invoked in the interim.
|
||||
*
|
||||
* Locking: The internal device_opp and opp structures are RCU protected.
|
||||
* To simplify the logic, we pretend we are updater and hold relevant mutex here
|
||||
* Callers should ensure that this function is *NOT* called under RCU protection
|
||||
* or in contexts where mutex locking cannot be used.
|
||||
*/
|
||||
int dev_pm_opp_init_cpufreq_table(struct device *dev,
|
||||
struct cpufreq_frequency_table **table)
|
||||
{
|
||||
struct device_opp *dev_opp;
|
||||
struct dev_pm_opp *opp;
|
||||
struct cpufreq_frequency_table *freq_table;
|
||||
int i = 0;
|
||||
|
||||
/* Pretend as if I am an updater */
|
||||
mutex_lock(&dev_opp_list_lock);
|
||||
|
||||
dev_opp = find_device_opp(dev);
|
||||
if (IS_ERR(dev_opp)) {
|
||||
int r = PTR_ERR(dev_opp);
|
||||
mutex_unlock(&dev_opp_list_lock);
|
||||
dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
|
||||
return r;
|
||||
}
|
||||
|
||||
freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
|
||||
(dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
|
||||
if (!freq_table) {
|
||||
mutex_unlock(&dev_opp_list_lock);
|
||||
dev_warn(dev, "%s: Unable to allocate frequency table\n",
|
||||
__func__);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
list_for_each_entry(opp, &dev_opp->opp_list, node) {
|
||||
if (opp->available) {
|
||||
freq_table[i].driver_data = i;
|
||||
freq_table[i].frequency = opp->rate / 1000;
|
||||
i++;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&dev_opp_list_lock);
|
||||
|
||||
freq_table[i].driver_data = i;
|
||||
freq_table[i].frequency = CPUFREQ_TABLE_END;
|
||||
|
||||
*table = &freq_table[0];
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
|
||||
|
||||
/**
|
||||
* dev_pm_opp_free_cpufreq_table() - free the cpufreq table
|
||||
* @dev: device for which we do this operation
|
||||
* @table: table to free
|
||||
*
|
||||
* Free up the table allocated by dev_pm_opp_init_cpufreq_table
|
||||
*/
|
||||
void dev_pm_opp_free_cpufreq_table(struct device *dev,
|
||||
struct cpufreq_frequency_table **table)
|
||||
{
|
||||
if (!table)
|
||||
return;
|
||||
|
||||
kfree(*table);
|
||||
*table = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
|
||||
#endif /* CONFIG_CPU_FREQ */
|
||||
|
||||
/**
|
||||
* dev_pm_opp_get_notifier() - find notifier_head of the device with opp
|
||||
* @dev: device pointer used to lookup device OPPs.
|
||||
|
|
|
@@ -5,7 +5,8 @@
 # big LITTLE core layer and glue drivers
 config ARM_BIG_LITTLE_CPUFREQ
 	tristate "Generic ARM big LITTLE CPUfreq driver"
-	depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
+	depends on (BIG_LITTLE && ARM_CPU_TOPOLOGY) || (ARM64 && SMP)
+	depends on HAVE_CLK
 	select PM_OPP
 	help
 	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -85,7 +86,7 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
 	  It allows usage of special frequencies for Samsung Exynos
 	  processors if thermal conditions are appropriate.

-	  It reguires, for safe operation, thermal framework with properly
+	  It requires, for safe operation, thermal framework with properly
 	  defined trip points.

 	  If in doubt, say N.
@@ -186,7 +187,7 @@ config ARM_S3C2416_CPUFREQ
 	  S3C2450 SoC. The S3C2416 supports changing the rate of the
 	  armdiv clock source and also entering a so called dynamic
 	  voltage scaling mode in which it is possible to reduce the
-	  core voltage of the cpu.
+	  core voltage of the CPU.

 	  If in doubt, say N.

@@ -10,7 +10,7 @@ config X86_INTEL_PSTATE
 	  The driver implements an internal governor and will become
 	  the scaling driver and governor for Sandy bridge processors.

-	  When this driver is enabled it will become the perferred
+	  When this driver is enabled it will become the preferred
 	  scaling driver for Sandy bridge processors.

 	  If in doubt, say N.
@@ -52,7 +52,7 @@ config X86_ACPI_CPUFREQ_CPB
 	help
 	  The powernow-k8 driver used to provide a sysfs knob called "cpb"
 	  to disable the Core Performance Boosting feature of AMD CPUs. This
-	  file has now been superseeded by the more generic "boost" entry.
+	  file has now been superseded by the more generic "boost" entry.

 	  By enabling this option the acpi_cpufreq driver provides the old
 	  entry in addition to the new boost ones, for compatibility reasons.
@@ -1,5 +1,7 @@
 # CPUfreq core
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq.o freq_table.o
+obj-$(CONFIG_PM_OPP)		+= cpufreq_opp.o

 # CPUfreq stats
 obj-$(CONFIG_CPU_FREQ_STAT)	+= cpufreq_stats.o

@@ -213,7 +213,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)

 static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
 {
-	int i;
+	struct cpufreq_frequency_table *pos;
 	struct acpi_processor_performance *perf;

 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)

@@ -223,10 +223,9 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)

 	perf = data->acpi_data;

-	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		if (msr == perf->states[data->freq_table[i].driver_data].status)
-			return data->freq_table[i].frequency;
-	}
+	cpufreq_for_each_entry(pos, data->freq_table)
+		if (msr == perf->states[pos->driver_data].status)
+			return pos->frequency;
 	return data->freq_table[0].frequency;
 }

@@ -226,22 +226,22 @@ static inline u32 get_table_count(struct cpufreq_frequency_table *table)
 /* get the minimum frequency in the cpufreq_frequency_table */
 static inline u32 get_table_min(struct cpufreq_frequency_table *table)
 {
-	int i;
+	struct cpufreq_frequency_table *pos;
 	uint32_t min_freq = ~0;
-	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
-		if (table[i].frequency < min_freq)
-			min_freq = table[i].frequency;
+	cpufreq_for_each_entry(pos, table)
+		if (pos->frequency < min_freq)
+			min_freq = pos->frequency;
 	return min_freq;
 }

 /* get the maximum frequency in the cpufreq_frequency_table */
 static inline u32 get_table_max(struct cpufreq_frequency_table *table)
 {
-	int i;
+	struct cpufreq_frequency_table *pos;
 	uint32_t max_freq = 0;
-	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
-		if (table[i].frequency > max_freq)
-			max_freq = table[i].frequency;
+	cpufreq_for_each_entry(pos, table)
+		if (pos->frequency > max_freq)
+			max_freq = pos->frequency;
 	return max_freq;
 }

@@ -379,7 +379,7 @@ static struct cpufreq_driver nforce2_driver = {
 };

 #ifdef MODULE
-static DEFINE_PCI_DEVICE_TABLE(nforce2_ids) = {
+static const struct pci_device_id nforce2_ids[] = {
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2 },
 	{}
 };
@ -354,6 +354,18 @@ static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
|
|||
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
|
||||
struct cpufreq_freqs *freqs)
|
||||
{
|
||||
|
||||
/*
|
||||
* Catch double invocations of _begin() which lead to self-deadlock.
|
||||
* ASYNC_NOTIFICATION drivers are left out because the cpufreq core
|
||||
* doesn't invoke _begin() on their behalf, and hence the chances of
|
||||
* double invocations are very low. Moreover, there are scenarios
|
||||
* where these checks can emit false-positive warnings in these
|
||||
* drivers; so we avoid that by skipping them altogether.
|
||||
*/
|
||||
WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
|
||||
&& current == policy->transition_task);
|
||||
|
||||
wait:
|
||||
wait_event(policy->transition_wait, !policy->transition_ongoing);
|
||||
|
||||
|
@ -365,6 +377,7 @@ wait:
|
|||
}
|
||||
|
||||
policy->transition_ongoing = true;
|
||||
policy->transition_task = current;
|
||||
|
||||
spin_unlock(&policy->transition_lock);
|
||||
|
||||
|
@ -381,6 +394,7 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
|
|||
cpufreq_notify_post_transition(policy, freqs, transition_failed);
|
||||
|
||||
policy->transition_ongoing = false;
|
||||
policy->transition_task = NULL;
|
||||
|
||||
wake_up(&policy->transition_wait);
|
||||
}
|
||||
|
@ -1802,12 +1816,43 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
|
|||
* GOVERNORS *
|
||||
*********************************************************************/
|
||||
|
||||
static int __target_index(struct cpufreq_policy *policy,
|
||||
struct cpufreq_frequency_table *freq_table, int index)
|
||||
{
|
||||
struct cpufreq_freqs freqs;
|
||||
int retval = -EINVAL;
|
||||
bool notify;
|
||||
|
||||
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
|
||||
|
||||
if (notify) {
|
||||
freqs.old = policy->cur;
|
||||
freqs.new = freq_table[index].frequency;
|
||||
freqs.flags = 0;
|
||||
|
||||
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
|
||||
__func__, policy->cpu, freqs.old, freqs.new);
|
||||
|
||||
cpufreq_freq_transition_begin(policy, &freqs);
|
||||
}
|
||||
|
||||
retval = cpufreq_driver->target_index(policy, index);
|
||||
if (retval)
|
||||
pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
|
||||
retval);
|
||||
|
||||
if (notify)
|
||||
cpufreq_freq_transition_end(policy, &freqs, retval);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
int __cpufreq_driver_target(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq,
|
||||
unsigned int relation)
|
||||
{
|
||||
int retval = -EINVAL;
|
||||
unsigned int old_target_freq = target_freq;
|
||||
int retval = -EINVAL;
|
||||
|
||||
if (cpufreq_disabled())
|
||||
return -ENODEV;
|
||||
|
@ -1834,8 +1879,6 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
|
|||
retval = cpufreq_driver->target(policy, target_freq, relation);
|
||||
else if (cpufreq_driver->target_index) {
|
||||
struct cpufreq_frequency_table *freq_table;
|
||||
struct cpufreq_freqs freqs;
|
||||
bool notify;
|
||||
int index;
|
||||
|
||||
freq_table = cpufreq_frequency_get_table(policy->cpu);
|
||||
|
@ -1856,26 +1899,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
|
|||
goto out;
|
||||
}
|
||||
|
||||
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
|
||||
|
||||
if (notify) {
|
||||
freqs.old = policy->cur;
|
||||
freqs.new = freq_table[index].frequency;
|
||||
freqs.flags = 0;
|
||||
|
||||
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
|
||||
__func__, policy->cpu, freqs.old, freqs.new);
|
||||
|
||||
cpufreq_freq_transition_begin(policy, &freqs);
|
||||
}
|
||||
|
||||
retval = cpufreq_driver->target_index(policy, index);
|
||||
if (retval)
|
||||
pr_err("%s: Failed to change cpu frequency: %d\n",
|
||||
__func__, retval);
|
||||
|
||||
if (notify)
|
||||
cpufreq_freq_transition_end(policy, &freqs, retval);
|
||||
retval = __target_index(policy, freq_table, index);
|
||||
}
|
||||
|
||||
out:
|
||||
|
|
|
@ -0,0 +1,110 @@
|
|||
/*
|
||||
* Generic OPP helper interface for CPUFreq drivers
|
||||
*
|
||||
* Copyright (C) 2009-2014 Texas Instruments Incorporated.
|
||||
* Nishanth Menon
|
||||
* Romit Dasgupta
|
||||
* Kevin Hilman
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pm_opp.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
/**
|
||||
* dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
|
||||
* @dev: device for which we do this operation
|
||||
* @table: Cpufreq table returned back to caller
|
||||
*
|
||||
* Generate a cpufreq table for a provided device- this assumes that the
|
||||
* opp list is already initialized and ready for usage.
|
||||
*
|
||||
* This function allocates required memory for the cpufreq table. It is
|
||||
* expected that the caller does the required maintenance such as freeing
|
||||
* the table as required.
|
||||
*
|
||||
* Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
|
||||
* if no memory available for the operation (table is not populated), returns 0
|
||||
* if successful and table is populated.
|
||||
*
|
||||
* WARNING: It is important for the callers to ensure refreshing their copy of
|
||||
* the table if any of the mentioned functions have been invoked in the interim.
|
||||
*
|
||||
* Locking: The internal device_opp and opp structures are RCU protected.
|
||||
* Since we just use the regular accessor functions to access the internal data
|
||||
* structures, we use RCU read lock inside this function. As a result, users of
|
||||
* this function DONOT need to use explicit locks for invoking.
|
||||
*/
|
||||
int dev_pm_opp_init_cpufreq_table(struct device *dev,
|
||||
struct cpufreq_frequency_table **table)
|
||||
{
|
||||
struct dev_pm_opp *opp;
|
||||
struct cpufreq_frequency_table *freq_table = NULL;
|
||||
int i, max_opps, ret = 0;
|
||||
unsigned long rate;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
max_opps = dev_pm_opp_get_opp_count(dev);
|
||||
if (max_opps <= 0) {
|
||||
ret = max_opps ? max_opps : -ENODATA;
|
||||
goto out;
|
||||
}
|
||||
|
||||
freq_table = kzalloc(sizeof(*freq_table) * (max_opps + 1), GFP_KERNEL);
|
||||
if (!freq_table) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0, rate = 0; i < max_opps; i++, rate++) {
|
||||
/* find next rate */
|
||||
opp = dev_pm_opp_find_freq_ceil(dev, &rate);
|
||||
if (IS_ERR(opp)) {
|
||||
ret = PTR_ERR(opp);
|
||||
goto out;
|
||||
}
|
||||
freq_table[i].driver_data = i;
|
||||
freq_table[i].frequency = rate / 1000;
|
||||
}
|
||||
|
||||
freq_table[i].driver_data = i;
|
||||
freq_table[i].frequency = CPUFREQ_TABLE_END;
|
||||
|
||||
*table = &freq_table[0];
|
||||
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
if (ret)
|
||||
kfree(freq_table);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
|
||||
|
||||
/**
|
||||
* dev_pm_opp_free_cpufreq_table() - free the cpufreq table
|
||||
* @dev: device for which we do this operation
|
||||
* @table: table to free
|
||||
*
|
||||
* Free up the table allocated by dev_pm_opp_init_cpufreq_table
|
||||
*/
|
||||
void dev_pm_opp_free_cpufreq_table(struct device *dev,
|
||||
struct cpufreq_frequency_table **table)
|
||||
{
|
||||
if (!table)
|
||||
return;
|
||||
|
||||
kfree(*table);
|
||||
*table = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
|
|
@ -182,11 +182,11 @@ static void cpufreq_stats_free_table(unsigned int cpu)
|
|||
|
||||
static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
|
||||
{
|
||||
unsigned int i, j, count = 0, ret = 0;
|
||||
unsigned int i, count = 0, ret = 0;
|
||||
struct cpufreq_stats *stat;
|
||||
unsigned int alloc_size;
|
||||
unsigned int cpu = policy->cpu;
|
||||
struct cpufreq_frequency_table *table;
|
||||
struct cpufreq_frequency_table *pos, *table;
|
||||
|
||||
table = cpufreq_frequency_get_table(cpu);
|
||||
if (unlikely(!table))
|
||||
|
@ -205,12 +205,8 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
|
|||
stat->cpu = cpu;
|
||||
per_cpu(cpufreq_stats_table, cpu) = stat;
|
||||
|
||||
for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
|
||||
unsigned int freq = table[i].frequency;
|
||||
if (freq == CPUFREQ_ENTRY_INVALID)
|
||||
continue;
|
||||
cpufreq_for_each_valid_entry(pos, table)
|
||||
count++;
|
||||
}
|
||||
|
||||
alloc_size = count * sizeof(int) + count * sizeof(u64);
|
||||
|
||||
|
@ -228,15 +224,11 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
|
|||
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
|
||||
stat->trans_table = stat->freq_table + count;
|
||||
#endif
|
||||
j = 0;
|
||||
for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
|
||||
unsigned int freq = table[i].frequency;
|
||||
if (freq == CPUFREQ_ENTRY_INVALID)
|
||||
continue;
|
||||
if (freq_table_get_index(stat, freq) == -1)
|
||||
stat->freq_table[j++] = freq;
|
||||
}
|
||||
stat->state_num = j;
|
||||
i = 0;
|
||||
cpufreq_for_each_valid_entry(pos, table)
|
||||
if (freq_table_get_index(stat, pos->frequency) == -1)
|
||||
stat->freq_table[i++] = pos->frequency;
|
||||
stat->state_num = i;
|
||||
spin_lock(&cpufreq_stats_lock);
|
||||
stat->last_time = get_jiffies_64();
|
||||
stat->last_index = freq_table_get_index(stat, policy->cur);
|
||||
|
|
|
@ -45,7 +45,7 @@ static struct cpufreq_driver dbx500_cpufreq_driver = {
|
|||
|
||||
static int dbx500_cpufreq_probe(struct platform_device *pdev)
|
||||
{
|
||||
int i = 0;
|
||||
struct cpufreq_frequency_table *pos;
|
||||
|
||||
freq_table = dev_get_platdata(&pdev->dev);
|
||||
if (!freq_table) {
|
||||
|
@ -60,10 +60,8 @@ static int dbx500_cpufreq_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
pr_info("dbx500-cpufreq: Available frequencies:\n");
|
||||
while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
|
||||
pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
|
||||
i++;
|
||||
}
|
||||
cpufreq_for_each_entry(pos, freq_table)
|
||||
pr_info(" %d Mhz\n", pos->frequency / 1000);
|
||||
|
||||
return cpufreq_register_driver(&dbx500_cpufreq_driver);
|
||||
}
|
||||
|
|
|
@ -147,7 +147,7 @@ static int elanfreq_target(struct cpufreq_policy *policy,
|
|||
static int elanfreq_cpu_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct cpuinfo_x86 *c = &cpu_data(0);
|
||||
unsigned int i;
|
||||
struct cpufreq_frequency_table *pos;
|
||||
|
||||
/* capability check */
|
||||
if ((c->x86_vendor != X86_VENDOR_AMD) ||
|
||||
|
@ -159,10 +159,9 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
|
|||
max_freq = elanfreq_get_cpu_frequency(0);
|
||||
|
||||
/* table init */
|
||||
for (i = 0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
|
||||
if (elanfreq_table[i].frequency > max_freq)
|
||||
elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
|
||||
}
|
||||
cpufreq_for_each_entry(pos, elanfreq_table)
|
||||
if (pos->frequency > max_freq)
|
||||
pos->frequency = CPUFREQ_ENTRY_INVALID;
|
||||
|
||||
/* cpuinfo and default policy values */
|
||||
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
|
||||
|
|
|
@ -29,17 +29,16 @@ static unsigned int locking_frequency;
|
|||
static int exynos_cpufreq_get_index(unsigned int freq)
|
||||
{
|
||||
struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
|
||||
int index;
|
||||
struct cpufreq_frequency_table *pos;
|
||||
|
||||
for (index = 0;
|
||||
freq_table[index].frequency != CPUFREQ_TABLE_END; index++)
|
||||
if (freq_table[index].frequency == freq)
|
||||
cpufreq_for_each_entry(pos, freq_table)
|
||||
if (pos->frequency == freq)
|
||||
break;
|
||||
|
||||
if (freq_table[index].frequency == CPUFREQ_TABLE_END)
|
||||
if (pos->frequency == CPUFREQ_TABLE_END)
|
||||
return -EINVAL;
|
||||
|
||||
return index;
|
||||
return pos - freq_table;
|
||||
}
|
||||
|
||||
static int exynos_cpufreq_scale(unsigned int target_freq)
|
||||
|
@ -49,6 +48,7 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
|
|||
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
|
||||
unsigned int arm_volt, safe_arm_volt = 0;
|
||||
unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
|
||||
struct device *dev = exynos_info->dev;
|
||||
unsigned int old_freq;
|
||||
int index, old_index;
|
||||
int ret = 0;
|
||||
|
@ -90,8 +90,8 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
|
|||
/* Firstly, voltage up to increase frequency */
|
||||
ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
|
||||
if (ret) {
|
||||
pr_err("%s: failed to set cpu voltage to %d\n",
|
||||
__func__, arm_volt);
|
||||
dev_err(dev, "failed to set cpu voltage to %d\n",
|
||||
arm_volt);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
@ -100,8 +100,8 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
|
|||
ret = regulator_set_voltage(arm_regulator, safe_arm_volt,
|
||||
safe_arm_volt);
|
||||
if (ret) {
|
||||
pr_err("%s: failed to set cpu voltage to %d\n",
|
||||
__func__, safe_arm_volt);
|
||||
dev_err(dev, "failed to set cpu voltage to %d\n",
|
||||
safe_arm_volt);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
@ -115,8 +115,8 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
|
|||
ret = regulator_set_voltage(arm_regulator, arm_volt,
|
||||
arm_volt);
|
||||
if (ret) {
|
||||
pr_err("%s: failed to set cpu voltage to %d\n",
|
||||
__func__, arm_volt);
|
||||
dev_err(dev, "failed to set cpu voltage to %d\n",
|
||||
arm_volt);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
@ -163,6 +163,8 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
|
|||
if (!exynos_info)
|
||||
return -ENOMEM;
|
||||
|
||||
exynos_info->dev = &pdev->dev;
|
||||
|
||||
if (soc_is_exynos4210())
|
||||
ret = exynos4210_cpufreq_init(exynos_info);
|
||||
else if (soc_is_exynos4212() || soc_is_exynos4412())
|
||||
|
@ -176,13 +178,13 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
|
|||
goto err_vdd_arm;
|
||||
|
||||
if (exynos_info->set_freq == NULL) {
|
||||
pr_err("%s: No set_freq function (ERR)\n", __func__);
|
||||
dev_err(&pdev->dev, "No set_freq function (ERR)\n");
|
||||
goto err_vdd_arm;
|
||||
}
|
||||
|
||||
arm_regulator = regulator_get(NULL, "vdd_arm");
|
||||
if (IS_ERR(arm_regulator)) {
|
||||
pr_err("%s: failed to get resource vdd_arm\n", __func__);
|
||||
dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
|
||||
goto err_vdd_arm;
|
||||
}
|
||||
|
||||
|
@ -192,7 +194,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
|
|||
if (!cpufreq_register_driver(&exynos_driver))
|
||||
return 0;
|
||||
|
||||
pr_err("%s: failed to register cpufreq driver\n", __func__);
|
||||
dev_err(&pdev->dev, "failed to register cpufreq driver\n");
|
||||
regulator_put(arm_regulator);
|
||||
err_vdd_arm:
|
||||
kfree(exynos_info);
|
||||
|
|
|
@ -34,6 +34,7 @@ struct apll_freq {
|
|||
};
|
||||
|
||||
struct exynos_dvfs_info {
|
||||
struct device *dev;
|
||||
unsigned long mpll_freq_khz;
|
||||
unsigned int pll_safe_idx;
|
||||
struct clk *cpu_clk;
|
||||
|
|
|
@ -114,25 +114,23 @@ static struct cpufreq_freqs freqs;
|
|||
|
||||
static int init_div_table(void)
|
||||
{
|
||||
struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
|
||||
struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table;
|
||||
unsigned int tmp, clk_div, ema_div, freq, volt_id;
|
||||
int i = 0;
|
||||
struct dev_pm_opp *opp;
|
||||
|
||||
rcu_read_lock();
|
||||
for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
|
||||
|
||||
cpufreq_for_each_entry(pos, freq_tbl) {
|
||||
opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
|
||||
freq_tbl[i].frequency * 1000, true);
|
||||
pos->frequency * 1000, true);
|
||||
if (IS_ERR(opp)) {
|
||||
rcu_read_unlock();
|
||||
dev_err(dvfs_info->dev,
|
||||
"failed to find valid OPP for %u KHZ\n",
|
||||
freq_tbl[i].frequency);
|
||||
pos->frequency);
|
||||
return PTR_ERR(opp);
|
||||
}
|
||||
|
||||
freq = freq_tbl[i].frequency / 1000; /* In MHZ */
|
||||
freq = pos->frequency / 1000; /* In MHZ */
|
||||
clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
|
||||
<< P0_7_CPUCLKDEV_SHIFT;
|
||||
clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
|
||||
|
@ -157,7 +155,8 @@ static int init_div_table(void)
|
|||
tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
|
||||
| ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));
|
||||
|
||||
__raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i);
|
||||
__raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 *
|
||||
(pos - freq_tbl));
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
@ -166,8 +165,9 @@ static int init_div_table(void)
|
|||
|
||||
static void exynos_enable_dvfs(unsigned int cur_frequency)
|
||||
{
|
||||
unsigned int tmp, i, cpu;
|
||||
unsigned int tmp, cpu;
|
||||
struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
|
||||
struct cpufreq_frequency_table *pos;
|
||||
/* Disable DVFS */
|
||||
__raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);
|
||||
|
||||
|
@ -182,15 +182,15 @@ static void exynos_enable_dvfs(unsigned int cur_frequency)
|
|||
__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
|
||||
|
||||
/* Set initial performance index */
|
||||
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
|
||||
if (freq_table[i].frequency == cur_frequency)
|
||||
cpufreq_for_each_entry(pos, freq_table)
|
||||
if (pos->frequency == cur_frequency)
|
||||
break;
|
||||
|
||||
if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
|
||||
if (pos->frequency == CPUFREQ_TABLE_END) {
|
||||
dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
|
||||
/* Assign the highest frequency */
|
||||
i = 0;
|
||||
cur_frequency = freq_table[i].frequency;
|
||||
pos = freq_table;
|
||||
cur_frequency = pos->frequency;
|
||||
}
|
||||
|
||||
dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
|
||||
|
@ -199,7 +199,7 @@ static void exynos_enable_dvfs(unsigned int cur_frequency)
|
|||
for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
|
||||
tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
|
||||
tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
|
||||
tmp |= (i << C0_3_PSTATE_NEW_SHIFT);
|
||||
tmp |= ((pos - freq_table) << C0_3_PSTATE_NEW_SHIFT);
|
||||
__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
|
||||
}
|
||||
|
||||
|
|
|
@ -21,22 +21,19 @@
|
|||
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
|
||||
struct cpufreq_frequency_table *table)
|
||||
{
|
||||
struct cpufreq_frequency_table *pos;
|
||||
unsigned int min_freq = ~0;
|
||||
unsigned int max_freq = 0;
|
||||
unsigned int i;
|
||||
unsigned int freq;
|
||||
|
||||
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
|
||||
unsigned int freq = table[i].frequency;
|
||||
if (freq == CPUFREQ_ENTRY_INVALID) {
|
||||
pr_debug("table entry %u is invalid, skipping\n", i);
|
||||
cpufreq_for_each_valid_entry(pos, table) {
|
||||
freq = pos->frequency;
|
||||
|
||||
continue;
|
||||
}
|
||||
if (!cpufreq_boost_enabled()
|
||||
&& (table[i].flags & CPUFREQ_BOOST_FREQ))
|
||||
&& (pos->flags & CPUFREQ_BOOST_FREQ))
|
||||
continue;
|
||||
|
||||
pr_debug("table entry %u: %u kHz\n", i, freq);
|
||||
pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
|
||||
if (freq < min_freq)
|
||||
min_freq = freq;
|
||||
if (freq > max_freq)
|
||||
|
@ -57,7 +54,8 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
|
|||
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
|
||||
struct cpufreq_frequency_table *table)
|
||||
{
|
||||
unsigned int next_larger = ~0, freq, i = 0;
|
||||
struct cpufreq_frequency_table *pos;
|
||||
unsigned int freq, next_larger = ~0;
|
||||
bool found = false;
|
||||
|
||||
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
|
||||
|
@ -65,9 +63,9 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
|
|||
|
||||
cpufreq_verify_within_cpu_limits(policy);
|
||||
|
||||
for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) {
|
||||
if (freq == CPUFREQ_ENTRY_INVALID)
|
||||
continue;
|
||||
cpufreq_for_each_valid_entry(pos, table) {
|
||||
freq = pos->frequency;
|
||||
|
||||
if ((freq >= policy->min) && (freq <= policy->max)) {
|
||||
found = true;
|
||||
break;
|
||||
|
@ -118,7 +116,8 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
|
|||
.driver_data = ~0,
|
||||
.frequency = 0,
|
||||
};
|
||||
unsigned int i;
|
||||
struct cpufreq_frequency_table *pos;
|
||||
unsigned int freq, i = 0;
|
||||
|
||||
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
|
||||
target_freq, relation, policy->cpu);
|
||||
|
@ -132,15 +131,19 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
|
|||
break;
|
||||
}
|
||||
|
||||
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
|
||||
unsigned int freq = table[i].frequency;
|
||||
if (freq == CPUFREQ_ENTRY_INVALID)
|
||||
continue;
|
||||
cpufreq_for_each_valid_entry(pos, table) {
|
||||
freq = pos->frequency;
|
||||
|
||||
i = pos - table;
|
||||
if ((freq < policy->min) || (freq > policy->max))
|
||||
continue;
|
||||
if (freq == target_freq) {
|
||||
optimal.driver_data = i;
|
||||
break;
|
||||
}
|
||||
switch (relation) {
|
||||
case CPUFREQ_RELATION_H:
|
||||
if (freq <= target_freq) {
|
||||
if (freq < target_freq) {
|
||||
if (freq >= optimal.frequency) {
|
||||
optimal.frequency = freq;
|
||||
optimal.driver_data = i;
|
||||
|
@ -153,7 +156,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
|
|||
}
|
||||
break;
|
||||
case CPUFREQ_RELATION_L:
|
||||
if (freq >= target_freq) {
|
||||
if (freq > target_freq) {
|
||||
if (freq <= optimal.frequency) {
|
||||
optimal.frequency = freq;
|
||||
optimal.driver_data = i;
|
||||
|
@ -184,8 +187,7 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
|
|||
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
|
||||
unsigned int freq)
|
||||
{
|
||||
struct cpufreq_frequency_table *table;
|
||||
int i;
|
||||
struct cpufreq_frequency_table *pos, *table;
|
||||
|
||||
table = cpufreq_frequency_get_table(policy->cpu);
|
||||
if (unlikely(!table)) {
|
||||
|
@ -193,10 +195,9 @@ int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
|
|||
return -ENOENT;
|
||||
}
|
||||
|
||||
for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
|
||||
if (table[i].frequency == freq)
|
||||
return i;
|
||||
}
|
||||
cpufreq_for_each_valid_entry(pos, table)
|
||||
if (pos->frequency == freq)
|
||||
return pos - table;
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -208,16 +209,13 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
|
|||
static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
|
||||
bool show_boost)
|
||||
{
|
||||
unsigned int i = 0;
|
||||
ssize_t count = 0;
|
||||
struct cpufreq_frequency_table *table = policy->freq_table;
|
||||
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
|
||||
|
||||
if (!table)
|
||||
return -ENODEV;
|
||||
|
||||
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
|
||||
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
|
||||
continue;
|
||||
cpufreq_for_each_valid_entry(pos, table) {
|
||||
/*
|
||||
* show_boost = true and driver_data = BOOST freq
|
||||
* display BOOST freqs
|
||||
|
@ -229,10 +227,10 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
|
|||
* show_boost = false and driver_data != BOOST freq
|
||||
* display NON BOOST freqs
|
||||
*/
|
||||
if (show_boost ^ (table[i].flags & CPUFREQ_BOOST_FREQ))
|
||||
if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
|
||||
continue;
|
||||
|
||||
count += sprintf(&buf[count], "%d ", table[i].frequency);
|
||||
count += sprintf(&buf[count], "%d ", pos->frequency);
|
||||
}
|
||||
count += sprintf(&buf[count], "\n");
|
||||
|
||||
|
|
|
@ -9,7 +9,6 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
|
@ -170,25 +169,25 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
|
|||
return -ENOENT;
|
||||
}
|
||||
|
||||
arm_clk = devm_clk_get(cpu_dev, "arm");
|
||||
pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
|
||||
pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
|
||||
step_clk = devm_clk_get(cpu_dev, "step");
|
||||
pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
|
||||
arm_clk = clk_get(cpu_dev, "arm");
|
||||
pll1_sys_clk = clk_get(cpu_dev, "pll1_sys");
|
||||
pll1_sw_clk = clk_get(cpu_dev, "pll1_sw");
|
||||
step_clk = clk_get(cpu_dev, "step");
|
||||
pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m");
|
||||
if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
|
||||
IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
|
||||
dev_err(cpu_dev, "failed to get clocks\n");
|
||||
ret = -ENOENT;
|
||||
goto put_node;
|
||||
goto put_clk;
|
||||
}
|
||||
|
||||
arm_reg = devm_regulator_get(cpu_dev, "arm");
|
||||
pu_reg = devm_regulator_get(cpu_dev, "pu");
|
||||
soc_reg = devm_regulator_get(cpu_dev, "soc");
|
||||
arm_reg = regulator_get(cpu_dev, "arm");
|
||||
pu_reg = regulator_get(cpu_dev, "pu");
|
||||
soc_reg = regulator_get(cpu_dev, "soc");
|
||||
if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
|
||||
dev_err(cpu_dev, "failed to get regulators\n");
|
||||
ret = -ENOENT;
|
||||
goto put_node;
|
||||
goto put_reg;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -201,21 +200,21 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
|
|||
ret = of_init_opp_table(cpu_dev);
|
||||
if (ret < 0) {
|
||||
dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
|
||||
goto put_node;
|
||||
goto put_reg;
|
||||
}
|
||||
|
||||
num = dev_pm_opp_get_opp_count(cpu_dev);
|
||||
if (num < 0) {
|
||||
ret = num;
|
||||
dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
|
||||
goto put_node;
|
||||
goto put_reg;
|
||||
}
|
||||
}
|
||||
|
||||
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
|
||||
if (ret) {
|
||||
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
|
||||
goto put_node;
|
||||
goto put_reg;
|
||||
}
|
||||
|
||||
/* Make imx6_soc_volt array's size same as arm opp number */
|
||||
|
@ -301,7 +300,24 @@ soc_opp_out:
|
|||
|
||||
free_freq_table:
|
||||
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
|
||||
put_node:
|
||||
put_reg:
|
||||
if (!IS_ERR(arm_reg))
|
||||
regulator_put(arm_reg);
|
||||
if (!IS_ERR(pu_reg))
|
||||
regulator_put(pu_reg);
|
||||
if (!IS_ERR(soc_reg))
|
||||
regulator_put(soc_reg);
|
||||
put_clk:
|
||||
if (!IS_ERR(arm_clk))
|
||||
clk_put(arm_clk);
|
||||
if (!IS_ERR(pll1_sys_clk))
|
||||
clk_put(pll1_sys_clk);
|
||||
if (!IS_ERR(pll1_sw_clk))
|
||||
clk_put(pll1_sw_clk);
|
||||
if (!IS_ERR(step_clk))
|
||||
clk_put(step_clk);
|
||||
if (!IS_ERR(pll2_pfd2_396m_clk))
|
||||
clk_put(pll2_pfd2_396m_clk);
|
||||
of_node_put(np);
|
||||
return ret;
|
||||
}
|
||||
|
@ -310,6 +326,14 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
|
|||
{
|
||||
cpufreq_unregister_driver(&imx6q_cpufreq_driver);
|
||||
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
|
||||
regulator_put(arm_reg);
|
||||
regulator_put(pu_reg);
|
||||
regulator_put(soc_reg);
|
||||
clk_put(arm_clk);
|
||||
clk_put(pll1_sys_clk);
|
||||
clk_put(pll1_sw_clk);
|
||||
clk_put(step_clk);
|
||||
clk_put(pll2_pfd2_396m_clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -32,8 +32,6 @@
|
|||
#include <asm/msr.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
|
||||
#define SAMPLE_COUNT 3
|
||||
|
||||
#define BYT_RATIOS 0x66a
|
||||
#define BYT_VIDS 0x66b
|
||||
#define BYT_TURBO_RATIOS 0x66c
|
||||
|
@ -90,8 +88,6 @@ struct _pid {
|
|||
struct cpudata {
|
||||
int cpu;
|
||||
|
||||
char name[64];
|
||||
|
||||
struct timer_list timer;
|
||||
|
||||
struct pstate_data pstate;
|
||||
|
@ -549,8 +545,6 @@ static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
|
|||
|
||||
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
|
||||
{
|
||||
sprintf(cpu->name, "Intel 2nd generation core");
|
||||
|
||||
cpu->pstate.min_pstate = pstate_funcs.get_min();
|
||||
cpu->pstate.max_pstate = pstate_funcs.get_max();
|
||||
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
|
||||
|
@ -560,9 +554,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
|
|||
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
|
||||
}
|
||||
|
||||
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
|
||||
struct sample *sample)
|
||||
static inline void intel_pstate_calc_busy(struct cpudata *cpu)
|
||||
{
|
||||
struct sample *sample = &cpu->sample;
|
||||
int64_t core_pct;
|
||||
int32_t rem;
|
||||
|
||||
|
@ -595,7 +589,7 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
|
|||
cpu->sample.aperf -= cpu->prev_aperf;
|
||||
cpu->sample.mperf -= cpu->prev_mperf;
|
||||
|
||||
intel_pstate_calc_busy(cpu, &cpu->sample);
|
||||
intel_pstate_calc_busy(cpu);
|
||||
|
||||
cpu->prev_aperf = aperf;
|
||||
cpu->prev_mperf = mperf;
|
||||
|
@ -684,10 +678,13 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
|
|||
ICPU(0x37, byt_params),
|
||||
ICPU(0x3a, core_params),
|
||||
ICPU(0x3c, core_params),
|
||||
ICPU(0x3d, core_params),
|
||||
ICPU(0x3e, core_params),
|
||||
ICPU(0x3f, core_params),
|
||||
ICPU(0x45, core_params),
|
||||
ICPU(0x46, core_params),
|
||||
ICPU(0x4f, core_params),
|
||||
ICPU(0x56, core_params),
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
|
||||
|
|
|
@ -530,6 +530,7 @@ static int longhaul_get_ranges(void)
|
|||
|
||||
static void longhaul_setup_voltagescaling(void)
|
||||
{
|
||||
struct cpufreq_frequency_table *freq_pos;
|
||||
union msr_longhaul longhaul;
|
||||
struct mV_pos minvid, maxvid, vid;
|
||||
unsigned int j, speed, pos, kHz_step, numvscales;
|
||||
|
@ -608,18 +609,16 @@ static void longhaul_setup_voltagescaling(void)
|
|||
/* Calculate kHz for one voltage step */
|
||||
kHz_step = (highest_speed - min_vid_speed) / numvscales;
|
||||
|
||||
j = 0;
|
||||
while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
|
||||
speed = longhaul_table[j].frequency;
|
||||
cpufreq_for_each_entry(freq_pos, longhaul_table) {
|
||||
speed = freq_pos->frequency;
|
||||
if (speed > min_vid_speed)
|
||||
pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
|
||||
else
|
||||
pos = minvid.pos;
|
||||
longhaul_table[j].driver_data |= mV_vrm_table[pos] << 8;
|
||||
freq_pos->driver_data |= mV_vrm_table[pos] << 8;
|
||||
vid = vrm_mV_table[mV_vrm_table[pos]];
|
||||
printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
|
||||
speed, j, vid.mV);
|
||||
j++;
|
||||
speed, (int)(freq_pos - longhaul_table), vid.mV);
|
||||
}
|
||||
|
||||
can_scale_voltage = 1;
|
||||
|
|
|
@ -136,9 +136,10 @@ void restore_astate(int cpu)
|
|||
|
||||
static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct cpufreq_frequency_table *pos;
|
||||
const u32 *max_freqp;
|
||||
u32 max_freq;
|
||||
int i, cur_astate;
|
||||
int cur_astate;
|
||||
struct resource res;
|
||||
struct device_node *cpu, *dn;
|
||||
int err = -ENODEV;
|
||||
|
@ -197,10 +198,9 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
|||
pr_debug("initializing frequency table\n");
|
||||
|
||||
/* initialize frequency table */
|
||||
for (i=0; pas_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
|
||||
pas_freqs[i].frequency =
|
||||
get_astate_freq(pas_freqs[i].driver_data) * 100000;
|
||||
pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
|
||||
cpufreq_for_each_entry(pos, pas_freqs) {
|
||||
pos->frequency = get_astate_freq(pos->driver_data) * 100000;
|
||||
pr_debug("%d: %d\n", (int)(pos - pas_freqs), pos->frequency);
|
||||
}
|
||||
|
||||
cur_astate = get_cur_astate(policy->cpu);
|
||||
|
|
|
@ -151,6 +151,7 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
|
|||
|
||||
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct cpufreq_frequency_table *pos;
|
||||
unsigned int i, f;
|
||||
unsigned khz;
|
||||
|
||||
|
@ -168,12 +169,11 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
|
|||
}
|
||||
}
|
||||
if (param_max_multiplier) {
|
||||
for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
|
||||
if (clock_ratio[i].driver_data == param_max_multiplier) {
|
||||
cpufreq_for_each_entry(pos, clock_ratio)
|
||||
if (pos->driver_data == param_max_multiplier) {
|
||||
max_multiplier = param_max_multiplier;
|
||||
goto have_max_multiplier;
|
||||
}
|
||||
}
|
||||
printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -201,12 +201,12 @@ have_busfreq:
|
|||
param_busfreq = busfreq * 10;
|
||||
|
||||
/* table init */
|
||||
for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
|
||||
f = clock_ratio[i].driver_data;
|
||||
cpufreq_for_each_entry(pos, clock_ratio) {
|
||||
f = pos->driver_data;
|
||||
if (f > max_multiplier)
|
||||
clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
|
||||
pos->frequency = CPUFREQ_ENTRY_INVALID;
|
||||
else
|
||||
clock_ratio[i].frequency = busfreq * f;
|
||||
pos->frequency = busfreq * f;
|
||||
}
|
||||
|
||||
/* cpuinfo and default policy values */
|
||||
|
|
|
@ -27,6 +27,8 @@
|
|||
* power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf)
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -45,7 +47,6 @@
|
|||
#include <linux/mutex.h>
|
||||
#include <acpi/processor.h>
|
||||
|
||||
#define PFX "powernow-k8: "
|
||||
#define VERSION "version 2.20.00"
|
||||
#include "powernow-k8.h"
|
||||
|
||||
|
@ -161,7 +162,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
|
|||
u32 i = 0;
|
||||
|
||||
if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
|
||||
printk(KERN_ERR PFX "internal error - overflow on fid write\n");
|
||||
pr_err("internal error - overflow on fid write\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -175,9 +176,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
|
|||
do {
|
||||
wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
|
||||
if (i++ > 100) {
|
||||
printk(KERN_ERR PFX
|
||||
"Hardware error - pending bit very stuck - "
|
||||
"no further pstate changes possible\n");
|
||||
pr_err("Hardware error - pending bit very stuck - no further pstate changes possible\n");
|
||||
return 1;
|
||||
}
|
||||
} while (query_current_values_with_pending_wait(data));
|
||||
|
@ -185,15 +184,13 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
|
|||
count_off_irt(data);
|
||||
|
||||
if (savevid != data->currvid) {
|
||||
printk(KERN_ERR PFX
|
||||
"vid change on fid trans, old 0x%x, new 0x%x\n",
|
||||
savevid, data->currvid);
|
||||
pr_err("vid change on fid trans, old 0x%x, new 0x%x\n",
|
||||
savevid, data->currvid);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (fid != data->currfid) {
|
||||
printk(KERN_ERR PFX
|
||||
"fid trans failed, fid 0x%x, curr 0x%x\n", fid,
|
||||
pr_err("fid trans failed, fid 0x%x, curr 0x%x\n", fid,
|
||||
data->currfid);
|
||||
return 1;
|
||||
}
|
||||
|
@ -209,7 +206,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
|
|||
int i = 0;
|
||||
|
||||
if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
|
||||
printk(KERN_ERR PFX "internal error - overflow on vid write\n");
|
||||
pr_err("internal error - overflow on vid write\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -223,23 +220,19 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
do {
wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
if (i++ > 100) {
printk(KERN_ERR PFX "internal error - pending bit "
"very stuck - no further pstate "
"changes possible\n");
pr_err("internal error - pending bit very stuck - no further pstate changes possible\n");
return 1;
}
} while (query_current_values_with_pending_wait(data));
if (savefid != data->currfid) {
printk(KERN_ERR PFX "fid changed on vid trans, old "
"0x%x new 0x%x\n",
savefid, data->currfid);
pr_err("fid changed on vid trans, old 0x%x new 0x%x\n",
savefid, data->currfid);
return 1;
}
if (vid != data->currvid) {
printk(KERN_ERR PFX "vid trans failed, vid 0x%x, "
"curr 0x%x\n",
pr_err("vid trans failed, vid 0x%x, curr 0x%x\n",
vid, data->currvid);
return 1;
}

@ -283,8 +276,7 @@ static int transition_fid_vid(struct powernow_k8_data *data,
return 1;
if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, "
"curr 0x%x 0x%x\n",
pr_err("failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n",
smp_processor_id(),
reqfid, reqvid, data->currfid, data->currvid);
return 1;

@ -304,8 +296,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data,
u32 savefid = data->currfid;
u32 maxvid, lo, rvomult = 1;
pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, "
"reqvid 0x%x, rvo 0x%x\n",
pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
smp_processor_id(),
data->currfid, data->currvid, reqvid, data->rvo);

@ -342,8 +333,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data,
return 1;
if (savefid != data->currfid) {
printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n",
data->currfid);
pr_err("ph1 err, currfid changed 0x%x\n", data->currfid);
return 1;
}

@ -360,13 +350,11 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
u32 fid_interval, savevid = data->currvid;
if (data->currfid == reqfid) {
printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n",
data->currfid);
pr_err("ph2 null fid transition 0x%x\n", data->currfid);
return 0;
}
pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, "
"reqfid 0x%x\n",
pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n",
smp_processor_id(),
data->currfid, data->currvid, reqfid);
@ -409,15 +397,13 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
return 1;
if (data->currfid != reqfid) {
printk(KERN_ERR PFX
"ph2: mismatch, failed fid transition, "
"curr 0x%x, req 0x%x\n",
pr_err("ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n",
data->currfid, reqfid);
return 1;
}
if (savevid != data->currvid) {
printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n",
pr_err("ph2: vid changed, save 0x%x, curr 0x%x\n",
savevid, data->currvid);
return 1;
}

@ -444,17 +430,14 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
return 1;
if (savefid != data->currfid) {
printk(KERN_ERR PFX
"ph3: bad fid change, save 0x%x, curr 0x%x\n",
savefid, data->currfid);
pr_err("ph3: bad fid change, save 0x%x, curr 0x%x\n",
savefid, data->currfid);
return 1;
}
if (data->currvid != reqvid) {
printk(KERN_ERR PFX
"ph3: failed vid transition\n, "
"req 0x%x, curr 0x%x",
reqvid, data->currvid);
pr_err("ph3: failed vid transition\n, req 0x%x, curr 0x%x",
reqvid, data->currvid);
return 1;
}
}

@ -498,23 +481,20 @@ static void check_supported_cpu(void *_rc)
if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
printk(KERN_INFO PFX
"Processor cpuid %x not supported\n", eax);
pr_info("Processor cpuid %x not supported\n", eax);
return;
}
eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
printk(KERN_INFO PFX
"No frequency change capabilities detected\n");
pr_info("No frequency change capabilities detected\n");
return;
}
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
if ((edx & P_STATE_TRANSITION_CAPABLE)
!= P_STATE_TRANSITION_CAPABLE) {
printk(KERN_INFO PFX
"Power state transitions not supported\n");
pr_info("Power state transitions not supported\n");
return;
}
*rc = 0;
@ -529,43 +509,39 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
for (j = 0; j < data->numps; j++) {
if (pst[j].vid > LEAST_VID) {
printk(KERN_ERR FW_BUG PFX "vid %d invalid : 0x%x\n",
j, pst[j].vid);
pr_err(FW_BUG "vid %d invalid : 0x%x\n", j,
pst[j].vid);
return -EINVAL;
}
if (pst[j].vid < data->rvo) {
/* vid + rvo >= 0 */
printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate"
" %d\n", j);
pr_err(FW_BUG "0 vid exceeded with pstate %d\n", j);
return -ENODEV;
}
if (pst[j].vid < maxvid + data->rvo) {
/* vid + rvo >= maxvid */
printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate"
" %d\n", j);
pr_err(FW_BUG "maxvid exceeded with pstate %d\n", j);
return -ENODEV;
}
if (pst[j].fid > MAX_FID) {
printk(KERN_ERR FW_BUG PFX "maxfid exceeded with pstate"
" %d\n", j);
pr_err(FW_BUG "maxfid exceeded with pstate %d\n", j);
return -ENODEV;
}
if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) {
/* Only first fid is allowed to be in "low" range */
printk(KERN_ERR FW_BUG PFX "two low fids - %d : "
"0x%x\n", j, pst[j].fid);
pr_err(FW_BUG "two low fids - %d : 0x%x\n", j,
pst[j].fid);
return -EINVAL;
}
if (pst[j].fid < lastfid)
lastfid = pst[j].fid;
}
if (lastfid & 1) {
printk(KERN_ERR FW_BUG PFX "lastfid invalid\n");
pr_err(FW_BUG "lastfid invalid\n");
return -EINVAL;
}
if (lastfid > LO_FID_TABLE_TOP)
printk(KERN_INFO FW_BUG PFX
"first fid not from lo freq table\n");
pr_info(FW_BUG "first fid not from lo freq table\n");
return 0;
}

@ -582,16 +558,14 @@ static void print_basics(struct powernow_k8_data *data)
for (j = 0; j < data->numps; j++) {
if (data->powernow_table[j].frequency !=
CPUFREQ_ENTRY_INVALID) {
printk(KERN_INFO PFX
"fid 0x%x (%d MHz), vid 0x%x\n",
data->powernow_table[j].driver_data & 0xff,
data->powernow_table[j].frequency/1000,
data->powernow_table[j].driver_data >> 8);
pr_info("fid 0x%x (%d MHz), vid 0x%x\n",
data->powernow_table[j].driver_data & 0xff,
data->powernow_table[j].frequency/1000,
data->powernow_table[j].driver_data >> 8);
}
}
if (data->batps)
printk(KERN_INFO PFX "Only %d pstates on battery\n",
data->batps);
pr_info("Only %d pstates on battery\n", data->batps);
}
static int fill_powernow_table(struct powernow_k8_data *data,
@ -602,21 +576,20 @@ static int fill_powernow_table(struct powernow_k8_data *data,
if (data->batps) {
/* use ACPI support to get full speed on mains power */
printk(KERN_WARNING PFX
"Only %d pstates usable (use ACPI driver for full "
"range\n", data->batps);
pr_warn("Only %d pstates usable (use ACPI driver for full range\n",
data->batps);
data->numps = data->batps;
}
for (j = 1; j < data->numps; j++) {
if (pst[j-1].fid >= pst[j].fid) {
printk(KERN_ERR PFX "PST out of sequence\n");
pr_err("PST out of sequence\n");
return -EINVAL;
}
}
if (data->numps < 2) {
printk(KERN_ERR PFX "no p states to transition\n");
pr_err("no p states to transition\n");
return -ENODEV;
}

@ -626,7 +599,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,
powernow_table = kzalloc((sizeof(*powernow_table)
* (data->numps + 1)), GFP_KERNEL);
if (!powernow_table) {
printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
pr_err("powernow_table memory alloc failure\n");
return -ENOMEM;
}

@ -681,13 +654,13 @@ static int find_psb_table(struct powernow_k8_data *data)
pr_debug("table vers: 0x%x\n", psb->tableversion);
if (psb->tableversion != PSB_VERSION_1_4) {
printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n");
pr_err(FW_BUG "PSB table is not v1.4\n");
return -ENODEV;
}
pr_debug("flags: 0x%x\n", psb->flags1);
if (psb->flags1) {
printk(KERN_ERR FW_BUG PFX "unknown flags\n");
pr_err(FW_BUG "unknown flags\n");
return -ENODEV;
}

@ -716,7 +689,7 @@ static int find_psb_table(struct powernow_k8_data *data)
cpst = 1;
}
if (cpst != 1) {
printk(KERN_ERR FW_BUG PFX "numpst must be 1\n");
pr_err(FW_BUG "numpst must be 1\n");
return -ENODEV;
}

@ -742,9 +715,8 @@ static int find_psb_table(struct powernow_k8_data *data)
* BIOS and Kernel Developer's Guide, which is available on
* www.amd.com
*/
printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n");
printk(KERN_ERR PFX "Make sure that your BIOS is up to date"
" and Cool'N'Quiet support is enabled in BIOS setup\n");
pr_err(FW_BUG "No PSB or ACPI _PSS objects\n");
pr_err("Make sure that your BIOS is up to date and Cool'N'Quiet support is enabled in BIOS setup\n");
return -ENODEV;
}
@ -819,8 +791,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
acpi_processor_notify_smm(THIS_MODULE);
if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
printk(KERN_ERR PFX
"unable to alloc powernow_k8_data cpumask\n");
pr_err("unable to alloc powernow_k8_data cpumask\n");
ret_val = -ENOMEM;
goto err_out_mem;
}

@ -885,9 +856,8 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
}
if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
printk(KERN_INFO PFX "invalid freq entries "
"%u kHz vs. %u kHz\n", freq,
(unsigned int)
pr_info("invalid freq entries %u kHz vs. %u kHz\n",
freq, (unsigned int)
(data->acpi_data.states[i].core_frequency
* 1000));
invalidate_entry(powernow_table, i);

@ -916,7 +886,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
max_latency = cur_latency;
}
if (max_latency == 0) {
pr_err(FW_WARN PFX "Invalid zero transition latency\n");
pr_err(FW_WARN "Invalid zero transition latency\n");
max_latency = 1;
}
/* value in usecs, needs to be in nanoseconds */

@ -991,7 +961,7 @@ static long powernowk8_target_fn(void *arg)
checkvid = data->currvid;
if (pending_bit_stuck()) {
printk(KERN_ERR PFX "failing targ, change pending bit set\n");
pr_err("failing targ, change pending bit set\n");
return -EIO;
}

@ -1003,12 +973,11 @@ static long powernowk8_target_fn(void *arg)
return -EIO;
pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
data->currfid, data->currvid);
if ((checkvid != data->currvid) ||
(checkfid != data->currfid)) {
pr_info(PFX
"error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
pr_info("error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
checkfid, data->currfid,
checkvid, data->currvid);
}

@ -1020,7 +989,7 @@ static long powernowk8_target_fn(void *arg)
ret = transition_frequency_fidvid(data, newstate);
if (ret) {
printk(KERN_ERR PFX "transition frequency failed\n");
pr_err("transition frequency failed\n");
mutex_unlock(&fidvid_mutex);
return 1;
}

@ -1049,7 +1018,7 @@ static void powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
struct init_on_cpu *init_on_cpu = _init_on_cpu;
if (pending_bit_stuck()) {
printk(KERN_ERR PFX "failing init, change pending bit set\n");
pr_err("failing init, change pending bit set\n");
init_on_cpu->rc = -ENODEV;
return;
}
@ -1064,11 +1033,10 @@ static void powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
init_on_cpu->rc = 0;
}
static const char missing_pss_msg[] =
KERN_ERR
FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
FW_BUG PFX "First, make sure Cool'N'Quiet is enabled in the BIOS.\n"
FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n";
#define MISSING_PSS_MSG \
FW_BUG "No compatible ACPI _PSS objects found.\n" \
FW_BUG "First, make sure Cool'N'Quiet is enabled in the BIOS.\n" \
FW_BUG "If that doesn't help, try upgrading your BIOS.\n"
/* per CPU init entry point to the driver */
static int powernowk8_cpu_init(struct cpufreq_policy *pol)

@ -1083,7 +1051,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
printk(KERN_ERR PFX "unable to alloc powernow_k8_data");
pr_err("unable to alloc powernow_k8_data");
return -ENOMEM;
}

@ -1095,13 +1063,11 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
* an UP version, and is deprecated by AMD.
*/
if (num_online_cpus() != 1) {
printk_once(missing_pss_msg);
pr_err_once(MISSING_PSS_MSG);
goto err_out;
}
if (pol->cpu != 0) {
printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
"CPU other than CPU0. Complain to your BIOS "
"vendor.\n");
pr_err(FW_BUG "No ACPI _PSS objects for CPU other than CPU0. Complain to your BIOS vendor.\n");
goto err_out;
}
rc = find_psb_table(data);

@ -1129,7 +1095,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
/* min/max the cpu is capable of */
if (cpufreq_table_validate_and_show(pol, data->powernow_table)) {
printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n");
pr_err(FW_BUG "invalid powernow_table\n");
powernow_k8_cpu_exit_acpi(data);
kfree(data->powernow_table);
kfree(data);

@ -1137,7 +1103,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
}
pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
data->currfid, data->currvid);
/* Point all the CPUs in this policy to the same data */
for_each_cpu(cpu, pol->cpus)

@ -1220,12 +1186,12 @@ static void __request_acpi_cpufreq(void)
goto request;
if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv))))
pr_warn(PFX "WTF driver: %s\n", cur_drv);
pr_warn("WTF driver: %s\n", cur_drv);
return;
request:
pr_warn(PFX "This CPU is not supported anymore, using acpi-cpufreq instead.\n");
pr_warn("This CPU is not supported anymore, using acpi-cpufreq instead.\n");
request_module(drv);
}
@ -1260,7 +1226,7 @@ static int powernowk8_init(void)
if (ret)
return ret;
pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
pr_info("Found %d %s (%d cpu cores) (" VERSION ")\n",
num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
return ret;

@ -1274,8 +1240,8 @@ static void __exit powernowk8_exit(void)
cpufreq_unregister_driver(&cpufreq_amd64_driver);
}
MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and "
"Mark Langsdorf <mark.langsdorf@amd.com>");
MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@amd.com>");
MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
MODULE_LICENSE("GPL");
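
The PFX literal disappears from every powernow-k8 message above; presumably the driver now relies on a pr_fmt() definition so that pr_err()/pr_info() still carry a module prefix. A minimal sketch of that idiom, assuming a "powernow-k8: " prefix (the exact string is an assumption, not taken from this diff):

/* pr_fmt() must be defined before the first include that pulls in printk.h */
#define pr_fmt(fmt) "powernow-k8: " fmt
#include <linux/kernel.h>

/* With this in place, pr_err("PST out of sequence\n") prints
 * "powernow-k8: PST out of sequence", so no PFX is needed at call sites. */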
@ -19,7 +19,7 @@ struct powernow_k8_data {
u32 vidmvs; /* usable value calculated from mvs */
u32 vstable; /* voltage stabilization time, units 20 us */
u32 plllock; /* pll lock time, units 1 us */
u32 exttype; /* extended interface = 1 */
u32 exttype; /* extended interface = 1 */
/* keep track of the current fid / vid or pstate */
u32 currvid;
@ -235,7 +235,7 @@ static void powernv_read_cpu_freq(void *arg)
* firmware for CPU 'cpu'. This value is reported through the sysfs
* file cpuinfo_cur_freq.
*/
unsigned int powernv_cpufreq_get(unsigned int cpu)
static unsigned int powernv_cpufreq_get(unsigned int cpu)
{
struct powernv_smp_call_data freq_data;
@ -67,9 +67,10 @@ static int set_pmode(unsigned int cpu, unsigned int slow_mode)
static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos;
const u32 *max_freqp;
u32 max_freq;
int i, cur_pmode;
int cur_pmode;
struct device_node *cpu;
cpu = of_get_cpu_node(policy->cpu, NULL);

@ -102,9 +103,9 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_debug("initializing frequency table\n");
/* initialize frequency table */
for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
cbe_freqs[i].frequency = max_freq / cbe_freqs[i].driver_data;
pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
cpufreq_for_each_entry(pos, cbe_freqs) {
pos->frequency = max_freq / pos->driver_data;
pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency);
}
/* if DEBUG is enabled set_pmode() measures the latency
@ -266,7 +266,7 @@ out:
static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
{
int count, v, i, found;
struct cpufreq_frequency_table *freq;
struct cpufreq_frequency_table *pos;
struct s3c2416_dvfs *dvfs;
count = regulator_count_voltages(s3c_freq->vddarm);

@ -275,12 +275,11 @@ static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
return;
}
freq = s3c_freq->freq_table;
while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) {
if (freq->frequency == CPUFREQ_ENTRY_INVALID)
continue;
if (!count)
goto out;
dvfs = &s3c2416_dvfs_table[freq->driver_data];
cpufreq_for_each_valid_entry(pos, s3c_freq->freq_table) {
dvfs = &s3c2416_dvfs_table[pos->driver_data];
found = 0;
/* Check only the min-voltage, more is always ok on S3C2416 */

@ -292,13 +291,12 @@ static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
if (!found) {
pr_debug("cpufreq: %dkHz unsupported by regulator\n",
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
pos->frequency);
pos->frequency = CPUFREQ_ENTRY_INVALID;
}
freq++;
}
out:
/* Guessed */
s3c_freq->regulator_latency = 1 * 1000 * 1000;
}

@ -338,7 +336,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
struct cpufreq_frequency_table *freq;
struct cpufreq_frequency_table *pos;
struct clk *msysclk;
unsigned long rate;
int ret;

@ -427,31 +425,27 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
s3c_freq->regulator_latency = 0;
#endif
freq = s3c_freq->freq_table;
while (freq->frequency != CPUFREQ_TABLE_END) {
cpufreq_for_each_entry(pos, s3c_freq->freq_table) {
/* special handling for dvs mode */
if (freq->driver_data == 0) {
if (pos->driver_data == 0) {
if (!s3c_freq->hclk) {
pr_debug("cpufreq: %dkHz unsupported as it would need unavailable dvs mode\n",
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
pos->frequency);
pos->frequency = CPUFREQ_ENTRY_INVALID;
} else {
freq++;
continue;
}
}
/* Check for frequencies we can generate */
rate = clk_round_rate(s3c_freq->armdiv,
freq->frequency * 1000);
pos->frequency * 1000);
rate /= 1000;
if (rate != freq->frequency) {
if (rate != pos->frequency) {
pr_debug("cpufreq: %dkHz unsupported by clock (clk_round_rate return %lu)\n",
freq->frequency, rate);
freq->frequency = CPUFREQ_ENTRY_INVALID;
pos->frequency, rate);
pos->frequency = CPUFREQ_ENTRY_INVALID;
}
freq++;
}
/* Datasheet says PLL stabalisation time must be at least 300us,
@ -118,11 +118,10 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
pr_err("Unable to check supported voltages\n");
}
freq = s3c64xx_freq_table;
while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) {
if (freq->frequency == CPUFREQ_ENTRY_INVALID)
continue;
if (!count)
goto out;
cpufreq_for_each_valid_entry(freq, s3c64xx_freq_table) {
dvfs = &s3c64xx_dvfs_table[freq->driver_data];
found = 0;

@ -137,10 +136,9 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
freq++;
}
out:
/* Guess based on having to do an I2C/SPI write; in future we
* will be able to query the regulator performance here. */
regulator_latency = 1 * 1000 * 1000;

@ -179,8 +177,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
}
#endif
freq = s3c64xx_freq_table;
while (freq->frequency != CPUFREQ_TABLE_END) {
cpufreq_for_each_entry(freq, s3c64xx_freq_table) {
unsigned long r;
/* Check for frequencies we can generate */

@ -196,8 +193,6 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
* frequency is the maximum we can support. */
if (!vddarm && freq->frequency > clk_get_rate(policy->clk) / 1000)
freq->frequency = CPUFREQ_ENTRY_INVALID;
freq++;
}
/* Datasheet says PLL stabalisation time (if we were to use
@ -175,10 +175,8 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
mutex_lock(&set_freq_lock);
if (no_cpufreq_access) {
#ifdef CONFIG_PM_VERBOSE
pr_err("%s:%d denied access to %s as it is disabled"
"temporarily\n", __FILE__, __LINE__, __func__);
#endif
pr_err("Denied access to %s as it is disabled temporarily\n",
__func__);
ret = -EINVAL;
goto exit;
}
@ -28,7 +28,7 @@
#include <asm/cpu_device_id.h>
#define PFX "speedstep-centrino: "
#define MAINTAINER "cpufreq@vger.kernel.org"
#define MAINTAINER "linux-pm@vger.kernel.org"
#define INTEL_MSR_RANGE (0xffff)
@ -82,9 +82,9 @@ out:
return ret;
}
static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
unsigned long rate)
static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned long rate = freq_table[index].frequency;
int ret = 0;
/*

@ -106,11 +106,6 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
return ret;
}
static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
{
return tegra_update_cpu_speed(policy, freq_table[index].frequency);
}
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
int ret;
@ -1734,18 +1734,17 @@ static struct cpufreq_frequency_table db8500_cpufreq_table[] = {
static long round_armss_rate(unsigned long rate)
{
struct cpufreq_frequency_table *pos;
long freq = 0;
int i = 0;
/* cpufreq table frequencies is in KHz. */
rate = rate / 1000;
/* Find the corresponding arm opp from the cpufreq table. */
while (db8500_cpufreq_table[i].frequency != CPUFREQ_TABLE_END) {
freq = db8500_cpufreq_table[i].frequency;
cpufreq_for_each_entry(pos, db8500_cpufreq_table) {
freq = pos->frequency;
if (freq == rate)
break;
i++;
}
/* Return the last valid value, even if a match was not found. */

@ -1886,23 +1885,21 @@ static void set_clock_rate(u8 clock, unsigned long rate)
static int set_armss_rate(unsigned long rate)
{
int i = 0;
struct cpufreq_frequency_table *pos;
/* cpufreq table frequencies is in KHz. */
rate = rate / 1000;
/* Find the corresponding arm opp from the cpufreq table. */
while (db8500_cpufreq_table[i].frequency != CPUFREQ_TABLE_END) {
if (db8500_cpufreq_table[i].frequency == rate)
cpufreq_for_each_entry(pos, db8500_cpufreq_table)
if (pos->frequency == rate)
break;
i++;
}
if (db8500_cpufreq_table[i].frequency != rate)
if (pos->frequency != rate)
return -EINVAL;
/* Set the new arm opp. */
return db8500_prcmu_set_arm_opp(db8500_cpufreq_table[i].driver_data);
return db8500_prcmu_set_arm_opp(pos->driver_data);
}
static int set_plldsi_rate(unsigned long rate)
@ -217,21 +217,17 @@ crc_init_out:
static u32 sh_sir_find_sclk(struct clk *irda_clk)
{
struct cpufreq_frequency_table *freq_table = irda_clk->freq_table;
struct cpufreq_frequency_table *pos;
struct clk *pclk = clk_get(NULL, "peripheral_clk");
u32 limit, min = 0xffffffff, tmp;
int i, index = 0;
int index = 0;
limit = clk_get_rate(pclk);
clk_put(pclk);
/* IrDA can not set over peripheral_clk */
for (i = 0;
freq_table[i].frequency != CPUFREQ_TABLE_END;
i++) {
u32 freq = freq_table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
cpufreq_for_each_valid_entry(pos, freq_table) {
u32 freq = pos->frequency;
/* IrDA should not over peripheral_clk */
if (freq > limit)

@ -240,7 +236,7 @@ static u32 sh_sir_find_sclk(struct clk *irda_clk)
tmp = freq % SCLK_BASE;
if (tmp < min) {
min = tmp;
index = i;
index = pos - freq_table;
}
}
@ -196,17 +196,11 @@ int clk_rate_table_find(struct clk *clk,
struct cpufreq_frequency_table *freq_table,
unsigned long rate)
{
int i;
struct cpufreq_frequency_table *pos;
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
unsigned long freq = freq_table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
if (freq == rate)
return i;
}
cpufreq_for_each_valid_entry(pos, freq_table)
if (pos->frequency == rate)
return pos - freq_table;
return -ENOENT;
}

@ -575,11 +569,7 @@ long clk_round_parent(struct clk *clk, unsigned long target,
return abs(target - *best_freq);
}
for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
freq++) {
if (freq->frequency == CPUFREQ_ENTRY_INVALID)
continue;
cpufreq_for_each_valid_entry(freq, parent->freq_table) {
if (unlikely(freq->frequency / target <= div_min - 1)) {
unsigned long freq_max;
@ -144,11 +144,11 @@ static int get_property(unsigned int cpu, unsigned long input,
unsigned int *output,
enum cpufreq_cooling_property property)
{
int i, j;
int i;
unsigned long max_level = 0, level = 0;
unsigned int freq = CPUFREQ_ENTRY_INVALID;
int descend = -1;
struct cpufreq_frequency_table *table =
struct cpufreq_frequency_table *pos, *table =
cpufreq_frequency_get_table(cpu);
if (!output)

@ -157,20 +157,16 @@ static int get_property(unsigned int cpu, unsigned long input,
if (!table)
return -EINVAL;
for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
/* ignore invalid entries */
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
continue;
cpufreq_for_each_valid_entry(pos, table) {
/* ignore duplicate entry */
if (freq == table[i].frequency)
if (freq == pos->frequency)
continue;
/* get the frequency order */
if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
descend = !!(freq > table[i].frequency);
descend = freq > pos->frequency;
freq = table[i].frequency;
freq = pos->frequency;
max_level++;
}

@ -190,29 +186,26 @@ static int get_property(unsigned int cpu, unsigned long input,
if (property == GET_FREQ)
level = descend ? input : (max_level - input);
for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
/* ignore invalid entry */
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
continue;
i = 0;
cpufreq_for_each_valid_entry(pos, table) {
/* ignore duplicate entry */
if (freq == table[i].frequency)
if (freq == pos->frequency)
continue;
/* now we have a valid frequency entry */
freq = table[i].frequency;
freq = pos->frequency;
if (property == GET_LEVEL && (unsigned int)input == freq) {
/* get level by frequency */
*output = descend ? j : (max_level - j);
*output = descend ? i : (max_level - i);
return 0;
}
if (property == GET_FREQ && level == j) {
if (property == GET_FREQ && level == i) {
/* get frequency by level */
*output = freq;
return 0;
}
j++;
i++;
}
return -EINVAL;
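
The reworked get_property() above replaces the old i/j bookkeeping with one cursor and one counter, skipping duplicates by remembering the last frequency seen. A simplified sketch of that counting idiom, with illustrative names only (not the driver's actual helper):

static unsigned long count_unique_levels(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	unsigned int freq = CPUFREQ_ENTRY_INVALID;
	unsigned long levels = 0;

	cpufreq_for_each_valid_entry(pos, table) {
		if (freq == pos->frequency)
			continue;	/* skip duplicate entries */
		freq = pos->frequency;
		levels++;
	}
	return levels;
}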
@ -110,6 +110,7 @@ struct cpufreq_policy {
bool transition_ongoing; /* Tracks transition status */
spinlock_t transition_lock;
wait_queue_head_t transition_wait;
struct task_struct *transition_task; /* Task which is doing the transition */
};
/* Only for ACPI */

@ -468,6 +469,55 @@ struct cpufreq_frequency_table {
* order */
};
#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
int dev_pm_opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table);
void dev_pm_opp_free_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table);
#else
static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table
**table)
{
return -EINVAL;
}
static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table
**table)
{
}
#endif
static inline bool cpufreq_next_valid(struct cpufreq_frequency_table **pos)
{
while ((*pos)->frequency != CPUFREQ_TABLE_END)
if ((*pos)->frequency != CPUFREQ_ENTRY_INVALID)
return true;
else
(*pos)++;
return false;
}
/*
* cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
* @pos: the cpufreq_frequency_table * to use as a loop cursor.
* @table: the cpufreq_frequency_table * to iterate over.
*/
#define cpufreq_for_each_entry(pos, table) \
for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)
/*
* cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
* excluding CPUFREQ_ENTRY_INVALID frequencies.
* @pos: the cpufreq_frequency_table * to use as a loop cursor.
* @table: the cpufreq_frequency_table * to iterate over.
*/
#define cpufreq_for_each_valid_entry(pos, table) \
for (pos = table; cpufreq_next_valid(&pos); pos++)
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
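
The two macros above are what the driver conversions earlier in this commit switch to. A minimal usage sketch, assuming a CPUFREQ_TABLE_END-terminated table supplied by a driver; the helper name show_valid_freqs() is hypothetical:

static void show_valid_freqs(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;

	cpufreq_for_each_valid_entry(pos, table) {
		/* when an index is still needed it falls out of pointer
		 * arithmetic, as the converted drivers do with pos - table */
		pr_info("entry %d: %u kHz\n", (int)(pos - table),
			pos->frequency);
	}
}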
@ -15,7 +15,6 @@
#define __LINUX_OPP_H__
#include <linux/err.h>
#include <linux/cpufreq.h>
#include <linux/notifier.h>
struct dev_pm_opp;

@ -117,23 +116,4 @@ static inline int of_init_opp_table(struct device *dev)
}
#endif
#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
int dev_pm_opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table);
void dev_pm_opp_free_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table);
#else
static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
return -EINVAL;
}
static inline
void dev_pm_opp_free_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
}
#endif /* CONFIG_CPU_FREQ */
#endif /* __LINUX_OPP_H__ */
@ -62,7 +62,7 @@ LIB_MAJ= 0.0.0
LIB_MIN= 0
PACKAGE = cpupower
PACKAGE_BUGREPORT = cpufreq@vger.kernel.org
PACKAGE_BUGREPORT = linux-pm@vger.kernel.org
LANGUAGES = de fr it cs pt
@ -18,7 +18,7 @@
* 5.) if the third value, "diff_pmtmr", changes between 2. and 4., the
* TSC-based delay routine on the Linux kernel does not correctly
* handle the cpufreq transition. Please report this to
* cpufreq@vger.kernel.org
* linux-pm@vger.kernel.org
*/
#include <linux/kernel.h>