Power management and ACPI material for 3.19-rc3
Merge tag 'pm+acpi-3.19-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management and ACPI material from Rafael J Wysocki:
 "These are fixes (operating performance points library, cpufreq-dt
  driver, cpufreq core, ACPI backlight, cpupower tool), cleanups
  (cpuidle), new processor IDs for the RAPL (Running Average Power
  Limit) power capping driver, and a modification of the generic power
  domains framework allowing modular drivers to call one of its helper
  functions.

  Specifics:

   - Fix for a potential NULL pointer dereference in the cpufreq core
     due to an initialization race condition (Ethan Zhao).

   - Fixes for abuse of the OPP (Operating Performance Points) API
     related to RCU and other minor issues in the OPP library and the
     cpufreq-dt driver (Dmitry Torokhov).

   - cpuidle governors cleanup making them measure idle duration in a
     better way without using the CPUIDLE_FLAG_TIME_INVALID flag, which
     allows that flag to be dropped from the ACPI cpuidle driver and
     from the core too (Len Brown).

   - New ACPI backlight blacklist entries for Samsung machines without
     a working native backlight interface that need to use the ACPI
     backlight instead (Aaron Lu).

   - New CPU IDs of future Intel Xeon CPUs for the Intel RAPL power
     capping driver (Jacob Pan).

   - Generic power domains framework modification to export the
     of_genpd_get_from_provider() function to modular drivers, which
     will allow future driver modifications to be based on the
     mainline (Amit Daniel Kachhap).

   - Two fixes for the cpupower tool (Michal Privoznik, Prarit
     Bhargava)"

* tag 'pm+acpi-3.19-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI / video: Add some Samsung models to disable_native_backlight list
  tools / cpupower: Fix no idle state information return value
  tools / cpupower: Correctly detect if running as root
  cpufreq: fix a NULL pointer dereference in __cpufreq_governor()
  cpufreq-dt: defer probing if OPP table is not ready
  PM / OPP: take RCU lock in dev_pm_opp_get_opp_count
  PM / OPP: fix warning in of_free_opp_table()
  PM / OPP: add some lockdep annotations
  powercap / RAPL: add IDs for future Xeon CPUs
  PM / Domains: Export of_genpd_get_from_provider function
  cpuidle / ACPI: remove unused CPUIDLE_FLAG_TIME_INVALID
  cpuidle: ladder: Better idle duration measurement without using CPUIDLE_FLAG_TIME_INVALID
  cpuidle: menu: Better idle duration measurement without using CPUIDLE_FLAG_TIME_INVALID
drivers/acpi/processor_idle.c

@@ -985,8 +985,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 		state->flags = 0;
 		switch (cx->type) {
 		case ACPI_STATE_C1:
-			if (cx->entry_method != ACPI_CSTATE_FFH)
-				state->flags |= CPUIDLE_FLAG_TIME_INVALID;
 
 			state->enter = acpi_idle_enter_c1;
 			state->enter_dead = acpi_idle_play_dead;
drivers/acpi/video.c

@@ -505,6 +505,23 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
 		},
 	},
+	{
+	 .callback = video_disable_native_backlight,
+	 .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
+		},
+	},
+	{
+	 .callback = video_disable_native_backlight,
+	 .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
+		},
+	},
 	{}
 };
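For context, entries like the two added above take effect because the ACPI video driver walks this table at init time and runs .callback for every entry whose .matches all match the machine's DMI data. A minimal sketch of that pattern (the init function name here is illustrative, not the driver's):

	#include <linux/dmi.h>

	static int __init example_video_init(void)
	{
		/* Invokes video_disable_native_backlight() on matching machines. */
		dmi_check_system(video_dmi_table);
		return 0;
	}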
drivers/base/power/domain.c

@@ -2088,7 +2088,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
  * on failure.
  */
-static struct generic_pm_domain *of_genpd_get_from_provider(
+struct generic_pm_domain *of_genpd_get_from_provider(
 					struct of_phandle_args *genpdspec)
 {
 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
@@ -2108,6 +2108,7 @@ static struct generic_pm_domain *of_genpd_get_from_provider(
 
 	return genpd;
 }
+EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
 
 /**
  * genpd_dev_pm_detach - Detach a device from its PM domain.
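With the symbol exported, a modular driver can resolve its PM domain from the standard "power-domains" device-tree binding. A minimal sketch of such a consumer (error handling trimmed, function name hypothetical):

	#include <linux/of.h>
	#include <linux/pm_domain.h>

	static struct generic_pm_domain *example_get_domain(struct device *dev)
	{
		struct of_phandle_args pd_args;
		int ret;

		/* Resolve the device's first "power-domains" specifier. */
		ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
						 "#power-domain-cells", 0, &pd_args);
		if (ret < 0)
			return ERR_PTR(ret);

		/* Newly exported helper: specifier -> provider's domain. */
		return of_genpd_get_from_provider(&pd_args);
	}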
drivers/base/power/opp.c

@@ -108,6 +108,14 @@ static LIST_HEAD(dev_opp_list);
 /* Lock to allow exclusive modification to the device and opp lists */
 static DEFINE_MUTEX(dev_opp_list_lock);
 
+#define opp_rcu_lockdep_assert()					\
+do {									\
+	rcu_lockdep_assert(rcu_read_lock_held() ||			\
+				lockdep_is_held(&dev_opp_list_lock),	\
+			   "Missing rcu_read_lock() or "		\
+			   "dev_opp_list_lock protection");		\
+} while (0)
+
 /**
  * find_device_opp() - find device_opp struct using device pointer
  * @dev: device pointer used to lookup device OPPs
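The macro encodes the lookup API's locking contract: callers must hold either the RCU read lock or dev_opp_list_lock, otherwise lockdep complains. A minimal compliant caller looks like this (sketch; dev comes from the surrounding driver and the target rate is hypothetical):

	unsigned long freq = 800000000;	/* hypothetical target, in Hz */
	struct dev_pm_opp *opp;

	rcu_read_lock();		/* satisfies opp_rcu_lockdep_assert() */
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (!IS_ERR(opp))
		freq = dev_pm_opp_get_freq(opp); /* opp valid only inside RCU */
	rcu_read_unlock();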
@@ -208,9 +216,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  * This function returns the number of available opps if there are any,
  * else returns 0 if none or the corresponding error value.
  *
- * Locking: This function must be called under rcu_read_lock(). This function
- * internally references two RCU protected structures: device_opp and opp which
- * are safe as long as we are under a common RCU locked section.
+ * Locking: This function takes rcu_read_lock().
  */
 int dev_pm_opp_get_opp_count(struct device *dev)
 {
@@ -218,11 +224,14 @@ int dev_pm_opp_get_opp_count(struct device *dev)
 	struct dev_pm_opp *temp_opp;
 	int count = 0;
 
+	rcu_read_lock();
+
 	dev_opp = find_device_opp(dev);
 	if (IS_ERR(dev_opp)) {
-		int r = PTR_ERR(dev_opp);
-		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
-		return r;
+		count = PTR_ERR(dev_opp);
+		dev_err(dev, "%s: device OPP not found (%d)\n",
+			__func__, count);
+		goto out_unlock;
 	}
 
 	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
@@ -230,6 +239,8 @@ int dev_pm_opp_get_opp_count(struct device *dev)
 			count++;
 	}
 
+out_unlock:
+	rcu_read_unlock();
 	return count;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
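Because dev_pm_opp_get_opp_count() now takes the RCU read lock itself, callers can query the count with no locking of their own; the cpufreq-dt hunk further down relies on exactly that. Caller-side sketch (cpu_dev assumed to be the CPU's struct device *):

	/* No rcu_read_lock() needed around the call any more. */
	int n = dev_pm_opp_get_opp_count(cpu_dev);

	if (n <= 0)			/* empty table, or an error code */
		return -EPROBE_DEFER;	/* let platform code populate OPPs later */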
@@ -267,6 +278,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 	struct device_opp *dev_opp;
 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
+	opp_rcu_lockdep_assert();
+
 	dev_opp = find_device_opp(dev);
 	if (IS_ERR(dev_opp)) {
 		int r = PTR_ERR(dev_opp);
@@ -313,6 +326,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
 	struct device_opp *dev_opp;
 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
+	opp_rcu_lockdep_assert();
+
 	if (!dev || !freq) {
 		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
 		return ERR_PTR(-EINVAL);
@@ -361,6 +376,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 	struct device_opp *dev_opp;
 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
+	opp_rcu_lockdep_assert();
+
 	if (!dev || !freq) {
 		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
 		return ERR_PTR(-EINVAL);
@@ -783,9 +800,15 @@ void of_free_opp_table(struct device *dev)
 
 	/* Check for existing list for 'dev' */
 	dev_opp = find_device_opp(dev);
-	if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
-		 PTR_ERR(dev_opp)))
+	if (IS_ERR(dev_opp)) {
+		int error = PTR_ERR(dev_opp);
+
+		if (error != -ENODEV)
+			WARN(1, "%s: dev_opp: %d\n",
+			     IS_ERR_OR_NULL(dev) ?
+					"Invalid device" : dev_name(dev),
+			     error);
 		return;
+	}
 
 	/* Hold our list modification lock here */
 	mutex_lock(&dev_opp_list_lock);
drivers/cpufreq/cpufreq-dt.c

@@ -211,6 +211,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	/* OPPs might be populated at runtime, don't check for error here */
 	of_init_opp_table(cpu_dev);
 
+	/*
+	 * But we need OPP table to function so if it is not there let's
+	 * give platform code chance to provide it for us.
+	 */
+	ret = dev_pm_opp_get_opp_count(cpu_dev);
+	if (ret <= 0) {
+		pr_debug("OPP table is not ready, deferring probe\n");
+		ret = -EPROBE_DEFER;
+		goto out_free_opp;
+	}
+
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		ret = -ENOMEM;
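-EPROBE_DEFER is the driver core's retry mechanism: when probe() returns it, the device is put on the deferred-probe list and probed again after another driver binds successfully (here, whoever registers the OPP table). A minimal sketch of the general pattern (driver and supply names illustrative):

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/regulator/consumer.h>

	static int example_probe(struct platform_device *pdev)
	{
		/* A resource provided by another driver may not exist yet... */
		struct regulator *reg = devm_regulator_get_optional(&pdev->dev, "vdd");

		if (IS_ERR(reg))
			return PTR_ERR(reg); /* may be -EPROBE_DEFER: core retries */
		return 0;
	}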
drivers/cpufreq/cpufreq.c

@@ -2028,6 +2028,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	/* Don't start any governor operations if we are entering suspend */
 	if (cpufreq_suspended)
 		return 0;
+	/*
+	 * Governor might not be initiated here if ACPI _PPC changed
+	 * notification happened, so check it.
+	 */
+	if (!policy->governor)
+		return -EINVAL;
 
 	if (policy->governor->max_transition_latency &&
 	    policy->cpuinfo.transition_latency >
drivers/cpuidle/governors/ladder.c

@@ -79,12 +79,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
 	last_state = &ldev->states[last_idx];
 
-	if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
-		last_residency = cpuidle_get_last_residency(dev) - \
-			drv->states[last_idx].exit_latency;
-	}
-	else
-		last_residency = last_state->threshold.promotion_time + 1;
+	last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
 
 	/* consider promotion */
 	if (last_idx < drv->state_count - 1 &&
drivers/cpuidle/governors/menu.c

@@ -396,8 +396,8 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	 * power state and occurrence of the wakeup event.
 	 *
 	 * If the entered idle state didn't support residency measurements,
-	 * we are basically lost in the dark how much time passed.
-	 * As a compromise, assume we slept for the whole expected time.
+	 * we use them anyway if they are short, and if long,
+	 * truncate to the whole expected time.
 	 *
 	 * Any measured amount of time will include the exit latency.
 	 * Since we are interested in when the wakeup begun, not when it
@@ -405,23 +405,18 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	 * the measured amount of time is less than the exit latency,
 	 * assume the state was never reached and the exit latency is 0.
 	 */
-	if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
-		/* Use timer value as is */
-		measured_us = data->next_timer_us;
 
-	} else {
-		/* Use measured value */
-		measured_us = cpuidle_get_last_residency(dev);
+	/* measured value */
+	measured_us = cpuidle_get_last_residency(dev);
 
-		/* Deduct exit latency */
-		if (measured_us > target->exit_latency)
-			measured_us -= target->exit_latency;
+	/* Deduct exit latency */
+	if (measured_us > target->exit_latency)
+		measured_us -= target->exit_latency;
 
-		/* Make sure our coefficients do not exceed unity */
-		if (measured_us > data->next_timer_us)
-			measured_us = data->next_timer_us;
-	}
+	/* Make sure our coefficients do not exceed unity */
+	if (measured_us > data->next_timer_us)
+		measured_us = data->next_timer_us;
 
 	/* Update our correction ratio */
 	new_factor = data->correction_factor[data->bucket];
 	new_factor -= new_factor / DECAY;
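The net effect is that the measured residency is always used, after two corrections: deduct the exit latency, then clamp to the predicted sleep length so the governor's correction factor never exceeds unity. A standalone restatement of that arithmetic (plain userspace C, not kernel code):

	#include <stdio.h>

	static unsigned int adjust_measured_us(unsigned int residency_us,
					       unsigned int exit_latency_us,
					       unsigned int next_timer_us)
	{
		unsigned int measured_us = residency_us;

		if (measured_us > exit_latency_us)	/* deduct exit latency */
			measured_us -= exit_latency_us;
		if (measured_us > next_timer_us)	/* keep factor <= 1 */
			measured_us = next_timer_us;
		return measured_us;
	}

	int main(void)
	{
		/* 5000us measured, 100us exit latency, 4000us predicted -> 4000 */
		printf("%u\n", adjust_measured_us(5000, 100, 4000));
		return 0;
	}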
drivers/powercap/intel_rapl.c

@@ -1041,6 +1041,7 @@ static const struct x86_cpu_id rapl_ids[] = {
 	RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
 	RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
 	RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
+	RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
 	RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */
 	{}
 };
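Adding a model number to rapl_ids is all that is needed because the driver matches the running CPU against this table at load time. Roughly (a sketch of the usual x86 id-table pattern, not a verbatim quote of the driver):

	#include <linux/errno.h>
	#include <asm/cpu_device_id.h>

	static int __init example_rapl_init(void)
	{
		const struct x86_cpu_id *id = x86_match_cpu(rapl_ids);

		if (!id)
			return -ENODEV;	/* running CPU model not in the table */
		/* id->driver_data carries the matching rapl_defaults pointer */
		return 0;
	}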
include/linux/cpuidle.h

@@ -53,7 +53,6 @@ struct cpuidle_state {
 };
 
 /* Idle State Flags */
-#define CPUIDLE_FLAG_TIME_INVALID	(0x01) /* is residency time measurable? */
 #define CPUIDLE_FLAG_COUPLED	(0x02) /* state applies to multiple cpus */
 #define CPUIDLE_FLAG_TIMER_STOP (0x04)  /* timer is stopped on this state */
 
@@ -89,8 +88,6 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
 /**
  * cpuidle_get_last_residency - retrieves the last state's residency time
  * @dev: the target CPU
- *
- * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set
  */
 static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
 {
include/linux/pm_domain.h

@@ -271,6 +271,8 @@ typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
 			void *data);
 void of_genpd_del_provider(struct device_node *np);
+struct generic_pm_domain *of_genpd_get_from_provider(
+			struct of_phandle_args *genpdspec);
 
 struct generic_pm_domain *__of_genpd_xlate_simple(
 	struct of_phandle_args *genpdspec,
@@ -288,6 +290,12 @@ static inline int __of_genpd_add_provider(struct device_node *np,
 }
 static inline void of_genpd_del_provider(struct device_node *np) {}
 
+static inline struct generic_pm_domain *of_genpd_get_from_provider(
+			struct of_phandle_args *genpdspec)
+{
+	return NULL;
+}
+
 #define __of_genpd_xlate_simple NULL
 #define __of_genpd_xlate_onecell NULL
 
tools/power/cpupower/utils/cpupower.c

@@ -199,7 +199,7 @@ int main(int argc, const char *argv[])
 	}
 
 	get_cpu_info(0, &cpupower_cpu_info);
-	run_as_root = !getuid();
+	run_as_root = !geteuid();
 	if (run_as_root) {
 		ret = uname(&uts);
 		if (!ret && !strcmp(uts.machine, "x86_64") &&
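The distinction matters for setuid binaries: getuid() reports the invoking user's real UID, while geteuid() reports the effective UID that kernel permission checks actually use, so geteuid() is the right test for "can this process do privileged work". A quick demonstration (plain userspace C):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Under setuid-root these differ: the real uid stays the
		 * caller's, the effective uid is root's. */
		printf("real uid %u, effective uid %u\n",
		       (unsigned int)getuid(), (unsigned int)geteuid());
		return 0;
	}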
tools/power/cpupower/utils/helpers/sysfs.c

@@ -361,7 +361,7 @@ unsigned int sysfs_get_idlestate_count(unsigned int cpu)
 
 	snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle");
 	if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
-		return -ENODEV;
+		return 0;
 
 	snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
 	if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
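The old -ENODEV return was a bug because the function is declared to return unsigned int: the negative errno wraps around to a huge positive "count" that callers cannot distinguish from a valid result. In miniature (plain C, illustrative names):

	#include <errno.h>
	#include <stdio.h>

	/* Returning a negative errno through an unsigned type loses the sign. */
	static unsigned int idlestate_count(int have_cpuidle)
	{
		if (!have_cpuidle)
			return -ENODEV;	/* wraps to 4294967277 as unsigned int */
		return 3;
	}

	int main(void)
	{
		printf("%u\n", idlestate_count(0)); /* 4294967277, not an error */
		return 0;
	}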