Merge branches 'pm-core', 'pm-sleep', 'pm-qos', 'pm-domains' and 'pm-em'
* pm-core:
  PM / core: Add support to skip power management in device/driver model
  PM / suspend: Print debug messages for device using direct-complete
  PM-runtime: update time accounting only when enabled
  PM-runtime: Switch accounting over to ktime_get_mono_fast_ns()
  PM-runtime: Optimize pm_runtime_autosuspend_expiration()
  PM-runtime: Replace jiffies-based accounting with ktime-based accounting
  PM-runtime: update accounting_timestamp on enable
  PM: clock_ops: fix missing clk_prepare() return value check
  drm/i915: Move on the new pm runtime interface
  PM-runtime: Add new interface to get accounted time

* pm-sleep:
  PM / wakeup: fix kerneldoc comment for pm_wakeup_dev_event()

* pm-qos:
  PM: QoS: no need to check return value of debugfs_create functions

* pm-domains:
  PM / Domains: Mark "name" const in dev_pm_domain_attach_by_name()
  PM / Domains: Mark "name" const in genpd_dev_pm_attach_by_name()
  PM: domains: no need to check return value of debugfs_create functions

* pm-em:
  PM / EM: Expose the Energy Model in debugfs
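As background for the PM-runtime hunks below, here is a minimal, illustrative sketch (not part of this merge) of how a driver could consume the interface added by "PM-runtime: Add new interface to get accounted time". Only pm_runtime_suspended_time() itself comes from this series; the sample_report_suspended_time() helper and its use of dev_info() are made-up for the example.

#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/pm_runtime.h>

/* Hypothetical helper: report how long a device has been runtime-suspended. */
static void sample_report_suspended_time(struct device *dev)
{
	/* Nanoseconds accumulated in RPM_SUSPENDED, per the new interface. */
	u64 ns = pm_runtime_suspended_time(dev);

	/* div_u64() keeps the 64-bit division safe on 32-bit architectures. */
	dev_info(dev, "runtime-suspended for %llu ms\n",
		 div_u64(ns, NSEC_PER_MSEC));
}

The i915 hunks further down use the same call to replace direct reads of power.suspended_jiffies.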
@@ -427,6 +427,7 @@ __cpu_device_create(struct device *parent, void *drvdata,
 	dev->parent = parent;
 	dev->groups = groups;
 	dev->release = device_create_release;
+	device_set_pm_not_required(dev);
 	dev_set_drvdata(dev, drvdata);
 
 	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
@@ -65,10 +65,15 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
 	if (IS_ERR(ce->clk)) {
 		ce->status = PCE_STATUS_ERROR;
 	} else {
-		clk_prepare(ce->clk);
-		ce->status = PCE_STATUS_ACQUIRED;
-		dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
-			ce->clk, ce->con_id);
+		if (clk_prepare(ce->clk)) {
+			ce->status = PCE_STATUS_ERROR;
+			dev_err(dev, "clk_prepare() failed\n");
+		} else {
+			ce->status = PCE_STATUS_ACQUIRED;
+			dev_dbg(dev,
+				"Clock %pC con_id %s managed by runtime PM.\n",
+				ce->clk, ce->con_id);
+		}
 	}
 }
 
@@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
  * For a detailed function description, see dev_pm_domain_attach_by_id().
  */
 struct device *dev_pm_domain_attach_by_name(struct device *dev,
-					    char *name)
+					    const char *name)
 {
 	if (dev->pm_domain)
 		return ERR_PTR(-EEXIST);
@@ -2483,7 +2483,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
  * power-domain-names DT property. For further description see
  * genpd_dev_pm_attach_by_id().
  */
-struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
 {
 	int index;
 
@@ -2948,18 +2948,11 @@ static int __init genpd_debug_init(void)
 
 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
 
-	if (!genpd_debugfs_dir)
-		return -ENOMEM;
-
-	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
-			genpd_debugfs_dir, NULL, &summary_fops);
-	if (!d)
-		return -ENOMEM;
+	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
+			    NULL, &summary_fops);
 
 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
 		d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
-		if (!d)
-			return -ENOMEM;
 
 		debugfs_create_file("current_state", 0444,
 				d, genpd, &status_fops);
@@ -124,6 +124,10 @@ void device_pm_unlock(void)
  */
 void device_pm_add(struct device *dev)
 {
+	/* Skip PM setup/initialization. */
+	if (device_pm_not_required(dev))
+		return;
+
 	pr_debug("PM: Adding info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	device_pm_check_callbacks(dev);
@@ -142,6 +146,9 @@ void device_pm_add(struct device *dev)
  */
 void device_pm_remove(struct device *dev)
 {
+	if (device_pm_not_required(dev))
+		return;
+
 	pr_debug("PM: Removing info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
@@ -1741,8 +1748,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.direct_complete) {
 		if (pm_runtime_status_suspended(dev)) {
 			pm_runtime_disable(dev);
-			if (pm_runtime_status_suspended(dev))
+			if (pm_runtime_status_suspended(dev)) {
+				pm_dev_dbg(dev, state, "direct-complete ");
 				goto Complete;
+			}
 
 			pm_runtime_enable(dev);
 		}
@@ -66,20 +66,30 @@ static int rpm_suspend(struct device *dev, int rpmflags);
  */
 void update_pm_runtime_accounting(struct device *dev)
 {
-	unsigned long now = jiffies;
-	unsigned long delta;
-
-	delta = now - dev->power.accounting_timestamp;
-
-	dev->power.accounting_timestamp = now;
+	u64 now, last, delta;
 
 	if (dev->power.disable_depth > 0)
 		return;
 
+	last = dev->power.accounting_timestamp;
+
+	now = ktime_get_mono_fast_ns();
+	dev->power.accounting_timestamp = now;
+
+	/*
+	 * Because ktime_get_mono_fast_ns() is not monotonic during
+	 * timekeeping updates, ensure that 'now' is after the last saved
+	 * timesptamp.
+	 */
+	if (now < last)
+		return;
+
+	delta = now - last;
+
 	if (dev->power.runtime_status == RPM_SUSPENDED)
-		dev->power.suspended_jiffies += delta;
+		dev->power.suspended_time += delta;
 	else
-		dev->power.active_jiffies += delta;
+		dev->power.active_time += delta;
 }
 
 static void __update_runtime_status(struct device *dev, enum rpm_status status)
@@ -88,6 +98,22 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
 	dev->power.runtime_status = status;
 }
 
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+	u64 time;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	update_pm_runtime_accounting(dev);
+	time = dev->power.suspended_time;
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return time;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
 /**
  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  * @dev: Device to handle.
@@ -129,24 +155,21 @@ static void pm_runtime_cancel_pending(struct device *dev)
 u64 pm_runtime_autosuspend_expiration(struct device *dev)
 {
 	int autosuspend_delay;
-	u64 last_busy, expires = 0;
-	u64 now = ktime_get_mono_fast_ns();
+	u64 expires;
 
 	if (!dev->power.use_autosuspend)
-		goto out;
+		return 0;
 
 	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
 	if (autosuspend_delay < 0)
-		goto out;
+		return 0;
 
-	last_busy = READ_ONCE(dev->power.last_busy);
+	expires = READ_ONCE(dev->power.last_busy);
+	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+	if (expires > ktime_get_mono_fast_ns())
+		return expires;	/* Expires in the future */
 
-	expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
-	if (expires <= now)
-		expires = 0;	/* Already expired. */
-
- out:
-	return expires;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 
@@ -1276,6 +1299,9 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
 		pm_runtime_put_noidle(dev);
 	}
 
+	/* Update time accounting before disabling PM-runtime. */
+	update_pm_runtime_accounting(dev);
+
 	if (!dev->power.disable_depth++)
 		__pm_runtime_barrier(dev);
 
@@ -1294,10 +1320,15 @@ void pm_runtime_enable(struct device *dev)
 
 	spin_lock_irqsave(&dev->power.lock, flags);
 
-	if (dev->power.disable_depth > 0)
+	if (dev->power.disable_depth > 0) {
 		dev->power.disable_depth--;
-	else
+
+		/* About to enable runtime pm, set accounting_timestamp to now */
+		if (!dev->power.disable_depth)
+			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+	} else {
 		dev_warn(dev, "Unbalanced %s!\n", __func__);
+	}
 
 	WARN(!dev->power.disable_depth &&
 	     dev->power.runtime_status == RPM_SUSPENDED &&
@@ -1494,7 +1525,6 @@ void pm_runtime_init(struct device *dev)
 	dev->power.request_pending = false;
 	dev->power.request = RPM_REQ_NONE;
 	dev->power.deferred_resume = false;
-	dev->power.accounting_timestamp = jiffies;
 	INIT_WORK(&dev->power.work, pm_runtime_work);
 
 	dev->power.timer_expires = 0;
@@ -125,9 +125,12 @@ static ssize_t runtime_active_time_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	int ret;
+	u64 tmp;
 	spin_lock_irq(&dev->power.lock);
 	update_pm_runtime_accounting(dev);
-	ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
+	tmp = dev->power.active_time;
+	do_div(tmp, NSEC_PER_MSEC);
+	ret = sprintf(buf, "%llu\n", tmp);
 	spin_unlock_irq(&dev->power.lock);
 	return ret;
 }
@@ -138,10 +141,12 @@ static ssize_t runtime_suspended_time_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	int ret;
+	u64 tmp;
 	spin_lock_irq(&dev->power.lock);
 	update_pm_runtime_accounting(dev);
-	ret = sprintf(buf, "%i\n",
-		jiffies_to_msecs(dev->power.suspended_jiffies));
+	tmp = dev->power.suspended_time;
+	do_div(tmp, NSEC_PER_MSEC);
+	ret = sprintf(buf, "%llu\n", tmp);
 	spin_unlock_irq(&dev->power.lock);
 	return ret;
 }
@@ -648,6 +653,10 @@ int dpm_sysfs_add(struct device *dev)
 {
 	int rc;
 
+	/* No need to create PM sysfs if explicitly disabled. */
+	if (device_pm_not_required(dev))
+		return 0;
+
 	rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
 	if (rc)
 		return rc;
@@ -727,6 +736,8 @@ void rpm_sysfs_remove(struct device *dev)
 
 void dpm_sysfs_remove(struct device *dev)
 {
+	if (device_pm_not_required(dev))
+		return;
 	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
 	dev_pm_qos_constraints_destroy(dev);
 	rpm_sysfs_remove(dev);
@@ -783,7 +783,7 @@ void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
 EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
 
 /**
- * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
  * @dev: Device the wakeup event is related to.
  * @msec: Anticipated event processing time (in milliseconds).
  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
@@ -5,6 +5,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/pm_runtime.h>
 #include "i915_pmu.h"
 #include "intel_ringbuffer.h"
 #include "i915_drv.h"
@@ -478,7 +479,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
 	 * counter value.
	 */
 	spin_lock_irqsave(&i915->pmu.lock, flags);
-	spin_lock(&kdev->power.lock);
 
 	/*
 	 * After the above branch intel_runtime_pm_get_if_in_use failed
@@ -491,16 +491,13 @@ static u64 get_rc6(struct drm_i915_private *i915)
 	 * suspended and if not we cannot do better than report the last
 	 * known RC6 value.
 	 */
-	if (kdev->power.runtime_status == RPM_SUSPENDED) {
+	if (pm_runtime_status_suspended(kdev)) {
+		val = pm_runtime_suspended_time(kdev);
+
 		if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
-			i915->pmu.suspended_jiffies_last =
-						kdev->power.suspended_jiffies;
+			i915->pmu.suspended_time_last = val;
 
-		val = kdev->power.suspended_jiffies -
-		      i915->pmu.suspended_jiffies_last;
-		val += jiffies - kdev->power.accounting_timestamp;
-
-		val = jiffies_to_nsecs(val);
+		val -= i915->pmu.suspended_time_last;
 		val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
 
 		i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
@@ -510,7 +507,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
 		val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
 	}
 
-	spin_unlock(&kdev->power.lock);
 	spin_unlock_irqrestore(&i915->pmu.lock, flags);
 }
 
@@ -97,9 +97,9 @@ struct i915_pmu {
 	 */
 	struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
 	/**
-	 * @suspended_jiffies_last: Cached suspend time from PM core.
+	 * @suspended_time_last: Cached suspend time from PM core.
 	 */
-	unsigned long suspended_jiffies_last;
+	u64 suspended_time_last;
 	/**
 	 * @i915_attr: Memory block holding device attributes.
 	 */
@@ -1165,6 +1165,16 @@ static inline bool device_async_suspend_enabled(struct device *dev)
 	return !!dev->power.async_suspend;
 }
 
+static inline bool device_pm_not_required(struct device *dev)
+{
+	return dev->power.no_pm;
+}
+
+static inline void device_set_pm_not_required(struct device *dev)
+{
+	dev->power.no_pm = true;
+}
+
 static inline void dev_pm_syscore_device(struct device *dev, bool val)
 {
 #ifdef CONFIG_PM_SLEEP
@@ -592,6 +592,7 @@ struct dev_pm_info {
 	bool is_suspended:1;	/* Ditto */
 	bool is_noirq_suspended:1;
 	bool is_late_suspended:1;
+	bool no_pm:1;
 	bool early_init:1;	/* Owned by the PM core */
 	bool direct_complete:1;	/* Owned by the PM core */
 	u32 driver_flags;
@@ -633,9 +634,9 @@ struct dev_pm_info {
 	int runtime_error;
 	int autosuspend_delay;
 	u64 last_busy;
-	unsigned long active_jiffies;
-	unsigned long suspended_jiffies;
-	unsigned long accounting_timestamp;
+	u64 active_time;
+	u64 suspended_time;
+	u64 accounting_timestamp;
 #endif
 	struct pm_subsys_data *subsys_data;  /* Owned by the subsystem. */
 	void (*set_latency_tolerance)(struct device *, s32);
@@ -271,7 +271,7 @@ int genpd_dev_pm_attach(struct device *dev);
 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
 					  unsigned int index);
 struct device *genpd_dev_pm_attach_by_name(struct device *dev,
-					    char *name);
+					    const char *name);
 #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
 static inline int of_genpd_add_provider_simple(struct device_node *np,
 						struct generic_pm_domain *genpd)
@@ -324,7 +324,7 @@ static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev,
 }
 
 static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev,
-							  char *name)
+							  const char *name)
 {
 	return NULL;
 }
@@ -341,7 +341,7 @@ int dev_pm_domain_attach(struct device *dev, bool power_on);
 struct device *dev_pm_domain_attach_by_id(struct device *dev,
 					   unsigned int index);
 struct device *dev_pm_domain_attach_by_name(struct device *dev,
-					     char *name);
+					     const char *name);
 void dev_pm_domain_detach(struct device *dev, bool power_off);
 void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
 #else
@@ -355,7 +355,7 @@ static inline struct device *dev_pm_domain_attach_by_id(struct device *dev,
 	return NULL;
 }
 static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
-							   char *name)
+							   const char *name)
 {
 	return NULL;
 }
@@ -113,6 +113,8 @@ static inline bool pm_runtime_is_irq_safe(struct device *dev)
 	return dev->power.irq_safe;
 }
 
+extern u64 pm_runtime_suspended_time(struct device *dev);
+
 #else /* !CONFIG_PM */
 
 static inline bool queue_pm_work(struct work_struct *work) { return false; }
@@ -10,6 +10,7 @@
 
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/debugfs.h>
 #include <linux/energy_model.h>
 #include <linux/sched/topology.h>
 #include <linux/slab.h>
@@ -23,6 +24,60 @@ static DEFINE_PER_CPU(struct em_perf_domain *, em_data);
  */
 static DEFINE_MUTEX(em_pd_mutex);
 
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *rootdir;
+
+static void em_debug_create_cs(struct em_cap_state *cs, struct dentry *pd)
+{
+	struct dentry *d;
+	char name[24];
+
+	snprintf(name, sizeof(name), "cs:%lu", cs->frequency);
+
+	/* Create per-cs directory */
+	d = debugfs_create_dir(name, pd);
+	debugfs_create_ulong("frequency", 0444, d, &cs->frequency);
+	debugfs_create_ulong("power", 0444, d, &cs->power);
+	debugfs_create_ulong("cost", 0444, d, &cs->cost);
+}
+
+static int em_debug_cpus_show(struct seq_file *s, void *unused)
+{
+	seq_printf(s, "%*pbl\n", cpumask_pr_args(to_cpumask(s->private)));
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);
+
+static void em_debug_create_pd(struct em_perf_domain *pd, int cpu)
+{
+	struct dentry *d;
+	char name[8];
+	int i;
+
+	snprintf(name, sizeof(name), "pd%d", cpu);
+
+	/* Create the directory of the performance domain */
+	d = debugfs_create_dir(name, rootdir);
+
+	debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops);
+
+	/* Create a sub-directory for each capacity state */
+	for (i = 0; i < pd->nr_cap_states; i++)
+		em_debug_create_cs(&pd->table[i], d);
+}
+
+static int __init em_debug_init(void)
+{
+	/* Create /sys/kernel/debug/energy_model directory */
+	rootdir = debugfs_create_dir("energy_model", NULL);
+
+	return 0;
+}
+core_initcall(em_debug_init);
+#else /* CONFIG_DEBUG_FS */
+static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {}
+#endif
 static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
 					   struct em_data_callback *cb)
 {
@@ -102,6 +157,8 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
 	pd->nr_cap_states = nr_states;
 	cpumask_copy(to_cpumask(pd->cpus), span);
 
+	em_debug_create_pd(pd, cpu);
+
 	return pd;
 
 free_cs_table:
@@ -582,10 +582,8 @@ static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
 	qos->pm_qos_power_miscdev.name = qos->name;
 	qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
 
-	if (d) {
-		(void)debugfs_create_file(qos->name, S_IRUGO, d,
-					  (void *)qos, &pm_qos_debug_fops);
-	}
+	debugfs_create_file(qos->name, S_IRUGO, d, (void *)qos,
+			    &pm_qos_debug_fops);
 
 	return misc_register(&qos->pm_qos_power_miscdev);
 }
@@ -685,8 +683,6 @@ static int __init pm_qos_power_init(void)
 	BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
 
 	d = debugfs_create_dir("pm_qos", NULL);
-	if (IS_ERR_OR_NULL(d))
-		d = NULL;
 
 	for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
 		ret = register_pm_qos_misc(pm_qos_array[i], d);