Merge branch 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm into pm-opp
Pull operating performance points (OPP) material for 4.20 from Viresh Kumar:

"This contains patches that fix several bugs in the OPP core and makes it
more stable."

* 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
  OPP: Pass OPP table to _of_add_opp_table_v{1|2}()
  OPP: Prevent creating multiple OPP tables for devices sharing OPP nodes
  OPP: Use a single mechanism to free the OPP table
  OPP: Don't remove dynamic OPPs from _dev_pm_opp_remove_table()
  cpufreq: mvebu: Remove OPPs using dev_pm_opp_remove()
  OPP: Create separate kref for static OPPs list
  OPP: Don't take OPP table's kref for static OPPs
  OPP: Parse OPP table's DT properties from _of_init_opp_table()
  OPP: Pass index to _of_init_opp_table()
  OPP: Protect dev_list with opp_table lock
  OPP: Don't try to remove all OPP tables on failure
  OPP: Free OPP table properly on performance state irregularities
drivers/cpufreq/mvebu-cpufreq.c:

@@ -84,9 +84,10 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
		ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
		if (ret) {
			dev_pm_opp_remove(cpu_dev, clk_get_rate(clk));
			clk_put(clk);
			dev_err(cpu_dev, "Failed to register OPPs\n");
			goto opp_register_failed;
			return ret;
		}

		ret = dev_pm_opp_set_sharing_cpus(cpu_dev,

@@ -99,11 +100,5 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
	platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
	return 0;

opp_register_failed:
	/* As registering has failed remove all the opp for all cpus */
	dev_pm_opp_cpumask_remove_table(cpu_possible_mask);

	return ret;
}
device_initcall(armada_xp_pmsu_cpufreq_init);
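The mvebu hunks above follow the pattern the series now requires for dynamically added OPPs: because _dev_pm_opp_remove_table() no longer drops dynamic OPPs, each successful dev_pm_opp_add() must be undone explicitly with dev_pm_opp_remove() on the error path. A minimal sketch of that pattern follows; the device pointer and frequencies are hypothetical placeholders, not taken from this diff.

	#include <linux/device.h>
	#include <linux/pm_opp.h>

	/* Sketch only: register two dynamic OPPs and unwind the first one if
	 * the second registration fails. */
	static int example_register_two_opps(struct device *dev)
	{
		int ret;

		ret = dev_pm_opp_add(dev, 1000000000, 0);	/* 1 GHz, no voltage */
		if (ret)
			return ret;

		ret = dev_pm_opp_add(dev, 500000000, 0);	/* 500 MHz, no voltage */
		if (ret) {
			/* The OPP core will not clean this up for us */
			dev_pm_opp_remove(dev, 1000000000);
			return ret;
		}

		return 0;
	}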
drivers/opp/core.c:

@@ -48,9 +48,14 @@ static struct opp_device *_find_opp_dev(const struct device *dev,
static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
	struct opp_table *opp_table;
	bool found;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (_find_opp_dev(dev, opp_table)) {
		mutex_lock(&opp_table->lock);
		found = !!_find_opp_dev(dev, opp_table);
		mutex_unlock(&opp_table->lock);

		if (found) {
			_get_opp_table_kref(opp_table);

			return opp_table;

@@ -754,8 +759,8 @@ static void _remove_opp_dev(struct opp_device *opp_dev,
	kfree(opp_dev);
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
						struct opp_table *opp_table)
{
	struct opp_device *opp_dev;
	int ret;

@@ -766,6 +771,7 @@ struct opp_device *_add_opp_dev(const struct device *dev,
	/* Initialize opp-dev */
	opp_dev->dev = dev;

	list_add(&opp_dev->node, &opp_table->dev_list);

	/* Create debugfs entries for the opp_table */

@@ -777,7 +783,19 @@ struct opp_device *_add_opp_dev(const struct device *dev,
	return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev)
struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	mutex_lock(&opp_table->lock);
	opp_dev = _add_opp_dev_unlocked(dev, opp_table);
	mutex_unlock(&opp_table->lock);

	return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;

@@ -791,6 +809,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev)
	if (!opp_table)
		return NULL;

	mutex_init(&opp_table->lock);
	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);

@@ -799,7 +818,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev)
		return NULL;
	}

	_of_init_opp_table(opp_table, dev);
	_of_init_opp_table(opp_table, dev, index);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);

@@ -812,7 +831,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev)
	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	mutex_init(&opp_table->lock);
	kref_init(&opp_table->kref);

	/* Secure the device table modification */

@@ -825,7 +843,7 @@ void _get_opp_table_kref(struct opp_table *opp_table)
	kref_get(&opp_table->kref);
}

struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;

@@ -836,31 +854,56 @@ struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
	if (!IS_ERR(opp_table))
		goto unlock;

	opp_table = _allocate_opp_table(dev);
	opp_table = _managed_opp(dev, index);
	if (opp_table) {
		if (!_add_opp_dev_unlocked(dev, opp_table)) {
			dev_pm_opp_put_opp_table(opp_table);
			opp_table = NULL;
		}
		goto unlock;
	}

	opp_table = _allocate_opp_table(dev, index);

unlock:
	mutex_unlock(&opp_table_lock);

	return opp_table;
}

struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
	return _opp_get_opp_table(dev, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);

struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
						   int index)
{
	return _opp_get_opp_table(dev, index);
}

static void _opp_table_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
	struct opp_device *opp_dev;
	struct opp_device *opp_dev, *temp;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);
	WARN_ON(!list_empty(&opp_table->opp_list));

	_remove_opp_dev(opp_dev, opp_table);
	list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
		/*
		 * The OPP table is getting removed, drop the performance state
		 * constraints.
		 */
		if (opp_table->genpd_performance_state)
			dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0);

		/* dev_list must be empty now */
		WARN_ON(!list_empty(&opp_table->dev_list));
		_remove_opp_dev(opp_dev, opp_table);
	}

	mutex_destroy(&opp_table->lock);
	list_del(&opp_table->node);

@@ -869,6 +912,33 @@ static void _opp_table_kref_release(struct kref *kref)
	mutex_unlock(&opp_table_lock);
}

void _opp_remove_all_static(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp, *tmp;

	list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
		if (!opp->dynamic)
			dev_pm_opp_put(opp);
	}

	opp_table->parsed_static_opps = false;
}

static void _opp_table_list_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table,
						   list_kref);

	_opp_remove_all_static(opp_table);
	mutex_unlock(&opp_table_lock);
}

void _put_opp_list_kref(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release,
		       &opp_table_lock);
}

void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,

@@ -896,7 +966,6 @@ static void _opp_kref_release(struct kref *kref)
	kfree(opp);

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);
}

void dev_pm_opp_get(struct dev_pm_opp *opp)

@@ -940,11 +1009,15 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
	if (found) {
		dev_pm_opp_put(opp);

		/* Drop the reference taken by dev_pm_opp_add() */
		dev_pm_opp_put_opp_table(opp_table);
	} else {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
	}

	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

@@ -1062,9 +1135,6 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	new_opp->opp_table = opp_table;
	kref_init(&new_opp->kref);

	/* Get a reference to the OPP table */
	_get_opp_table_kref(opp_table);

	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",

@@ -1543,8 +1613,9 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
		return -ENOMEM;

	ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);

@@ -1707,35 +1778,7 @@ int dev_pm_opp_unregister_notifier(struct device *dev,
}
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);

/*
 * Free OPPs either created using static entries present in DT or even the
 * dynamically added entries based on remove_all param.
 */
void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
			      bool remove_all)
{
	struct dev_pm_opp *opp, *tmp;

	/* Find if opp_table manages a single device */
	if (list_is_singular(&opp_table->dev_list)) {
		/* Free static OPPs */
		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
			if (remove_all || !opp->dynamic)
				dev_pm_opp_put(opp);
		}

		/*
		 * The OPP table is getting removed, drop the performance state
		 * constraints.
		 */
		if (opp_table->genpd_performance_state)
			dev_pm_genpd_set_performance_state(dev, 0);
	} else {
		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
	}
}

void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
void _dev_pm_opp_find_and_remove_table(struct device *dev)
{
	struct opp_table *opp_table;

@@ -1752,8 +1795,12 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
		return;
	}

	_dev_pm_opp_remove_table(opp_table, dev, remove_all);
	_put_opp_list_kref(opp_table);

	/* Drop reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);

	/* Drop reference taken while the OPP table was added */
	dev_pm_opp_put_opp_table(opp_table);
}

@@ -1766,6 +1813,6 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	_dev_pm_opp_find_and_remove_table(dev, true);
	_dev_pm_opp_find_and_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
drivers/opp/cpu.c:

@@ -108,7 +108,8 @@ void dev_pm_opp_free_cpufreq_table(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif	/* CONFIG_CPU_FREQ */

void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask,
				      int last_cpu)
{
	struct device *cpu_dev;
	int cpu;

@@ -116,6 +117,9 @@ void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		if (cpu == last_cpu)
			break;

		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,

@@ -123,10 +127,7 @@ void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
			continue;
		}

		if (of)
			dev_pm_opp_of_remove_table(cpu_dev);
		else
			dev_pm_opp_remove_table(cpu_dev);
		_dev_pm_opp_find_and_remove_table(cpu_dev);
	}
}

@@ -140,7 +141,7 @@ void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
 */
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, false);
	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);

@@ -222,8 +223,10 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
	cpumask_clear(cpumask);

	if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
		mutex_lock(&opp_table->lock);
		list_for_each_entry(opp_dev, &opp_table->dev_list, node)
			cpumask_set_cpu(opp_dev->dev->id, cpumask);
		mutex_unlock(&opp_table->lock);
	} else {
		cpumask_set_cpu(cpu_dev->id, cpumask);
	}
drivers/opp/of.c:

@@ -23,11 +23,32 @@
#include "opp.h"

static struct opp_table *_managed_opp(const struct device_node *np)
/*
 * Returns opp descriptor node for a device node, caller must
 * do of_node_put().
 */
static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
						      int index)
{
	/* "operating-points-v2" can be an array for power domain providers */
	return of_parse_phandle(np, "operating-points-v2", index);
}

/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
	return _opp_of_get_opp_desc_node(dev->of_node, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);

struct opp_table *_managed_opp(struct device *dev, int index)
{
	struct opp_table *opp_table, *managed_table = NULL;
	struct device_node *np;

	mutex_lock(&opp_table_lock);
	np = _opp_of_get_opp_desc_node(dev->of_node, index);
	if (!np)
		return NULL;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (opp_table->np == np) {

@@ -47,29 +68,45 @@ static struct opp_table *_managed_opp(const struct device_node *np)
		}
	}

	mutex_unlock(&opp_table_lock);
	of_node_put(np);

	return managed_table;
}

void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
			int index)
{
	struct device_node *np;
	struct device_node *np, *opp_np;
	u32 val;

	/*
	 * Only required for backward compatibility with v1 bindings, but isn't
	 * harmful for other cases. And so we do it unconditionally.
	 */
	np = of_node_get(dev->of_node);
	if (np) {
		u32 val;
	if (!np)
		return;

		if (!of_property_read_u32(np, "clock-latency", &val))
			opp_table->clock_latency_ns_max = val;
		of_property_read_u32(np, "voltage-tolerance",
				     &opp_table->voltage_tolerance_v1);
		of_node_put(np);
	}
	if (!of_property_read_u32(np, "clock-latency", &val))
		opp_table->clock_latency_ns_max = val;
	of_property_read_u32(np, "voltage-tolerance",
			     &opp_table->voltage_tolerance_v1);

	/* Get OPP table node */
	opp_np = _opp_of_get_opp_desc_node(np, index);
	of_node_put(np);

	if (!opp_np)
		return;

	if (of_property_read_bool(opp_np, "opp-shared"))
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	else
		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;

	opp_table->np = opp_np;

	of_node_put(opp_np);
}

static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,

@@ -245,26 +282,10 @@ free_microvolt:
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	_dev_pm_opp_find_and_remove_table(dev, false);
	_dev_pm_opp_find_and_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);

/* Returns opp descriptor node for a device node, caller must
 * do of_node_put() */
static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
						      int index)
{
	/* "operating-points-v2" can be an array for power domain providers */
	return of_parse_phandle(np, "operating-points-v2", index);
}

/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
	return _opp_of_get_opp_desc_node(dev->of_node, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);

/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @opp_table:	OPP table

@@ -378,43 +399,37 @@ free_opp:
}

/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
{
	struct device_node *np;
	struct opp_table *opp_table;
	int ret = 0, count = 0, pstate_count = 0;
	int ret, count = 0, pstate_count = 0;
	struct dev_pm_opp *opp;

	opp_table = _managed_opp(opp_np);
	if (opp_table) {
		/* OPPs are already managed */
		if (!_add_opp_dev(dev, opp_table))
			ret = -ENOMEM;
		goto put_opp_table;
	/* OPP table is already initialized for the device */
	if (opp_table->parsed_static_opps) {
		kref_get(&opp_table->list_kref);
		return 0;
	}

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return -ENOMEM;
	kref_init(&opp_table->list_kref);

	/* We have opp-table node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_np, np) {
	for_each_available_child_of_node(opp_table->np, np) {
		count++;

		ret = _opp_add_static_v2(opp_table, dev, np);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			_dev_pm_opp_remove_table(opp_table, dev, false);
			of_node_put(np);
			goto put_opp_table;
			goto put_list_kref;
		}
	}

	/* There should be one of more OPP defined */
	if (WARN_ON(!count)) {
		ret = -ENOENT;
		goto put_opp_table;
		goto put_list_kref;
	}

	list_for_each_entry(opp, &opp_table->opp_list, node)

@@ -425,28 +440,25 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
		dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
			count, pstate_count);
		ret = -ENOENT;
		goto put_opp_table;
		goto put_list_kref;
	}

	if (pstate_count)
		opp_table->genpd_performance_state = true;

	opp_table->np = opp_np;
	if (of_property_read_bool(opp_np, "opp-shared"))
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	else
		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
	opp_table->parsed_static_opps = true;

put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
	return 0;

put_list_kref:
	_put_opp_list_kref(opp_table);

	return ret;
}

/* Initializes OPP tables based on old-deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev)
static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
{
	struct opp_table *opp_table;
	const struct property *prop;
	const __be32 *val;
	int nr, ret = 0;

@@ -467,9 +479,7 @@ static int _of_add_opp_table_v1(struct device *dev)
		return -EINVAL;
	}

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return -ENOMEM;
	kref_init(&opp_table->list_kref);

	val = prop->value;
	while (nr) {

@@ -480,13 +490,12 @@ static int _of_add_opp_table_v1(struct device *dev)
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
				__func__, freq, ret);
			_dev_pm_opp_remove_table(opp_table, dev, false);
			break;
			_put_opp_list_kref(opp_table);
			return ret;
		}
		nr -= 2;
	}

	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}

@@ -509,24 +518,24 @@ static int _of_add_opp_table_v1(struct device *dev)
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct device_node *opp_np;
	struct opp_table *opp_table;
	int ret;

	/*
	 * OPPs have two version of bindings now. The older one is deprecated,
	 * try for the new binding first.
	 */
	opp_np = dev_pm_opp_of_get_opp_desc_node(dev);
	if (!opp_np) {
		/*
		 * Try old-deprecated bindings for backward compatibility with
		 * older dtbs.
		 */
		return _of_add_opp_table_v1(dev);
	}
	opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
	if (!opp_table)
		return -ENOMEM;

	ret = _of_add_opp_table_v2(dev, opp_np);
	of_node_put(opp_np);
	/*
	 * OPPs have two version of bindings now. Also try the old (v1)
	 * bindings for backward compatibility with older dtbs.
	 */
	if (opp_table->np)
		ret = _of_add_opp_table_v2(dev, opp_table);
	else
		ret = _of_add_opp_table_v1(dev, opp_table);

	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}

@@ -553,28 +562,29 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
 */
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
	struct device_node *opp_np;
	struct opp_table *opp_table;
	int ret, count;

again:
	opp_np = _opp_of_get_opp_desc_node(dev->of_node, index);
	if (!opp_np) {
		if (index) {
			/*
			 * If only one phandle is present, then the same OPP table
			 * applies for all index requests.
			 */
			count = of_count_phandle_with_args(dev->of_node,
							   "operating-points-v2", NULL);
			if (count == 1 && index) {
				index = 0;
				goto again;
			}
			if (count != 1)
				return -ENODEV;

		return -ENODEV;
			index = 0;
	}

	ret = _of_add_opp_table_v2(dev, opp_np);
	of_node_put(opp_np);
	opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
	if (!opp_table)
		return -ENOMEM;

	ret = _of_add_opp_table_v2(dev, opp_table);
	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}

@@ -591,7 +601,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
 */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, true);
	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);

@@ -626,7 +636,7 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
				__func__, cpu, ret);

			/* Free all other OPPs */
			dev_pm_opp_of_cpumask_remove_table(cpumask);
			_dev_pm_opp_cpumask_remove_table(cpumask, cpu);
			break;
		}
	}
drivers/opp/opp.h:

@@ -126,9 +126,11 @@ enum opp_table_access {
 * @dev_list:	list of devices that share these OPPs
 * @opp_list:	table of opps
 * @kref:	for reference count of the table.
 * @lock:	mutex protecting the opp_list.
 * @list_kref:	for reference count of the OPP list.
 * @lock:	mutex protecting the opp_list and dev_list.
 * @np:		struct device_node pointer for opp's DT node.
 * @clock_latency_ns_max: Max clock latency in nanoseconds.
 * @parsed_static_opps: True if OPPs are initialized from DT.
 * @shared_opp: OPP is shared between multiple devices.
 * @suspend_opp: Pointer to OPP to be used during device suspend.
 * @supported_hw: Array of version number to support.

@@ -156,6 +158,7 @@ struct opp_table {
	struct list_head dev_list;
	struct list_head opp_list;
	struct kref kref;
	struct kref list_kref;
	struct mutex lock;

	struct device_node *np;

@@ -164,6 +167,7 @@ struct opp_table {
	/* For backward compatibility with v1 bindings */
	unsigned int voltage_tolerance_v1;

	bool parsed_static_opps;
	enum opp_table_access shared_opp;
	struct dev_pm_opp *suspend_opp;

@@ -186,23 +190,26 @@ struct opp_table {

/* Routines internal to opp core */
void dev_pm_opp_get(struct dev_pm_opp *opp);
void _opp_remove_all_static(struct opp_table *opp_table);
void _get_opp_table_kref(struct opp_table *opp_table);
int _get_opp_count(struct opp_table *opp_table);
struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, bool remove_all);
void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all);
void _dev_pm_opp_find_and_remove_table(struct device *dev);
struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
void _opp_free(struct dev_pm_opp *opp);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
struct opp_table *_add_opp_table(struct device *dev);
void _put_opp_list_kref(struct opp_table *opp_table);

#ifdef CONFIG_OF
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev);
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);
struct opp_table *_managed_opp(struct device *dev, int index);
#else
static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {}
static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) {}
static inline struct opp_table *_managed_opp(struct device *dev, int index) { return NULL; }
#endif

#ifdef CONFIG_DEBUG_FS
include/linux/pm_opp.h:

@@ -79,6 +79,7 @@ struct dev_pm_set_opp_data {
#if defined(CONFIG_PM_OPP)

struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index);
void dev_pm_opp_put_opp_table(struct opp_table *opp_table);

unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);

@@ -136,6 +137,11 @@ static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
	return ERR_PTR(-ENOTSUPP);
}

static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}

static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
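The pm_opp.h hunk above exports the new indexed lookup to consumers. A minimal usage sketch follows; the device pointer and index are hypothetical placeholders, and the NULL/ERR_PTR handling mirrors the conventions visible in the diff (NULL on allocation failure, ERR_PTR(-ENOTSUPP) when CONFIG_PM_OPP is disabled).

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/pm_opp.h>

	/* Sketch only: take a reference to the OPP table described by the
	 * second "operating-points-v2" phandle of @dev, then drop it. */
	static int example_use_indexed_table(struct device *dev)
	{
		struct opp_table *opp_table;

		opp_table = dev_pm_opp_get_opp_table_indexed(dev, 1);
		if (IS_ERR_OR_NULL(opp_table))
			return opp_table ? PTR_ERR(opp_table) : -ENOMEM;

		/* ... program the device against this table ... */

		dev_pm_opp_put_opp_table(opp_table);
		return 0;
	}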