Merge branch 'opp/genpd-pstate-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm

Pull Operating Performance Points (OPP) library changes for v4.18
from Viresh Kumar.

* 'opp/genpd-pstate-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
  PM / OPP: Remove dev_pm_opp_{un}register_get_pstate_helper()
  PM / OPP: Get performance state using genpd helper
  PM / Domain: Implement of_genpd_opp_to_performance_state()
  PM / Domain: Add support to parse domain's OPP table
  PM / Domain: Add struct device to genpd
  PM / OPP: Implement dev_pm_opp_get_of_node()
  PM / OPP: Implement of_dev_pm_opp_find_required_opp()
  PM / OPP: Implement dev_pm_opp_of_add_table_indexed()
  PM / OPP: "opp-hz" is optional for power domains
  PM / OPP: dt-bindings: Make "opp-hz" optional for power domains
  PM / OPP: dt-bindings: Rename "required-opp" as "required-opps"
  soc/tegra: pmc: Don't allocate struct tegra_powergate on stack
Rafael J. Wysocki 2018-05-14 23:12:48 +02:00
Parents 71f277a7bf 28fa4aca26
Commit 8ad17c8eb1
10 changed files with 395 additions and 183 deletions


@ -82,7 +82,10 @@ This defines voltage-current-frequency combinations along with other related
properties.
Required properties:
- opp-hz: Frequency in Hz, expressed as a 64-bit big-endian integer.
- opp-hz: Frequency in Hz, expressed as a 64-bit big-endian integer. This is a
required property for all device nodes but devices like power domains. The
power domain nodes must have another (implementation dependent) property which
uniquely identifies the OPP nodes.
Optional properties:
- opp-microvolt: voltage in micro Volts.
@ -159,7 +162,7 @@ Optional properties:
- status: Marks the node enabled/disabled.
- required-opp: This contains phandle to an OPP node in another device's OPP
- required-opps: This contains phandle to an OPP node in another device's OPP
table. It may contain an array of phandles, where each phandle points to an
OPP of a different device. It should not contain multiple phandles to the OPP
nodes in the same OPP table. This specifies the minimum required OPP of the


@ -127,7 +127,7 @@ inside a PM domain with index 0 of a power controller represented by a node
with the label "power".
Optional properties:
- required-opp: This contains phandle to an OPP node in another device's OPP
- required-opps: This contains phandle to an OPP node in another device's OPP
table. It may contain an array of phandles, where each phandle points to an
OPP of a different device. It should not contain multiple phandles to the OPP
nodes in the same OPP table. This specifies the minimum required OPP of the
@ -175,14 +175,14 @@ Example:
compatible = "foo,i-leak-current";
reg = <0x12350000 0x1000>;
power-domains = <&power 0>;
required-opp = <&domain0_opp_0>;
required-opps = <&domain0_opp_0>;
};
leaky-device1@12350000 {
compatible = "foo,i-leak-current";
reg = <0x12350000 0x1000>;
power-domains = <&power 1>;
required-opp = <&domain1_opp_1>;
required-opps = <&domain1_opp_1>;
};
[1]. Documentation/devicetree/bindings/power/domain-idle-state.txt


@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
@ -1691,6 +1692,9 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
return ret;
}
device_initialize(&genpd->dev);
dev_set_name(&genpd->dev, "%s", genpd->name);
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
@ -1887,14 +1891,33 @@ int of_genpd_add_provider_simple(struct device_node *np,
mutex_lock(&gpd_list_lock);
if (genpd_present(genpd)) {
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (!ret) {
genpd->provider = &np->fwnode;
genpd->has_provider = true;
if (!genpd_present(genpd))
goto unlock;
genpd->dev.of_node = np;
/* Parse genpd OPP table */
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table(&genpd->dev);
if (ret) {
dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
ret);
goto unlock;
}
}
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (ret) {
if (genpd->set_performance_state)
dev_pm_opp_of_remove_table(&genpd->dev);
goto unlock;
}
genpd->provider = &np->fwnode;
genpd->has_provider = true;
unlock:
mutex_unlock(&gpd_list_lock);
return ret;
@ -1909,6 +1932,7 @@ EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
int of_genpd_add_provider_onecell(struct device_node *np,
struct genpd_onecell_data *data)
{
struct generic_pm_domain *genpd;
unsigned int i;
int ret = -EINVAL;
@ -1921,13 +1945,27 @@ int of_genpd_add_provider_onecell(struct device_node *np,
data->xlate = genpd_xlate_onecell;
for (i = 0; i < data->num_domains; i++) {
if (!data->domains[i])
genpd = data->domains[i];
if (!genpd)
continue;
if (!genpd_present(data->domains[i]))
if (!genpd_present(genpd))
goto error;
data->domains[i]->provider = &np->fwnode;
data->domains[i]->has_provider = true;
genpd->dev.of_node = np;
/* Parse genpd OPP table */
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
if (ret) {
dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
i, ret);
goto error;
}
}
genpd->provider = &np->fwnode;
genpd->has_provider = true;
}
ret = genpd_add_provider(np, data->xlate, data);
@ -1940,10 +1978,16 @@ int of_genpd_add_provider_onecell(struct device_node *np,
error:
while (i--) {
if (!data->domains[i])
genpd = data->domains[i];
if (!genpd)
continue;
data->domains[i]->provider = NULL;
data->domains[i]->has_provider = false;
genpd->provider = NULL;
genpd->has_provider = false;
if (genpd->set_performance_state)
dev_pm_opp_of_remove_table(&genpd->dev);
}
mutex_unlock(&gpd_list_lock);
@ -1970,10 +2014,17 @@ void of_genpd_del_provider(struct device_node *np)
* provider, set the 'has_provider' to false
* so that the PM domain can be safely removed.
*/
list_for_each_entry(gpd, &gpd_list, gpd_list_node)
if (gpd->provider == &np->fwnode)
list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
if (gpd->provider == &np->fwnode) {
gpd->has_provider = false;
if (!gpd->set_performance_state)
continue;
dev_pm_opp_of_remove_table(&gpd->dev);
}
}
list_del(&cp->link);
of_node_put(cp->node);
kfree(cp);
@ -2346,6 +2397,54 @@ int of_genpd_parse_idle_states(struct device_node *dn,
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
/**
* of_genpd_opp_to_performance_state - Gets performance state of device's
* power domain corresponding to a DT node's "required-opps" property.
*
* @dev: Device for which the performance-state needs to be found.
* @opp_node: DT node where the "required-opps" property is present. This can be
* the device node itself (if it doesn't have an OPP table) or a node
* within the OPP table of a device (if device has an OPP table).
*
* Returns performance state corresponding to the "required-opps" property of
* a DT node. This calls platform specific genpd->opp_to_performance_state()
* callback to translate power domain OPP to performance state.
*
* Returns performance state on success and 0 on failure.
*/
unsigned int of_genpd_opp_to_performance_state(struct device *dev,
struct device_node *opp_node)
{
struct generic_pm_domain *genpd;
struct dev_pm_opp *opp;
int state = 0;
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return 0;
if (unlikely(!genpd->set_performance_state))
return 0;
genpd_lock(genpd);
opp = of_dev_pm_opp_find_required_opp(&genpd->dev, opp_node);
if (IS_ERR(opp)) {
state = PTR_ERR(opp);
goto unlock;
}
state = genpd->opp_to_performance_state(genpd, opp);
dev_pm_opp_put(opp);
unlock:
genpd_unlock(genpd);
return state;
}
EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state);
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
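
The hunks above add a struct device to each genpd and teach the provider-registration paths to parse the domain's own OPP table: of_genpd_add_provider_simple() and of_genpd_add_provider_onecell() now call dev_pm_opp_of_add_table() or dev_pm_opp_of_add_table_indexed() on genpd->dev whenever set_performance_state is implemented, and of_genpd_del_provider() removes the table again. A minimal provider sketch under those assumptions follows; the foo_* names and the trivial callback bodies are illustrative only, not part of this series.

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>

/* Hypothetical hardware hooks; only the registration flow mirrors this series. */
static int foo_power_on(struct generic_pm_domain *pd)
{
	return 0;
}

static int foo_power_off(struct generic_pm_domain *pd)
{
	return 0;
}

static int foo_set_performance_state(struct generic_pm_domain *pd,
				     unsigned int state)
{
	/* Program the hardware to the requested performance state here. */
	return 0;
}

/* A fuller translation callback is sketched after the opp/of.c hunks below. */
static unsigned int foo_opp_to_performance_state(struct generic_pm_domain *pd,
						 struct dev_pm_opp *opp)
{
	return 1;
}

static struct generic_pm_domain foo_pd = {
	.name				= "foo-domain",
	.power_on			= foo_power_on,
	.power_off			= foo_power_off,
	.set_performance_state		= foo_set_performance_state,
	.opp_to_performance_state	= foo_opp_to_performance_state,
};

static int foo_pd_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_genpd_init(&foo_pd, NULL, false);
	if (ret)
		return ret;

	/*
	 * Because set_performance_state is set, of_genpd_add_provider_simple()
	 * parses the domain's "operating-points-v2" table via
	 * dev_pm_opp_of_add_table(&foo_pd.dev) before registering the provider.
	 */
	return of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
}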


@ -33,8 +33,6 @@ LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
static void dev_pm_opp_get(struct dev_pm_opp *opp);
static struct opp_device *_find_opp_dev(const struct device *dev,
struct opp_table *opp_table)
{
@ -281,6 +279,23 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
int _get_opp_count(struct opp_table *opp_table)
{
struct dev_pm_opp *opp;
int count = 0;
mutex_lock(&opp_table->lock);
list_for_each_entry(opp, &opp_table->opp_list, node) {
if (opp->available)
count++;
}
mutex_unlock(&opp_table->lock);
return count;
}
/**
* dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
* @dev: device for which we do this operation
@ -291,25 +306,17 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
int dev_pm_opp_get_opp_count(struct device *dev)
{
struct opp_table *opp_table;
struct dev_pm_opp *temp_opp;
int count = 0;
int count;
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
count = PTR_ERR(opp_table);
dev_dbg(dev, "%s: OPP table not found (%d)\n",
__func__, count);
return count;
return 0;
}
mutex_lock(&opp_table->lock);
list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available)
count++;
}
mutex_unlock(&opp_table->lock);
count = _get_opp_count(opp_table);
dev_pm_opp_put_opp_table(opp_table);
return count;
@ -892,7 +899,7 @@ static void _opp_kref_release(struct kref *kref)
dev_pm_opp_put_opp_table(opp_table);
}
static void dev_pm_opp_get(struct dev_pm_opp *opp)
void dev_pm_opp_get(struct dev_pm_opp *opp)
{
kref_get(&opp->kref);
}
@ -985,6 +992,43 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
return true;
}
static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
struct opp_table *opp_table,
struct list_head **head)
{
struct dev_pm_opp *opp;
/*
* Insert new OPP in order of increasing frequency and discard if
* already present.
*
* Need to use &opp_table->opp_list in the condition part of the 'for'
* loop, don't replace it with head otherwise it will become an infinite
* loop.
*/
list_for_each_entry(opp, &opp_table->opp_list, node) {
if (new_opp->rate > opp->rate) {
*head = &opp->node;
continue;
}
if (new_opp->rate < opp->rate)
return 0;
/* Duplicate OPPs */
dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
__func__, opp->rate, opp->supplies[0].u_volt,
opp->available, new_opp->rate,
new_opp->supplies[0].u_volt, new_opp->available);
/* Should we compare voltages for all regulators here ? */
return opp->available &&
new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
}
return 0;
}
/*
* Returns:
* 0: On success. And appropriate error message for duplicate OPPs.
@ -996,49 +1040,22 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
* should be considered an error by the callers of _opp_add().
*/
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
struct opp_table *opp_table)
struct opp_table *opp_table, bool rate_not_available)
{
struct dev_pm_opp *opp;
struct list_head *head;
int ret;
/*
* Insert new OPP in order of increasing frequency and discard if
* already present.
*
* Need to use &opp_table->opp_list in the condition part of the 'for'
* loop, don't replace it with head otherwise it will become an infinite
* loop.
*/
mutex_lock(&opp_table->lock);
head = &opp_table->opp_list;
list_for_each_entry(opp, &opp_table->opp_list, node) {
if (new_opp->rate > opp->rate) {
head = &opp->node;
continue;
if (likely(!rate_not_available)) {
ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
if (ret) {
mutex_unlock(&opp_table->lock);
return ret;
}
if (new_opp->rate < opp->rate)
break;
/* Duplicate OPPs */
dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
__func__, opp->rate, opp->supplies[0].u_volt,
opp->available, new_opp->rate,
new_opp->supplies[0].u_volt, new_opp->available);
/* Should we compare voltages for all regulators here ? */
ret = opp->available &&
new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
mutex_unlock(&opp_table->lock);
return ret;
}
if (opp_table->get_pstate)
new_opp->pstate = opp_table->get_pstate(dev, new_opp->rate);
list_add(&new_opp->node, head);
mutex_unlock(&opp_table->lock);
@ -1104,7 +1121,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
new_opp->available = true;
new_opp->dynamic = dynamic;
ret = _opp_add(dev, new_opp, opp_table);
ret = _opp_add(dev, new_opp, opp_table, false);
if (ret) {
/* Don't return error for duplicate OPPs */
if (ret == -EBUSY)
@ -1550,81 +1567,6 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
/**
* dev_pm_opp_register_get_pstate_helper() - Register get_pstate() helper.
* @dev: Device for which the helper is getting registered.
* @get_pstate: Helper.
*
* TODO: Remove this callback after the same information is available via Device
* Tree.
*
* This allows a platform to initialize the performance states of individual
* OPPs for its devices, until we get similar information directly from DT.
*
* This must be called before the OPPs are initialized for the device.
*/
struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev,
int (*get_pstate)(struct device *dev, unsigned long rate))
{
struct opp_table *opp_table;
int ret;
if (!get_pstate)
return ERR_PTR(-EINVAL);
opp_table = dev_pm_opp_get_opp_table(dev);
if (!opp_table)
return ERR_PTR(-ENOMEM);
/* This should be called before OPPs are initialized */
if (WARN_ON(!list_empty(&opp_table->opp_list))) {
ret = -EBUSY;
goto err;
}
/* Already have genpd_performance_state set */
if (WARN_ON(opp_table->genpd_performance_state)) {
ret = -EBUSY;
goto err;
}
opp_table->genpd_performance_state = true;
opp_table->get_pstate = get_pstate;
return opp_table;
err:
dev_pm_opp_put_opp_table(opp_table);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_get_pstate_helper);
/**
* dev_pm_opp_unregister_get_pstate_helper() - Releases resources blocked for
* get_pstate() helper
* @opp_table: OPP table returned from dev_pm_opp_register_get_pstate_helper().
*
* Release resources blocked for platform specific get_pstate() helper.
*/
void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table)
{
if (!opp_table->genpd_performance_state) {
pr_err("%s: Doesn't have performance states set\n",
__func__);
return;
}
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
opp_table->genpd_performance_state = false;
opp_table->get_pstate = NULL;
dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_get_pstate_helper);
/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
* @dev: device for which we do this operation
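
The opp/core.c changes split duplicate detection out into _opp_is_duplicate() and make _opp_add() take a rate_not_available flag, but the caller-visible rules are unchanged: adding an OPP whose frequency and voltage match an existing, available one yields -EBUSY (which dev_pm_opp_add() deliberately maps to success), while the same frequency with a different voltage is rejected with -EEXIST. A caller-side sketch of that behaviour, with made-up frequencies and voltages:

#include <linux/device.h>
#include <linux/pm_opp.h>

/* 'dev' is assumed to be a valid device without a static OPP table. */
static int foo_add_example_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 1000000000, 975000);	/* 1 GHz @ 975 mV */
	if (ret)
		return ret;

	/* Exact duplicate: _opp_add() returns -EBUSY, dev_pm_opp_add() returns 0. */
	ret = dev_pm_opp_add(dev, 1000000000, 975000);
	if (ret)
		return ret;

	/* Same frequency, conflicting voltage: rejected with -EEXIST. */
	ret = dev_pm_opp_add(dev, 1000000000, 1000000);
	if (ret && ret != -EEXIST)
		return ret;

	return 0;
}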


@ -77,10 +77,21 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
{
struct dentry *pdentry = opp_table->dentry;
struct dentry *d;
unsigned long id;
char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */
/* Rate is unique to each OPP, use it to give opp-name */
snprintf(name, sizeof(name), "opp:%lu", opp->rate);
/*
* Get directory name for OPP.
*
* - Normally rate is unique to each OPP, use it to get unique opp-name.
* - For some devices rate isn't available, use index instead.
*/
if (likely(opp->rate))
id = opp->rate;
else
id = _get_opp_count(opp_table);
snprintf(name, sizeof(name), "opp:%lu", id);
/* Create per-opp directory */
d = debugfs_create_dir(name, pdentry);


@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/export.h>
@ -250,20 +251,17 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
/* Returns opp descriptor node for a device node, caller must
* do of_node_put() */
static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np)
static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
int index)
{
/*
* There should be only ONE phandle present in "operating-points-v2"
* property.
*/
return of_parse_phandle(np, "operating-points-v2", 0);
/* "operating-points-v2" can be an array for power domain providers */
return of_parse_phandle(np, "operating-points-v2", index);
}
/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
return _opp_of_get_opp_desc_node(dev->of_node);
return _opp_of_get_opp_desc_node(dev->of_node, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
@ -292,6 +290,7 @@ static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
u64 rate;
u32 val;
int ret;
bool rate_not_available = false;
new_opp = _opp_allocate(opp_table);
if (!new_opp)
@ -299,8 +298,21 @@ static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
ret = of_property_read_u64(np, "opp-hz", &rate);
if (ret < 0) {
dev_err(dev, "%s: opp-hz not found\n", __func__);
goto free_opp;
/* "opp-hz" is optional for devices like power domains. */
if (!of_find_property(dev->of_node, "#power-domain-cells",
NULL)) {
dev_err(dev, "%s: opp-hz not found\n", __func__);
goto free_opp;
}
rate_not_available = true;
} else {
/*
* Rate is defined as an unsigned long in clk API, and so
* casting explicitly to its type. Must be fixed once rate is 64
* bit guaranteed in clk API.
*/
new_opp->rate = (unsigned long)rate;
}
/* Check if the OPP supports hardware's hierarchy of versions or not */
@ -309,12 +321,6 @@ static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
goto free_opp;
}
/*
* Rate is defined as an unsigned long in clk API, and so casting
* explicitly to its type. Must be fixed once rate is 64 bit
* guaranteed in clk API.
*/
new_opp->rate = (unsigned long)rate;
new_opp->turbo = of_property_read_bool(np, "turbo-mode");
new_opp->np = np;
@ -324,11 +330,13 @@ static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
if (!of_property_read_u32(np, "clock-latency-ns", &val))
new_opp->clock_latency_ns = val;
new_opp->pstate = of_genpd_opp_to_performance_state(dev, np);
ret = opp_parse_supplies(new_opp, dev, opp_table);
if (ret)
goto free_opp;
ret = _opp_add(dev, new_opp, opp_table);
ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
if (ret) {
/* Don't return error for duplicate OPPs */
if (ret == -EBUSY)
@ -374,7 +382,8 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
struct device_node *np;
struct opp_table *opp_table;
int ret = 0, count = 0;
int ret = 0, count = 0, pstate_count = 0;
struct dev_pm_opp *opp;
opp_table = _managed_opp(opp_np);
if (opp_table) {
@ -408,6 +417,20 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
goto put_opp_table;
}
list_for_each_entry(opp, &opp_table->opp_list, node)
pstate_count += !!opp->pstate;
/* Either all or none of the nodes shall have performance state set */
if (pstate_count && pstate_count != count) {
dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
count, pstate_count);
ret = -ENOENT;
goto put_opp_table;
}
if (pstate_count)
opp_table->genpd_performance_state = true;
opp_table->np = opp_np;
if (of_property_read_bool(opp_np, "opp-shared"))
opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
@ -509,6 +532,41 @@ int dev_pm_opp_of_add_table(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
/**
* dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
* @dev: device pointer used to lookup OPP table.
* @index: Index number.
*
* Register the initial OPP table with the OPP library for given device only
* using the "operating-points-v2" property.
*
* Return:
* 0 On success OR
* Duplicate OPPs (both freq and volt are same) and opp->available
* -EEXIST Freq are same and volt are different OR
* Duplicate OPPs (both freq and volt are same) and !opp->available
* -ENOMEM Memory allocation failure
* -ENODEV when 'operating-points' property is not found or is invalid data
* in device node.
* -ENODATA when empty 'operating-points' property is found
* -EINVAL when invalid entries are found in opp-v2 table
*/
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
struct device_node *opp_np;
int ret;
opp_np = _opp_of_get_opp_desc_node(dev->of_node, index);
if (!opp_np)
return -ENODEV;
ret = _of_add_opp_table_v2(dev, opp_np);
of_node_put(opp_np);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
/* CPU device specific helpers */
/**
@ -613,7 +671,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
}
/* Get OPP descriptor node */
tmp_np = _opp_of_get_opp_desc_node(cpu_np);
tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
of_node_put(cpu_np);
if (!tmp_np) {
pr_err("%pOF: Couldn't find opp node\n", cpu_np);
@ -633,3 +691,76 @@ put_cpu_node:
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
/**
* of_dev_pm_opp_find_required_opp() - Search for required OPP.
* @dev: The device whose OPP node is referenced by the 'np' DT node.
* @np: Node that contains the "required-opps" property.
*
* Returns the OPP of the device 'dev', whose phandle is present in the "np"
* node. Although the "required-opps" property supports having multiple
* phandles, this helper routine only parses the very first phandle in the list.
*
* Return: Matching opp, else returns ERR_PTR in case of error and should be
* handled using IS_ERR.
*
* The callers are required to call dev_pm_opp_put() for the returned OPP after
* use.
*/
struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev,
struct device_node *np)
{
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ENODEV);
struct device_node *required_np;
struct opp_table *opp_table;
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table))
return ERR_CAST(opp_table);
required_np = of_parse_phandle(np, "required-opps", 0);
if (unlikely(!required_np)) {
dev_err(dev, "Unable to parse required-opps\n");
goto put_opp_table;
}
mutex_lock(&opp_table->lock);
list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available && temp_opp->np == required_np) {
opp = temp_opp;
/* Increment the reference count of OPP */
dev_pm_opp_get(opp);
break;
}
}
mutex_unlock(&opp_table->lock);
of_node_put(required_np);
put_opp_table:
dev_pm_opp_put_opp_table(opp_table);
return opp;
}
EXPORT_SYMBOL_GPL(of_dev_pm_opp_find_required_opp);
/**
* dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
* @opp: OPP for which the DT node has to be returned
*
* Return: DT node corresponding to the opp on success, NULL otherwise.
*
* The caller needs to put the node with of_node_put() after using it.
*/
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
{
if (IS_ERR_OR_NULL(opp)) {
pr_err("%s: Invalid parameters\n", __func__);
return NULL;
}
return of_node_get(opp->np);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
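
of_dev_pm_opp_find_required_opp() resolves the first "required-opps" phandle of a DT node into an OPP of the given device, and dev_pm_opp_get_of_node() hands back an OPP's DT node with a reference held. Together they let a genpd provider implement opp_to_performance_state() by reading an implementation-dependent property from the domain OPP node, as the updated bindings suggest. A sketch under that assumption; the "vendor,level" property name is hypothetical:

#include <linux/of.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>

static unsigned int foo_opp_to_performance_state(struct generic_pm_domain *genpd,
						 struct dev_pm_opp *opp)
{
	struct device_node *np;
	u32 level = 0;

	/* dev_pm_opp_get_of_node() takes a reference on the OPP's node. */
	np = dev_pm_opp_get_of_node(opp);
	if (!np)
		return 0;

	/* "vendor,level" stands in for whatever the platform binding defines. */
	of_property_read_u32(np, "vendor,level", &level);
	of_node_put(np);

	return level;
}

of_genpd_opp_to_performance_state() in the domain.c hunks is the glue: it looks up the required OPP of the genpd's own table with of_dev_pm_opp_find_required_opp() and passes it to a callback like the one above.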


@ -140,7 +140,6 @@ enum opp_table_access {
* @genpd_performance_state: Device's power domain support performance state.
* @set_opp: Platform specific set_opp callback
* @set_opp_data: Data to be passed to set_opp callback
* @get_pstate: Platform specific get_pstate callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
@ -178,7 +177,6 @@ struct opp_table {
int (*set_opp)(struct dev_pm_set_opp_data *data);
struct dev_pm_set_opp_data *set_opp_data;
int (*get_pstate)(struct device *dev, unsigned long rate);
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
@ -187,14 +185,16 @@ struct opp_table {
};
/* Routines internal to opp core */
void dev_pm_opp_get(struct dev_pm_opp *opp);
void _get_opp_table_kref(struct opp_table *opp_table);
int _get_opp_count(struct opp_table *opp_table);
struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, bool remove_all);
void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all);
struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
void _opp_free(struct dev_pm_opp *opp);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
struct opp_table *_add_opp_table(struct device *dev);


@ -559,22 +559,28 @@ EXPORT_SYMBOL(tegra_powergate_remove_clamping);
int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
struct reset_control *rst)
{
struct tegra_powergate pg;
struct tegra_powergate *pg;
int err;
if (!tegra_powergate_is_available(id))
return -EINVAL;
pg.id = id;
pg.clks = &clk;
pg.num_clks = 1;
pg.reset = rst;
pg.pmc = pmc;
pg = kzalloc(sizeof(*pg), GFP_KERNEL);
if (!pg)
return -ENOMEM;
err = tegra_powergate_power_up(&pg, false);
pg->id = id;
pg->clks = &clk;
pg->num_clks = 1;
pg->reset = rst;
pg->pmc = pmc;
err = tegra_powergate_power_up(pg, false);
if (err)
pr_err("failed to turn on partition %d: %d\n", id, err);
kfree(pg);
return err;
}
EXPORT_SYMBOL(tegra_powergate_sequence_power_up);


@ -47,8 +47,10 @@ struct genpd_power_state {
};
struct genpd_lock_ops;
struct dev_pm_opp;
struct generic_pm_domain {
struct device dev;
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
struct list_head master_links; /* Links with PM domain as a master */
@ -67,6 +69,8 @@ struct generic_pm_domain {
unsigned int performance_state; /* Aggregated max performance state */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
struct dev_pm_opp *opp);
int (*set_performance_state)(struct generic_pm_domain *genpd,
unsigned int state);
struct gpd_dev_ops dev_ops;
@ -243,6 +247,8 @@ extern int of_genpd_add_subdomain(struct of_phandle_args *parent,
extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
extern int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
extern unsigned int of_genpd_opp_to_performance_state(struct device *dev,
struct device_node *opp_node);
int genpd_dev_pm_attach(struct device *dev);
#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
@ -278,6 +284,13 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
return -ENODEV;
}
static inline unsigned int
of_genpd_opp_to_performance_state(struct device *dev,
struct device_node *opp_node)
{
return -ENODEV;
}
static inline int genpd_dev_pm_attach(struct device *dev)
{
return 0;


@ -125,8 +125,6 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name);
void dev_pm_opp_put_clkname(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev, int (*get_pstate)(struct device *dev, unsigned long rate));
void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
@ -247,14 +245,6 @@ static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device
static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {}
static inline struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev,
int (*get_pstate)(struct device *dev, unsigned long rate))
{
return ERR_PTR(-ENOTSUPP);
}
static inline void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table) {}
static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
return ERR_PTR(-ENOTSUPP);
@ -303,17 +293,25 @@ static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int dev_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index);
void dev_pm_opp_of_remove_table(struct device *dev);
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
return -ENOTSUPP;
}
static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
return -ENOTSUPP;
}
static inline void dev_pm_opp_of_remove_table(struct device *dev)
{
}
@ -336,6 +334,15 @@ static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device
{
return NULL;
}
static inline struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np)
{
return NULL;
}
static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
{
return NULL;
}
#endif
#endif /* __LINUX_OPP_H__ */