Merge branches 'clk-qcom-alpha-pll', 'clk-check-ops-ptr', 'clk-protect-rate' and 'clk-omap' into clk-next

* clk-qcom-alpha-pll:
  clk: qcom: add read-only alpha pll post divider operations
  clk: qcom: support for 2 bit PLL post divider
  clk: qcom: support Brammo type Alpha PLL
  clk: qcom: support Huayra type Alpha PLL
  clk: qcom: support for dynamic updating the PLL
  clk: qcom: support for alpha mode configuration
  clk: qcom: flag for 64 bit CONFIG_CTL
  clk: qcom: fix 16 bit alpha support calculation
  clk: qcom: support for alpha pll properties

* clk-check-ops-ptr:
  clk: check ops pointer on clock register

* clk-protect-rate:
  clk: fix set_rate_range when current rate is out of range
  clk: add clk_rate_exclusive api
  clk: cosmetic changes to clk_summary debugfs entry
  clk: add clock protection mechanism to clk core
  clk: use round rate to bail out early in set_rate
  clk: rework calls to round and determine rate callbacks
  clk: add clk_core_set_phase_nolock function
  clk: take the prepare lock out of clk_core_set_parent
  clk: fix incorrect usage of ENOSYS

* clk-omap:
  clk: ti: Drop legacy clk-3xxx-legacy code
Stephen Boyd 2018-01-26 16:41:39 -08:00
Commit 74b48999b1
17 changed files with 994 additions and 5254 deletions


@ -62,6 +62,7 @@ struct clk_core {
bool orphan;
unsigned int enable_count;
unsigned int prepare_count;
unsigned int protect_count;
unsigned long min_rate;
unsigned long max_rate;
unsigned long accuracy;
@ -86,6 +87,7 @@ struct clk {
const char *con_id;
unsigned long min_rate;
unsigned long max_rate;
unsigned int exclusive_count;
struct hlist_node clks_node;
};
@ -170,6 +172,11 @@ static void clk_enable_unlock(unsigned long flags)
spin_unlock_irqrestore(&enable_lock, flags);
}
static bool clk_core_rate_is_protected(struct clk_core *core)
{
return core->protect_count;
}
static bool clk_core_is_prepared(struct clk_core *core)
{
bool ret = false;
@ -381,6 +388,11 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
return clk_core_is_prepared(hw->core);
}
bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
return clk_core_rate_is_protected(hw->core);
}
bool clk_hw_is_enabled(const struct clk_hw *hw)
{
return clk_core_is_enabled(hw->core);
@ -519,6 +531,139 @@ EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
/*** clk api ***/
static void clk_core_rate_unprotect(struct clk_core *core)
{
lockdep_assert_held(&prepare_lock);
if (!core)
return;
if (WARN_ON(core->protect_count == 0))
return;
if (--core->protect_count > 0)
return;
clk_core_rate_unprotect(core->parent);
}
static int clk_core_rate_nuke_protect(struct clk_core *core)
{
int ret;
lockdep_assert_held(&prepare_lock);
if (!core)
return -EINVAL;
if (core->protect_count == 0)
return 0;
ret = core->protect_count;
core->protect_count = 1;
clk_core_rate_unprotect(core);
return ret;
}
/**
* clk_rate_exclusive_put - release exclusivity over clock rate control
* @clk: the clk over which the exclusivity is released
*
* clk_rate_exclusive_put() completes a critical section during which a clock
* consumer cannot tolerate any other consumer making any operation on the
* clock which could result in a rate change or rate glitch. Exclusive clocks
* cannot have their rate changed, either directly or indirectly due to changes
* further up the parent chain of clocks. As a result, clocks up the parent
* chain are also placed under the exclusive control of the calling consumer.
*
* If exclusivity is claimed more than once on a clock, even by the same
* consumer, the rate effectively gets locked, as exclusivity can't be
* preempted.
*
* Calls to clk_rate_exclusive_put() must be balanced with calls to
* clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
* error status.
*/
void clk_rate_exclusive_put(struct clk *clk)
{
if (!clk)
return;
clk_prepare_lock();
/*
* if there is something wrong with this consumer protect count, stop
* here before messing with the provider
*/
if (WARN_ON(clk->exclusive_count <= 0))
goto out;
clk_core_rate_unprotect(clk->core);
clk->exclusive_count--;
out:
clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
static void clk_core_rate_protect(struct clk_core *core)
{
lockdep_assert_held(&prepare_lock);
if (!core)
return;
if (core->protect_count == 0)
clk_core_rate_protect(core->parent);
core->protect_count++;
}
static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
lockdep_assert_held(&prepare_lock);
if (!core)
return;
if (count == 0)
return;
clk_core_rate_protect(core);
core->protect_count = count;
}
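Taken together, the two helpers above form a save/restore pair: nuke collapses any nesting to a single level and then drops it, while restore re-applies the exact saved count. A minimal sketch of the intended call pattern, mirroring what clk_core_req_round_rate_nolock() does later in this diff (the surrounding code is hypothetical):

	cnt = clk_core_rate_nuke_protect(core);
	if (cnt < 0)
		return cnt;

	/* ... operate on the clock as if it were completely unprotected ... */

	/* put back the saved protect count on core and up its parent chain */
	clk_core_rate_restore_protect(core, cnt);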
/**
* clk_rate_exclusive_get - get exclusivity over the clk rate control
* @clk: the clk over which the exclusivity of rate control is requested
*
* clk_rate_exclusive_get() begins a critical section during which a clock
* consumer cannot tolerate any other consumer making any operation on the
* clock which could result in a rate change or rate glitch. Exclusive clocks
* cannot have their rate changed, either directly or indirectly due to changes
* further up the parent chain of clocks. As a result, clocks up the parent
* chain are also placed under the exclusive control of the calling consumer.
*
* If exclusivity is claimed more than once on a clock, even by the same
* consumer, the rate effectively gets locked, as exclusivity can't be
* preempted.
*
* Calls to clk_rate_exclusive_get() should be balanced with calls to
* clk_rate_exclusive_put(). Calls to this function may sleep.
* Returns 0 on success, -EERROR otherwise
*/
int clk_rate_exclusive_get(struct clk *clk)
{
if (!clk)
return 0;
clk_prepare_lock();
clk_core_rate_protect(clk->core);
clk->exclusive_count++;
clk_prepare_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
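For illustration, a hedged consumer-side sketch of the new API; the driver function and the 148.5 MHz rate are hypothetical, while clk_rate_exclusive_get()/clk_rate_exclusive_put() and clk_set_rate() are the calls added or touched by this series:

#include <linux/clk.h>

static int example_start_stream(struct clk *clk)
{
	int ret;

	/* protect clk and its whole parent chain against rate glitches */
	ret = clk_rate_exclusive_get(clk);
	if (ret)
		return ret;

	/* still allowed for us: clk_set_rate() lifts our own protection */
	ret = clk_set_rate(clk, 148500000);
	if (ret) {
		clk_rate_exclusive_put(clk);
		return ret;
	}

	/* ... other consumers now get -EBUSY on rate or parent changes ... */

	clk_rate_exclusive_put(clk);	/* balance the get */
	return 0;
}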
static void clk_core_unprepare(struct clk_core *core)
{
lockdep_assert_held(&prepare_lock);
@ -905,10 +1050,9 @@ static int clk_disable_unused(void)
}
late_initcall_sync(clk_disable_unused);
static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request *req)
static int clk_core_determine_round_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
struct clk_core *parent;
long rate;
lockdep_assert_held(&prepare_lock);
@ -916,6 +1060,38 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
if (!core)
return 0;
/*
* At this point, core protection will be disabled
* - if the provider is not protected at all
* - or if the calling consumer is the only one which has exclusivity
*   over the provider
*/
if (clk_core_rate_is_protected(core)) {
req->rate = core->rate;
} else if (core->ops->determine_rate) {
return core->ops->determine_rate(core->hw, req);
} else if (core->ops->round_rate) {
rate = core->ops->round_rate(core->hw, req->rate,
&req->best_parent_rate);
if (rate < 0)
return rate;
req->rate = rate;
} else {
return -EINVAL;
}
return 0;
}
static void clk_core_init_rate_req(struct clk_core * const core,
struct clk_rate_request *req)
{
struct clk_core *parent;
if (WARN_ON(!core || !req))
return;
parent = core->parent;
if (parent) {
req->best_parent_hw = parent->hw;
@ -924,22 +1100,32 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
req->best_parent_hw = NULL;
req->best_parent_rate = 0;
}
}
if (core->ops->determine_rate) {
return core->ops->determine_rate(core->hw, req);
} else if (core->ops->round_rate) {
rate = core->ops->round_rate(core->hw, req->rate,
&req->best_parent_rate);
if (rate < 0)
return rate;
static bool clk_core_can_round(struct clk_core * const core)
{
if (core->ops->determine_rate || core->ops->round_rate)
return true;
req->rate = rate;
} else if (core->flags & CLK_SET_RATE_PARENT) {
return clk_core_round_rate_nolock(parent, req);
} else {
req->rate = core->rate;
}
return false;
}
static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
clk_core_init_rate_req(core, req);
if (clk_core_can_round(core))
return clk_core_determine_round_nolock(core, req);
else if (core->flags & CLK_SET_RATE_PARENT)
return clk_core_round_rate_nolock(core->parent, req);
req->rate = core->rate;
return 0;
}
@ -996,10 +1182,17 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
clk_prepare_lock();
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
req.rate = rate;
ret = clk_core_round_rate_nolock(clk->core, &req);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
if (ret)
@ -1432,34 +1625,23 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
clk_core_get_boundaries(core, &min_rate, &max_rate);
/* find the closest rate and parent clk/rate */
if (core->ops->determine_rate) {
if (clk_core_can_round(core)) {
struct clk_rate_request req;
req.rate = rate;
req.min_rate = min_rate;
req.max_rate = max_rate;
if (parent) {
req.best_parent_hw = parent->hw;
req.best_parent_rate = parent->rate;
} else {
req.best_parent_hw = NULL;
req.best_parent_rate = 0;
}
ret = core->ops->determine_rate(core->hw, &req);
clk_core_init_rate_req(core, &req);
ret = clk_core_determine_round_nolock(core, &req);
if (ret < 0)
return NULL;
best_parent_rate = req.best_parent_rate;
new_rate = req.rate;
parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
} else if (core->ops->round_rate) {
ret = core->ops->round_rate(core->hw, rate,
&best_parent_rate);
if (ret < 0)
return NULL;
new_rate = ret;
if (new_rate < min_rate || new_rate > max_rate)
return NULL;
} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
@ -1641,25 +1823,58 @@ static void clk_change_rate(struct clk_core *core)
clk_pm_runtime_put(core);
}
static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
unsigned long req_rate)
{
int ret, cnt;
struct clk_rate_request req;
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
/* simulate what the rate would be if it could be freely set */
cnt = clk_core_rate_nuke_protect(core);
if (cnt < 0)
return cnt;
clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
req.rate = req_rate;
ret = clk_core_round_rate_nolock(core, &req);
/* restore the protection */
clk_core_rate_restore_protect(core, cnt);
return ret ? 0 : req.rate;
}
static int clk_core_set_rate_nolock(struct clk_core *core,
unsigned long req_rate)
{
struct clk_core *top, *fail_clk;
unsigned long rate = req_rate;
unsigned long rate;
int ret = 0;
if (!core)
return 0;
rate = clk_core_req_round_rate_nolock(core, req_rate);
/* bail early if nothing to do */
if (rate == clk_core_get_rate_nolock(core))
return 0;
/* fail on a direct rate set of a protected provider */
if (clk_core_rate_is_protected(core))
return -EBUSY;
if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
return -EBUSY;
/* calculate new rates and get the topmost changed clock */
top = clk_calc_new_rates(core, rate);
top = clk_calc_new_rates(core, req_rate);
if (!top)
return -EINVAL;
@ -1718,14 +1933,67 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
ret = clk_core_set_rate_nolock(clk->core, rate);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
/**
* clk_set_rate_exclusive - specify a new rate and get exclusive control
* @clk: the clk whose rate is being changed
* @rate: the new rate for clk
*
* This is a combination of clk_set_rate() and clk_rate_exclusive_get()
* within a critical section
*
* This can be used initially to ensure that at least one consumer is
* satisfied when several consumers are competing for exclusivity over the
* same clock provider.
*
* The exclusivity is not applied if setting the rate failed.
*
* Calls to clk_rate_exclusive_get() should be balanced with calls to
* clk_rate_exclusive_put().
*
* Returns 0 on success, -EERROR otherwise.
*/
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
int ret;
if (!clk)
return 0;
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
/*
* The temporary protection removal is intentionally not done here.
* This function is meant to be used instead of clk_rate_exclusive_get(),
* so it protects the clock provider itself before the consumer code path
* resumes.
*/
ret = clk_core_set_rate_nolock(clk->core, rate);
if (!ret) {
clk_core_rate_protect(clk->core);
clk->exclusive_count++;
}
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
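Under the same assumptions as the sketch after clk_rate_exclusive_get() above, this helper collapses the get-then-set sequence into one critical section:

static int example_start_stream_oneshot(struct clk *clk)
{
	int ret;

	/* set the rate and claim exclusivity in a single critical section */
	ret = clk_set_rate_exclusive(clk, 148500000);	/* hypothetical rate */
	if (ret)
		return ret;	/* exclusivity is not held when set_rate failed */

	/* ... */

	clk_rate_exclusive_put(clk);	/* the get was implicit, the put is not */
	return 0;
}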
/**
* clk_set_rate_range - set a rate range for a clock source
* @clk: clock source
@ -1737,6 +2005,7 @@ EXPORT_SYMBOL_GPL(clk_set_rate);
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
int ret = 0;
unsigned long old_min, old_max, rate;
if (!clk)
return 0;
@ -1750,12 +2019,46 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
clk_prepare_lock();
if (min != clk->min_rate || max != clk->max_rate) {
clk->min_rate = min;
clk->max_rate = max;
ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
/* Save the current values in case we need to roll back the change */
old_min = clk->min_rate;
old_max = clk->max_rate;
clk->min_rate = min;
clk->max_rate = max;
rate = clk_core_get_rate_nolock(clk->core);
if (rate < min || rate > max) {
/*
* FIXME:
* We are in a bit of trouble here, the current rate is outside the
* requested range. We are going to try to request the appropriate
* range boundary but there is a catch. It may fail for the
* usual reasons (clock broken, clock protected, etc) but also
* because:
* - round_rate() was not favorable and fell on the wrong
*   side of the boundary
* - the determine_rate() callback does not really check for
*   this corner case when determining the rate
*/
if (rate < min)
rate = min;
else
rate = max;
ret = clk_core_set_rate_nolock(clk->core, rate);
if (ret) {
/* roll back the changes */
clk->min_rate = old_min;
clk->max_rate = old_max;
}
}
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
return ret;
@ -1876,32 +2179,31 @@ bool clk_has_parent(struct clk *clk, struct clk *parent)
}
EXPORT_SYMBOL_GPL(clk_has_parent);
static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
static int clk_core_set_parent_nolock(struct clk_core *core,
struct clk_core *parent)
{
int ret = 0;
int p_index = 0;
unsigned long p_rate = 0;
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
if (core->parent == parent)
goto out;
return 0;
/* verify ops for multi-parent clks */
if ((core->num_parents > 1) && (!core->ops->set_parent)) {
ret = -ENOSYS;
goto out;
}
if (core->num_parents > 1 && !core->ops->set_parent)
return -EPERM;
/* check that we are allowed to re-parent if the clock is in use */
if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
ret = -EBUSY;
goto out;
}
if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
return -EBUSY;
if (clk_core_rate_is_protected(core))
return -EBUSY;
/* try finding the new parent index */
if (parent) {
@ -1909,15 +2211,14 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
if (p_index < 0) {
pr_debug("%s: clk %s can not be parent of clk %s\n",
__func__, parent->name, core->name);
ret = p_index;
goto out;
return p_index;
}
p_rate = parent->rate;
}
ret = clk_pm_runtime_get(core);
if (ret)
goto out;
return ret;
/* propagate PRE_RATE_CHANGE notifications */
ret = __clk_speculate_rates(core, p_rate);
@ -1939,8 +2240,6 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
runtime_put:
clk_pm_runtime_put(core);
out:
clk_prepare_unlock();
return ret;
}
@ -1964,13 +2263,50 @@ out:
*/
int clk_set_parent(struct clk *clk, struct clk *parent)
{
int ret;
if (!clk)
return 0;
return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
clk_prepare_lock();
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
ret = clk_core_set_parent_nolock(clk->core,
parent ? parent->core : NULL);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
{
int ret = -EINVAL;
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
if (clk_core_rate_is_protected(core))
return -EBUSY;
trace_clk_set_phase(core, degrees);
if (core->ops->set_phase)
ret = core->ops->set_phase(core->hw, degrees);
trace_clk_set_phase_complete(core, degrees);
return ret;
}
/**
* clk_set_phase - adjust the phase shift of a clock signal
* @clk: clock signal source
@ -1993,7 +2329,7 @@ EXPORT_SYMBOL_GPL(clk_set_parent);
*/
int clk_set_phase(struct clk *clk, int degrees)
{
int ret = -EINVAL;
int ret;
if (!clk)
return 0;
@ -2005,15 +2341,13 @@ int clk_set_phase(struct clk *clk, int degrees)
clk_prepare_lock();
trace_clk_set_phase(clk->core, degrees);
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
if (clk->core->ops->set_phase)
ret = clk->core->ops->set_phase(clk->core->hw, degrees);
ret = clk_core_set_phase_nolock(clk->core, degrees);
trace_clk_set_phase_complete(clk->core, degrees);
if (!ret)
clk->core->phase = degrees;
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
@ -2101,11 +2435,12 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
if (!c)
return;
seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, clk_core_get_rate(c),
clk_core_get_accuracy(c), clk_core_get_phase(c));
c->enable_count, c->prepare_count, c->protect_count,
clk_core_get_rate(c), clk_core_get_accuracy(c),
clk_core_get_phase(c));
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@ -2127,7 +2462,8 @@ static int clk_summary_show(struct seq_file *s, void *data)
struct clk_core *c;
struct hlist_head **lists = (struct hlist_head **)s->private;
seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
seq_puts(s, "                                 enable  prepare  protect                                \n");
seq_puts(s, "   clock                          count    count    count        rate   accuracy   phase\n");
seq_puts(s, "----------------------------------------------------------------------------------------\n");
clk_prepare_lock();
@ -2163,6 +2499,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
seq_printf(s, "\"%s\": { ", c->name);
seq_printf(s, "\"enable_count\": %d,", c->enable_count);
seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
seq_printf(s, "\"protect_count\": %d,", c->protect_count);
seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
@ -2293,6 +2630,11 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
if (!d)
goto err_out;
d = debugfs_create_u32("clk_protect_count", S_IRUGO, core->dentry,
(u32 *)&core->protect_count);
if (!d)
goto err_out;
d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
(u32 *)&core->notifier_count);
if (!d)
@ -2683,7 +3025,13 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
ret = -ENOMEM;
goto fail_name;
}
if (WARN_ON(!hw->init->ops)) {
ret = -EINVAL;
goto fail_ops;
}
core->ops = hw->init->ops;
if (dev && pm_runtime_enabled(dev))
core->dev = dev;
if (dev && dev->driver)
@ -2745,6 +3093,7 @@ fail_parent_names_copy:
kfree_const(core->parent_names[i]);
kfree(core->parent_names);
fail_parent_names:
fail_ops:
kfree_const(core->name);
fail_name:
kfree(core);
@ -2856,7 +3205,7 @@ void clk_unregister(struct clk *clk)
/* Reparent all children to the orphan list. */
hlist_for_each_entry_safe(child, t, &clk->core->children,
child_node)
clk_core_set_parent(child, NULL);
clk_core_set_parent_nolock(child, NULL);
}
hlist_del_init(&clk->core->child_node);
@ -2864,6 +3213,11 @@ void clk_unregister(struct clk *clk)
if (clk->core->prepare_count)
pr_warn("%s: unregistering prepared clock: %s\n",
__func__, clk->core->name);
if (clk->core->protect_count)
pr_warn("%s: unregistering protected clock: %s\n",
__func__, clk->core->name);
kref_put(&clk->core->ref, __clk_release);
unlock:
clk_prepare_unlock();
@ -3022,6 +3376,18 @@ void __clk_put(struct clk *clk)
clk_prepare_lock();
/*
* Before calling clk_put, all calls to clk_rate_exclusive_get() from a
* given user should have been balanced with calls to
* clk_rate_exclusive_put() by that same consumer.
*/
if (WARN_ON(clk->exclusive_count)) {
/* We voiced our concern, let's sanitize the situation */
clk->core->protect_count -= (clk->exclusive_count - 1);
clk_core_rate_unprotect(clk->core);
clk->exclusive_count = 0;
}
hlist_del(&clk->clks_node);
if (clk->min_rate > clk->core->req_rate ||
clk->max_rate < clk->core->req_rate)


@ -20,7 +20,7 @@
#include "clk-alpha-pll.h"
#include "common.h"
#define PLL_MODE 0x00
#define PLL_MODE(p) ((p)->offset + 0x0)
# define PLL_OUTCTRL BIT(0)
# define PLL_BYPASSNL BIT(1)
# define PLL_RESET_N BIT(2)
@ -32,35 +32,87 @@
# define PLL_VOTE_FSM_ENA BIT(20)
# define PLL_FSM_ENA BIT(20)
# define PLL_VOTE_FSM_RESET BIT(21)
# define PLL_UPDATE BIT(22)
# define PLL_UPDATE_BYPASS BIT(23)
# define PLL_OFFLINE_ACK BIT(28)
# define ALPHA_PLL_ACK_LATCH BIT(29)
# define PLL_ACTIVE_FLAG BIT(30)
# define PLL_LOCK_DET BIT(31)
#define PLL_L_VAL 0x04
#define PLL_ALPHA_VAL 0x08
#define PLL_ALPHA_VAL_U 0x0c
#define PLL_L_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_L_VAL])
#define PLL_ALPHA_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL])
#define PLL_ALPHA_VAL_U(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL_U])
#define PLL_USER_CTL 0x10
#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
# define PLL_POST_DIV_SHIFT 8
# define PLL_POST_DIV_MASK 0xf
# define PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0)
# define PLL_ALPHA_EN BIT(24)
# define PLL_ALPHA_MODE BIT(25)
# define PLL_VCO_SHIFT 20
# define PLL_VCO_MASK 0x3
#define PLL_USER_CTL_U 0x14
#define PLL_USER_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL_U])
#define PLL_CONFIG_CTL 0x18
#define PLL_CONFIG_CTL_U 0x20
#define PLL_TEST_CTL 0x1c
#define PLL_TEST_CTL_U 0x20
#define PLL_STATUS 0x24
#define PLL_CONFIG_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL])
#define PLL_CONFIG_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U])
#define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[CLK_ALPHA_PLL_TYPE_DEFAULT] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
[PLL_OFF_USER_CTL] = 0x10,
[PLL_OFF_USER_CTL_U] = 0x14,
[PLL_OFF_CONFIG_CTL] = 0x18,
[PLL_OFF_TEST_CTL] = 0x1c,
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
[CLK_ALPHA_PLL_TYPE_HUAYRA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x10,
[PLL_OFF_CONFIG_CTL] = 0x14,
[PLL_OFF_CONFIG_CTL_U] = 0x18,
[PLL_OFF_TEST_CTL] = 0x1c,
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
[CLK_ALPHA_PLL_TYPE_BRAMMO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
[PLL_OFF_USER_CTL] = 0x10,
[PLL_OFF_CONFIG_CTL] = 0x18,
[PLL_OFF_TEST_CTL] = 0x1c,
[PLL_OFF_STATUS] = 0x24,
},
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
/*
* Even though 40 bits are present, use only 32 for ease of calculation.
*/
#define ALPHA_REG_BITWIDTH 40
#define ALPHA_BITWIDTH 32
#define ALPHA_16BIT_MASK 0xffff
#define ALPHA_REG_16BIT_WIDTH 16
#define ALPHA_BITWIDTH 32U
#define ALPHA_SHIFT(w) min(w, ALPHA_BITWIDTH)
#define PLL_HUAYRA_M_WIDTH 8
#define PLL_HUAYRA_M_SHIFT 8
#define PLL_HUAYRA_M_MASK 0xff
#define PLL_HUAYRA_N_SHIFT 0
#define PLL_HUAYRA_N_MASK 0xff
#define PLL_HUAYRA_ALPHA_WIDTH 16
#define pll_alpha_width(p) \
((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
ALPHA_REG_BITWIDTH : ALPHA_REG_16BIT_WIDTH)
#define pll_has_64bit_config(p) ((PLL_CONFIG_CTL_U(p) - PLL_CONFIG_CTL(p)) == 4)
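To make the table-driven addressing concrete, using only values from the tables above: with the DEFAULT layout and .offset = 0x21000 (as in the gcc data further down), PLL_L_VAL(pll) resolves to 0x21000 + 0x04 = 0x21004. The two probe macros key off register spacing, and entries omitted from a table default to zero: DEFAULT and BRAMMO place ALPHA_VAL_U four bytes after ALPHA_VAL, so pll_alpha_width() reports 40 bits for them, while HUAYRA has no ALPHA_VAL_U entry and falls back to 16 bits; conversely, only HUAYRA places CONFIG_CTL_U four bytes after CONFIG_CTL, so pll_has_64bit_config() is true only for that type.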
#define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \
struct clk_alpha_pll, clkr)
@ -71,18 +123,17 @@
static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
const char *action)
{
u32 val, off;
u32 val;
int count;
int ret;
const char *name = clk_hw_get_name(&pll->clkr.hw);
off = pll->offset;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
for (count = 100; count > 0; count--) {
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
if (inverse && !(val & mask))
@ -109,16 +160,30 @@ static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
#define wait_for_pll_offline(pll) \
wait_for_pll(pll, PLL_OFFLINE_ACK, 0, "offline")
#define wait_for_pll_update(pll) \
wait_for_pll(pll, PLL_UPDATE, 1, "update")
#define wait_for_pll_update_ack_set(pll) \
wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 0, "update_ack_set")
#define wait_for_pll_update_ack_clear(pll) \
wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 1, "update_ack_clear")
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
u32 val, mask;
u32 off = pll->offset;
regmap_write(regmap, off + PLL_L_VAL, config->l);
regmap_write(regmap, off + PLL_ALPHA_VAL, config->alpha);
regmap_write(regmap, off + PLL_CONFIG_CTL, config->config_ctl_val);
regmap_write(regmap, off + PLL_CONFIG_CTL_U, config->config_ctl_hi_val);
regmap_write(regmap, PLL_L_VAL(pll), config->l);
regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
regmap_write(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
if (pll_has_64bit_config(pll))
regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
config->config_ctl_hi_val);
if (pll_alpha_width(pll) > 32)
regmap_write(regmap, PLL_ALPHA_VAL_U(pll), config->alpha_hi);
val = config->main_output_mask;
val |= config->aux_output_mask;
@ -127,6 +192,8 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
val |= config->pre_div_val;
val |= config->post_div_val;
val |= config->vco_val;
val |= config->alpha_en_mask;
val |= config->alpha_mode_mask;
mask = config->main_output_mask;
mask |= config->aux_output_mask;
@ -136,20 +203,19 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
mask |= config->post_div_mask;
mask |= config->vco_mask;
regmap_update_bits(regmap, off + PLL_USER_CTL, mask, val);
regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
if (pll->flags & SUPPORTS_FSM_MODE)
qcom_pll_set_fsm_mode(regmap, off + PLL_MODE, 6, 0);
qcom_pll_set_fsm_mode(regmap, PLL_MODE(pll), 6, 0);
}
static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
{
int ret;
u32 val, off;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 val;
off = pll->offset;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
@ -158,7 +224,7 @@ static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
if (pll->flags & SUPPORTS_OFFLINE_REQ)
val &= ~PLL_OFFLINE_REQ;
ret = regmap_write(pll->clkr.regmap, off + PLL_MODE, val);
ret = regmap_write(pll->clkr.regmap, PLL_MODE(pll), val);
if (ret)
return ret;
@ -171,16 +237,15 @@ static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
{
int ret;
u32 val, off;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 val;
off = pll->offset;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return;
if (pll->flags & SUPPORTS_OFFLINE_REQ) {
ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
PLL_OFFLINE_REQ, PLL_OFFLINE_REQ);
if (ret)
return;
@ -191,7 +256,7 @@ static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
}
/* Disable hwfsm */
ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
PLL_FSM_ENA, 0);
if (ret)
return;
@ -202,11 +267,10 @@ static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
static int pll_is_enabled(struct clk_hw *hw, u32 mask)
{
int ret;
u32 val, off;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 val;
off = pll->offset;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
@ -227,12 +291,10 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
{
int ret;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 val, mask, off;
off = pll->offset;
u32 val, mask;
mask = PLL_OUTCTRL | PLL_RESET_N | PLL_BYPASSNL;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
@ -248,7 +310,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
if ((val & mask) == mask)
return 0;
ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
PLL_BYPASSNL, PLL_BYPASSNL);
if (ret)
return ret;
@ -260,7 +322,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
mb();
udelay(5);
ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
PLL_RESET_N, PLL_RESET_N);
if (ret)
return ret;
@ -269,7 +331,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
if (ret)
return ret;
ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
PLL_OUTCTRL, PLL_OUTCTRL);
/* Ensure that the write above goes through before returning. */
@ -281,11 +343,9 @@ static void clk_alpha_pll_disable(struct clk_hw *hw)
{
int ret;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 val, mask, off;
u32 val, mask;
off = pll->offset;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return;
@ -296,23 +356,25 @@ static void clk_alpha_pll_disable(struct clk_hw *hw)
}
mask = PLL_OUTCTRL;
regmap_update_bits(pll->clkr.regmap, off + PLL_MODE, mask, 0);
regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), mask, 0);
/* Delay of 2 output clock ticks required until output is disabled */
mb();
udelay(1);
mask = PLL_RESET_N | PLL_BYPASSNL;
regmap_update_bits(pll->clkr.regmap, off + PLL_MODE, mask, 0);
}
static unsigned long alpha_pll_calc_rate(u64 prate, u32 l, u32 a)
{
return (prate * l) + ((prate * a) >> ALPHA_BITWIDTH);
regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), mask, 0);
}
static unsigned long
alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a)
alpha_pll_calc_rate(u64 prate, u32 l, u32 a, u32 alpha_width)
{
return (prate * l) + ((prate * a) >> ALPHA_SHIFT(alpha_width));
}
static unsigned long
alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a,
u32 alpha_width)
{
u64 remainder;
u64 quotient;
@ -327,14 +389,15 @@ alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a)
}
/* Upper ALPHA_BITWIDTH bits of Alpha */
quotient = remainder << ALPHA_BITWIDTH;
quotient = remainder << ALPHA_SHIFT(alpha_width);
remainder = do_div(quotient, prate);
if (remainder)
quotient++;
*a = quotient;
return alpha_pll_calc_rate(prate, *l, *a);
return alpha_pll_calc_rate(prate, *l, *a, alpha_width);
}
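A worked pass through this rounding, with illustrative numbers not taken from the patch: asking for 1 GHz from a 19.2 MHz parent with a 40-bit alpha register (so ALPHA_SHIFT() caps the shift at 32) gives l = 52 and a remainder of 1,600,000 Hz; a becomes the rounded-up quotient (1600000 << 32) / 19200000 = 357913942, and alpha_pll_calc_rate() returns 19200000 * 52 + ((19200000 * 357913942) >> 32) = 998400000 + 1600000 = 1000000000 Hz, so the request is met exactly.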
static const struct pll_vco *
@ -356,71 +419,138 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
u32 l, low, high, ctl;
u64 a = 0, prate = parent_rate;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 off = pll->offset;
u32 alpha_width = pll_alpha_width(pll);
regmap_read(pll->clkr.regmap, off + PLL_L_VAL, &l);
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
regmap_read(pll->clkr.regmap, off + PLL_USER_CTL, &ctl);
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
if (ctl & PLL_ALPHA_EN) {
regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL, &low);
if (pll->flags & SUPPORTS_16BIT_ALPHA) {
a = low & ALPHA_16BIT_MASK;
} else {
regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL_U,
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low);
if (alpha_width > 32) {
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
&high);
a = (u64)high << 32 | low;
a >>= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
} else {
a = low & GENMASK(alpha_width - 1, 0);
}
if (alpha_width > ALPHA_BITWIDTH)
a >>= alpha_width - ALPHA_BITWIDTH;
}
return alpha_pll_calc_rate(prate, l, a);
return alpha_pll_calc_rate(prate, l, a, alpha_width);
}
static int clk_alpha_pll_update_latch(struct clk_alpha_pll *pll,
int (*is_enabled)(struct clk_hw *))
{
int ret;
u32 mode;
if (!is_enabled(&pll->clkr.hw) ||
!(pll->flags & SUPPORTS_DYNAMIC_UPDATE))
return 0;
regmap_read(pll->clkr.regmap, PLL_MODE(pll), &mode);
/* Latch the input to the PLL */
regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE,
PLL_UPDATE);
/* Wait for 2 reference cycles before checking the ACK bit */
udelay(1);
/*
* PLL will latch the new L, Alpha and freq control word.
* PLL will respond by raising PLL_ACK_LATCH output when new programming
* has been latched in and PLL is being updated. When
* UPDATE_LOGIC_BYPASS bit is not set, PLL_UPDATE will be cleared
* automatically by hardware when PLL_ACK_LATCH is asserted by PLL.
*/
if (mode & PLL_UPDATE_BYPASS) {
ret = wait_for_pll_update_ack_set(pll);
if (ret)
return ret;
regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE, 0);
} else {
ret = wait_for_pll_update(pll);
if (ret)
return ret;
}
ret = wait_for_pll_update_ack_clear(pll);
if (ret)
return ret;
/* Wait for PLL output to stabilize */
udelay(10);
return 0;
}
static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate,
int (*is_enabled)(struct clk_hw *))
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
const struct pll_vco *vco;
u32 l, alpha_width = pll_alpha_width(pll);
u64 a;
rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
vco = alpha_pll_find_vco(pll, rate);
if (pll->vco_table && !vco) {
pr_err("alpha pll not in a valid vco range\n");
return -EINVAL;
}
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
if (alpha_width > ALPHA_BITWIDTH)
a <<= alpha_width - ALPHA_BITWIDTH;
if (alpha_width > 32)
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), a >> 32);
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
if (vco) {
regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
PLL_VCO_MASK << PLL_VCO_SHIFT,
vco->val << PLL_VCO_SHIFT);
}
regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
PLL_ALPHA_EN, PLL_ALPHA_EN);
return clk_alpha_pll_update_latch(pll, is_enabled);
}
static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
const struct pll_vco *vco;
u32 l, off = pll->offset;
u64 a;
return __clk_alpha_pll_set_rate(hw, rate, prate,
clk_alpha_pll_is_enabled);
}
rate = alpha_pll_round_rate(rate, prate, &l, &a);
vco = alpha_pll_find_vco(pll, rate);
if (!vco) {
pr_err("alpha pll not in a valid vco range\n");
return -EINVAL;
}
regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l);
if (pll->flags & SUPPORTS_16BIT_ALPHA) {
regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL,
a & ALPHA_16BIT_MASK);
} else {
a <<= (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, a >> 32);
}
regmap_update_bits(pll->clkr.regmap, off + PLL_USER_CTL,
PLL_VCO_MASK << PLL_VCO_SHIFT,
vco->val << PLL_VCO_SHIFT);
regmap_update_bits(pll->clkr.regmap, off + PLL_USER_CTL, PLL_ALPHA_EN,
PLL_ALPHA_EN);
return 0;
static int clk_alpha_pll_hwfsm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
return __clk_alpha_pll_set_rate(hw, rate, prate,
clk_alpha_pll_hwfsm_is_enabled);
}
static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l;
u32 l, alpha_width = pll_alpha_width(pll);
u64 a;
unsigned long min_freq, max_freq;
rate = alpha_pll_round_rate(rate, *prate, &l, &a);
if (alpha_pll_find_vco(pll, rate))
rate = alpha_pll_round_rate(rate, *prate, &l, &a, alpha_width);
if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
return rate;
min_freq = pll->vco_table[0].min_freq;
@ -429,6 +559,158 @@ static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
return clamp(rate, min_freq, max_freq);
}
static unsigned long
alpha_huayra_pll_calc_rate(u64 prate, u32 l, u32 a)
{
/*
* a contains the 16-bit alpha_val as a two's-complement number in the
* range [-0.5, 0.5).
*/
if (a >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
l -= 1;
return (prate * l) + (prate * a >> PLL_HUAYRA_ALPHA_WIDTH);
}
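A worked instance of the two's-complement correction, again with illustrative values: for prate = 19,200,000, l = 52 and a = 0xC000 (the 16-bit encoding of -0.25), a >= BIT(15), so l is reduced to 51 and the fractional term contributes (19200000 * 0xC000) >> 16 = 14,400,000; the result, 51 * 19200000 + 14400000 = 993,600,000, matches 19.2 MHz * (52 - 0.25) computed directly.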
static unsigned long
alpha_huayra_pll_round_rate(unsigned long rate, unsigned long prate,
u32 *l, u32 *a)
{
u64 remainder;
u64 quotient;
quotient = rate;
remainder = do_div(quotient, prate);
*l = quotient;
if (!remainder) {
*a = 0;
return rate;
}
quotient = remainder << PLL_HUAYRA_ALPHA_WIDTH;
remainder = do_div(quotient, prate);
if (remainder)
quotient++;
/*
* alpha_val should be a two's-complement number in the range
* [-0.5, 0.5), so if quotient >= 0.5 then increment the l value,
* since the alpha value will be subtracted in this case.
*/
if (quotient >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
*l += 1;
*a = quotient;
return alpha_huayra_pll_calc_rate(prate, *l, *a);
}
static unsigned long
alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
u64 rate = parent_rate, tmp;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha = 0, ctl, alpha_m, alpha_n;
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
if (ctl & PLL_ALPHA_EN) {
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
/*
* Depending upon alpha_mode, the alpha value is treated either as an
* M/N value or as a two's-complement number. When alpha_mode=1,
* pll_alpha_val<15:8>=M and pll_alpha_val<7:0>=N
*
* Fout=FIN*(L+(M/N))
*
* M is a signed number (-128 to 127) and N is unsigned
* (0 to 255). M/N has to be within +/-0.5.
*
* When alpha_mode=0, it is a two's-complement number in the
* range [-0.5, 0.5).
*
* Fout=FIN*(L+(alpha_val/2^16))
*
* where alpha_val is a two's-complement number.
*/
if (!(ctl & PLL_ALPHA_MODE))
return alpha_huayra_pll_calc_rate(rate, l, alpha);
alpha_m = alpha >> PLL_HUAYRA_M_SHIFT & PLL_HUAYRA_M_MASK;
alpha_n = alpha >> PLL_HUAYRA_N_SHIFT & PLL_HUAYRA_N_MASK;
rate *= l;
tmp = parent_rate;
if (alpha_m >= BIT(PLL_HUAYRA_M_WIDTH - 1)) {
alpha_m = BIT(PLL_HUAYRA_M_WIDTH) - alpha_m;
tmp *= alpha_m;
do_div(tmp, alpha_n);
rate -= tmp;
} else {
tmp *= alpha_m;
do_div(tmp, alpha_n);
rate += tmp;
}
return rate;
}
return alpha_huayra_pll_calc_rate(rate, l, alpha);
}
static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, a, ctl, cur_alpha = 0;
rate = alpha_huayra_pll_round_rate(rate, prate, &l, &a);
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
if (ctl & PLL_ALPHA_EN)
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &cur_alpha);
/*
* The Huayra PLL supports dynamic programming: the user can change
* L_VAL without having to go through the power-on sequence.
*/
if (clk_alpha_pll_is_enabled(hw)) {
if (cur_alpha != a) {
pr_err("clock needs to be gated %s\n",
clk_hw_get_name(hw));
return -EBUSY;
}
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
/* Ensure the write above goes through so the L value change is detected. */
mb();
return wait_for_pll_enable_lock(pll);
}
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
if (a == 0)
regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
PLL_ALPHA_EN, 0x0);
else
regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
PLL_ALPHA_EN | PLL_ALPHA_MODE, PLL_ALPHA_EN);
return 0;
}
static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
u32 l, a;
return alpha_huayra_pll_round_rate(rate, *prate, &l, &a);
}
const struct clk_ops clk_alpha_pll_ops = {
.enable = clk_alpha_pll_enable,
.disable = clk_alpha_pll_disable,
@ -439,13 +721,23 @@ const struct clk_ops clk_alpha_pll_ops = {
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
const struct clk_ops clk_alpha_pll_huayra_ops = {
.enable = clk_alpha_pll_enable,
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_huayra_recalc_rate,
.round_rate = alpha_pll_huayra_round_rate,
.set_rate = alpha_pll_huayra_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_huayra_ops);
const struct clk_ops clk_alpha_pll_hwfsm_ops = {
.enable = clk_alpha_pll_hwfsm_enable,
.disable = clk_alpha_pll_hwfsm_disable,
.is_enabled = clk_alpha_pll_hwfsm_is_enabled,
.recalc_rate = clk_alpha_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
.set_rate = clk_alpha_pll_set_rate,
.set_rate = clk_alpha_pll_hwfsm_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
@ -455,10 +747,10 @@ clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
u32 ctl;
regmap_read(pll->clkr.regmap, pll->offset + PLL_USER_CTL, &ctl);
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
ctl >>= PLL_POST_DIV_SHIFT;
ctl &= PLL_POST_DIV_MASK;
ctl &= PLL_POST_DIV_MASK(pll);
return parent_rate >> fls(ctl);
}
@ -472,16 +764,48 @@ static const struct clk_div_table clk_alpha_div_table[] = {
{ }
};
static const struct clk_div_table clk_alpha_2bit_div_table[] = {
{ 0x0, 1 },
{ 0x1, 2 },
{ 0x3, 4 },
{ }
};
static long
clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
const struct clk_div_table *table;
return divider_round_rate(hw, rate, prate, clk_alpha_div_table,
if (pll->width == 2)
table = clk_alpha_2bit_div_table;
else
table = clk_alpha_div_table;
return divider_round_rate(hw, rate, prate, table,
pll->width, CLK_DIVIDER_POWER_OF_TWO);
}
static long
clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
u32 ctl, div;
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
ctl >>= PLL_POST_DIV_SHIFT;
ctl &= BIT(pll->width) - 1;
div = 1 << fls(ctl);
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
*prate = clk_hw_round_rate(clk_hw_get_parent(hw), div * rate);
return DIV_ROUND_UP_ULL((u64)*prate, div);
}
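A quick numeric check of the read-only rounding above, with an illustrative register value: for a 2-bit post-divider field holding ctl = 0x3, fls(0x3) = 2 and div = 1 << 2 = 4; if CLK_SET_RATE_PARENT is set, the parent is first asked to round 4 * rate, and the function then reports the parent rate divided by 4 (rounded up). In other words, these ops only ever echo the divider already programmed in hardware, which is what a read-only post-divider should do.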
static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@ -491,8 +815,8 @@ static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
/* 16 -> 0xf, 8 -> 0x7, 4 -> 0x3, 2 -> 0x1, 1 -> 0x0 */
div = DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1;
return regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_USER_CTL,
PLL_POST_DIV_MASK << PLL_POST_DIV_SHIFT,
return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
div << PLL_POST_DIV_SHIFT);
}
@ -502,3 +826,9 @@ const struct clk_ops clk_alpha_pll_postdiv_ops = {
.set_rate = clk_alpha_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
const struct clk_ops clk_alpha_pll_postdiv_ro_ops = {
.round_rate = clk_alpha_pll_postdiv_round_ro_rate,
.recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ro_ops);


@ -17,6 +17,30 @@
#include <linux/clk-provider.h>
#include "clk-regmap.h"
/* Alpha PLL types */
enum {
CLK_ALPHA_PLL_TYPE_DEFAULT,
CLK_ALPHA_PLL_TYPE_HUAYRA,
CLK_ALPHA_PLL_TYPE_BRAMMO,
CLK_ALPHA_PLL_TYPE_MAX,
};
enum {
PLL_OFF_L_VAL,
PLL_OFF_ALPHA_VAL,
PLL_OFF_ALPHA_VAL_U,
PLL_OFF_USER_CTL,
PLL_OFF_USER_CTL_U,
PLL_OFF_CONFIG_CTL,
PLL_OFF_CONFIG_CTL_U,
PLL_OFF_TEST_CTL,
PLL_OFF_TEST_CTL_U,
PLL_OFF_STATUS,
PLL_OFF_MAX_REGS
};
extern const u8 clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_MAX][PLL_OFF_MAX_REGS];
struct pll_vco {
unsigned long min_freq;
unsigned long max_freq;
@ -27,16 +51,18 @@ struct pll_vco {
* struct clk_alpha_pll - phase locked loop (PLL)
* @offset: base address of registers
* @vco_table: array of VCO settings
* @regs: alpha pll register map (see @clk_alpha_pll_regs)
* @clkr: regmap clock handle
*/
struct clk_alpha_pll {
u32 offset;
const u8 *regs;
const struct pll_vco *vco_table;
size_t num_vco;
#define SUPPORTS_OFFLINE_REQ BIT(0)
#define SUPPORTS_16BIT_ALPHA BIT(1)
#define SUPPORTS_FSM_MODE BIT(2)
#define SUPPORTS_DYNAMIC_UPDATE BIT(3)
u8 flags;
struct clk_regmap clkr;
@ -45,12 +71,14 @@ struct clk_alpha_pll {
/**
* struct clk_alpha_pll_postdiv - phase locked loop (PLL) post-divider
* @offset: base address of registers
* @regs: alpha pll register map (see @clk_alpha_pll_regs)
* @width: width of post-divider
* @clkr: regmap clock handle
*/
struct clk_alpha_pll_postdiv {
u32 offset;
u8 width;
const u8 *regs;
struct clk_regmap clkr;
};
@ -58,12 +86,15 @@ struct clk_alpha_pll_postdiv {
struct alpha_pll_config {
u32 l;
u32 alpha;
u32 alpha_hi;
u32 config_ctl_val;
u32 config_ctl_hi_val;
u32 main_output_mask;
u32 aux_output_mask;
u32 aux2_output_mask;
u32 early_output_mask;
u32 alpha_en_mask;
u32 alpha_mode_mask;
u32 pre_div_val;
u32 pre_div_mask;
u32 post_div_val;
@ -75,6 +106,8 @@ struct alpha_pll_config {
extern const struct clk_ops clk_alpha_pll_ops;
extern const struct clk_ops clk_alpha_pll_hwfsm_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_ops;
extern const struct clk_ops clk_alpha_pll_huayra_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_ro_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);


@ -52,6 +52,7 @@ static const struct parent_map gcc_xo_gpll0_gpll0_out_main_div2_map[] = {
static struct clk_alpha_pll gpll0_main = {
.offset = 0x21000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
.enable_reg = 0x0b000,
.enable_mask = BIT(0),
@ -82,6 +83,7 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
static struct clk_alpha_pll_postdiv gpll0 = {
.offset = 0x21000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0",
.parent_names = (const char *[]){


@ -73,6 +73,7 @@ static struct clk_fixed_factor xo = {
static struct clk_alpha_pll gpll0_early = {
.offset = 0x00000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
.enable_reg = 0x1480,
.enable_mask = BIT(0),
@ -88,6 +89,7 @@ static struct clk_alpha_pll gpll0_early = {
static struct clk_alpha_pll_postdiv gpll0 = {
.offset = 0x00000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data)
{
.name = "gpll0",
@ -99,6 +101,7 @@ static struct clk_alpha_pll_postdiv gpll0 = {
static struct clk_alpha_pll gpll4_early = {
.offset = 0x1dc0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
.enable_reg = 0x1480,
.enable_mask = BIT(4),
@ -114,6 +117,7 @@ static struct clk_alpha_pll gpll4_early = {
static struct clk_alpha_pll_postdiv gpll4 = {
.offset = 0x1dc0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data)
{
.name = "gpll4",


@ -227,6 +227,7 @@ static struct clk_fixed_factor xo = {
static struct clk_alpha_pll gpll0_early = {
.offset = 0x00000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
.enable_reg = 0x52000,
.enable_mask = BIT(0),
@ -252,6 +253,7 @@ static struct clk_fixed_factor gpll0_early_div = {
static struct clk_alpha_pll_postdiv gpll0 = {
.offset = 0x00000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0",
.parent_names = (const char *[]){ "gpll0_early" },
@ -262,6 +264,7 @@ static struct clk_alpha_pll_postdiv gpll0 = {
static struct clk_alpha_pll gpll4_early = {
.offset = 0x77000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
.enable_reg = 0x52000,
.enable_mask = BIT(4),
@ -276,6 +279,7 @@ static struct clk_alpha_pll gpll4_early = {
static struct clk_alpha_pll_postdiv gpll4 = {
.offset = 0x77000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4",
.parent_names = (const char *[]){ "gpll4_early" },


@ -267,6 +267,7 @@ static struct pll_vco mmpll_t_vco[] = {
static struct clk_alpha_pll mmpll0_early = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_p_vco,
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr = {
@ -283,6 +284,7 @@ static struct clk_alpha_pll mmpll0_early = {
static struct clk_alpha_pll_postdiv mmpll0 = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll0",
@ -295,6 +297,7 @@ static struct clk_alpha_pll_postdiv mmpll0 = {
static struct clk_alpha_pll mmpll1_early = {
.offset = 0x30,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_p_vco,
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr = {
@ -311,6 +314,7 @@ static struct clk_alpha_pll mmpll1_early = {
static struct clk_alpha_pll_postdiv mmpll1 = {
.offset = 0x30,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll1",
@ -323,6 +327,7 @@ static struct clk_alpha_pll_postdiv mmpll1 = {
static struct clk_alpha_pll mmpll2_early = {
.offset = 0x4100,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_gfx_vco,
.num_vco = ARRAY_SIZE(mmpll_gfx_vco),
.clkr.hw.init = &(struct clk_init_data){
@ -335,6 +340,7 @@ static struct clk_alpha_pll mmpll2_early = {
static struct clk_alpha_pll_postdiv mmpll2 = {
.offset = 0x4100,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll2",
@ -347,6 +353,7 @@ static struct clk_alpha_pll_postdiv mmpll2 = {
static struct clk_alpha_pll mmpll3_early = {
.offset = 0x60,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_p_vco,
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr.hw.init = &(struct clk_init_data){
@ -359,6 +366,7 @@ static struct clk_alpha_pll mmpll3_early = {
static struct clk_alpha_pll_postdiv mmpll3 = {
.offset = 0x60,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll3",
@ -371,6 +379,7 @@ static struct clk_alpha_pll_postdiv mmpll3 = {
static struct clk_alpha_pll mmpll4_early = {
.offset = 0x90,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_t_vco,
.num_vco = ARRAY_SIZE(mmpll_t_vco),
.clkr.hw.init = &(struct clk_init_data){
@ -383,6 +392,7 @@ static struct clk_alpha_pll mmpll4_early = {
static struct clk_alpha_pll_postdiv mmpll4 = {
.offset = 0x90,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 2,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll4",
@ -395,6 +405,7 @@ static struct clk_alpha_pll_postdiv mmpll4 = {
static struct clk_alpha_pll mmpll5_early = {
.offset = 0xc0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_p_vco,
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr.hw.init = &(struct clk_init_data){
@ -407,6 +418,7 @@ static struct clk_alpha_pll mmpll5_early = {
static struct clk_alpha_pll_postdiv mmpll5 = {
.offset = 0xc0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll5",
@ -419,6 +431,7 @@ static struct clk_alpha_pll_postdiv mmpll5 = {
static struct clk_alpha_pll mmpll8_early = {
.offset = 0x4130,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_gfx_vco,
.num_vco = ARRAY_SIZE(mmpll_gfx_vco),
.clkr.hw.init = &(struct clk_init_data){
@ -431,6 +444,7 @@ static struct clk_alpha_pll mmpll8_early = {
static struct clk_alpha_pll_postdiv mmpll8 = {
.offset = 0x4130,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll8",
@ -443,6 +457,7 @@ static struct clk_alpha_pll_postdiv mmpll8 = {
static struct clk_alpha_pll mmpll9_early = {
.offset = 0x4200,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_t_vco,
.num_vco = ARRAY_SIZE(mmpll_t_vco),
.clkr.hw.init = &(struct clk_init_data){
@ -455,6 +470,7 @@ static struct clk_alpha_pll mmpll9_early = {
static struct clk_alpha_pll_postdiv mmpll9 = {
.offset = 0x4200,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 2,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll9",


@ -19,10 +19,6 @@ obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o \
clk-dra7-atl.o dpll3xxx.o dpll44xx.o
obj-$(CONFIG_SOC_AM43XX) += $(clk-common) dpll3xxx.o clk-43xx.o
ifdef CONFIG_ATAGS
obj-$(CONFIG_ARCH_OMAP3) += clk-3xxx-legacy.o
endif
endif # CONFIG_ARCH_OMAP2PLUS
obj-$(CONFIG_COMMON_CLK_TI_ADPLL) += adpll.o

Diff for one file not shown because it is too large.


@ -336,141 +336,6 @@ void ti_dt_clk_init_retry_clks(void)
}
}
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
void __init ti_clk_patch_legacy_clks(struct ti_clk **patch)
{
while (*patch) {
memcpy((*patch)->patch, *patch, sizeof(**patch));
patch++;
}
}
struct clk __init *ti_clk_register_clk(struct ti_clk *setup)
{
struct clk *clk;
struct ti_clk_fixed *fixed;
struct ti_clk_fixed_factor *fixed_factor;
struct clk_hw *clk_hw;
int ret;
if (setup->clk)
return setup->clk;
switch (setup->type) {
case TI_CLK_FIXED:
fixed = setup->data;
clk = clk_register_fixed_rate(NULL, setup->name, NULL, 0,
fixed->frequency);
if (!IS_ERR(clk)) {
ret = ti_clk_add_alias(NULL, clk, setup->name);
if (ret) {
clk_unregister(clk);
clk = ERR_PTR(ret);
}
}
break;
case TI_CLK_MUX:
clk = ti_clk_register_mux(setup);
break;
case TI_CLK_DIVIDER:
clk = ti_clk_register_divider(setup);
break;
case TI_CLK_COMPOSITE:
clk = ti_clk_register_composite(setup);
break;
case TI_CLK_FIXED_FACTOR:
fixed_factor = setup->data;
clk = clk_register_fixed_factor(NULL, setup->name,
fixed_factor->parent,
0, fixed_factor->mult,
fixed_factor->div);
if (!IS_ERR(clk)) {
ret = ti_clk_add_alias(NULL, clk, setup->name);
if (ret) {
clk_unregister(clk);
clk = ERR_PTR(ret);
}
}
break;
case TI_CLK_GATE:
clk = ti_clk_register_gate(setup);
break;
case TI_CLK_DPLL:
clk = ti_clk_register_dpll(setup);
break;
default:
pr_err("bad type for %s!\n", setup->name);
clk = ERR_PTR(-EINVAL);
}
if (!IS_ERR(clk)) {
setup->clk = clk;
if (setup->clkdm_name) {
clk_hw = __clk_get_hw(clk);
if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) {
pr_warn("can't setup clkdm for basic clk %s\n",
setup->name);
} else {
to_clk_hw_omap(clk_hw)->clkdm_name =
setup->clkdm_name;
omap2_init_clk_clkdm(clk_hw);
}
}
}
return clk;
}
int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks)
{
struct clk *clk;
bool retry;
struct ti_clk_alias *retry_clk;
struct ti_clk_alias *tmp;
while (clks->clk) {
clk = ti_clk_register_clk(clks->clk);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) == -EAGAIN) {
list_add(&clks->link, &retry_list);
} else {
pr_err("register for %s failed: %ld\n",
clks->clk->name, PTR_ERR(clk));
return PTR_ERR(clk);
}
}
clks++;
}
retry = true;
while (!list_empty(&retry_list) && retry) {
retry = false;
list_for_each_entry_safe(retry_clk, tmp, &retry_list, link) {
pr_debug("retry-init: %s\n", retry_clk->clk->name);
clk = ti_clk_register_clk(retry_clk->clk);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) == -EAGAIN) {
continue;
} else {
pr_err("register for %s failed: %ld\n",
retry_clk->clk->name,
PTR_ERR(clk));
return PTR_ERR(clk);
}
} else {
retry = true;
list_del(&retry_clk->link);
}
}
}
return 0;
}
#endif
static const struct of_device_id simple_clk_match_table[] __initconst = {
{ .compatible = "fixed-clock" },
{ .compatible = "fixed-factor-clock" },


@ -92,17 +92,6 @@ struct ti_clk {
struct clk *clk;
};
struct ti_clk_alias {
struct ti_clk *clk;
struct clk_lookup lk;
struct list_head link;
};
struct ti_clk_fixed {
u32 frequency;
u16 flags;
};
struct ti_clk_mux {
u8 bit_shift;
int num_parents;
@ -123,13 +112,6 @@ struct ti_clk_divider {
u16 flags;
};
struct ti_clk_fixed_factor {
const char *parent;
u16 div;
u16 mult;
u16 flags;
};
struct ti_clk_gate {
const char *parent;
u8 bit_shift;
@ -138,44 +120,6 @@ struct ti_clk_gate {
u16 flags;
};
struct ti_clk_composite {
struct ti_clk_divider *divider;
struct ti_clk_mux *mux;
struct ti_clk_gate *gate;
u16 flags;
};
struct ti_clk_clkdm_gate {
const char *parent;
u16 flags;
};
struct ti_clk_dpll {
int num_parents;
u16 control_reg;
u16 idlest_reg;
u16 autoidle_reg;
u16 mult_div1_reg;
u8 module;
const char **parents;
u16 flags;
u8 modes;
u32 mult_mask;
u32 div1_mask;
u32 enable_mask;
u32 autoidle_mask;
u32 freqsel_mask;
u32 idlest_mask;
u32 dco_mask;
u32 sddiv_mask;
u16 max_multiplier;
u16 max_divider;
u8 min_divider;
u8 auto_recal_bit;
u8 recal_en_bit;
u8 recal_st_bit;
};
/* Composite clock component types */
enum {
CLK_COMPONENT_TYPE_GATE = 0,
@ -245,29 +189,17 @@ extern const struct omap_clkctrl_data dm816_clkctrl_data[];
typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
struct clk *ti_clk_register_gate(struct ti_clk *setup);
struct clk *ti_clk_register_interface(struct ti_clk *setup);
struct clk *ti_clk_register_mux(struct ti_clk *setup);
struct clk *ti_clk_register_divider(struct ti_clk *setup);
struct clk *ti_clk_register_composite(struct ti_clk *setup);
struct clk *ti_clk_register_dpll(struct ti_clk *setup);
struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
const char *con);
int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con);
void ti_clk_add_aliases(void);
struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup);
struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup);
struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup);
int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
u8 flags, u8 *width,
const struct clk_div_table **table);
void ti_clk_patch_legacy_clks(struct ti_clk **patch);
struct clk *ti_clk_register_clk(struct ti_clk *setup);
int ti_clk_register_legacy_clks(struct ti_clk_alias *clks);
int ti_clk_get_reg_addr(struct device_node *node, int index,
struct clk_omap_reg *reg);
void ti_dt_clocks_register(struct ti_dt_clk *oclks);

@ -116,51 +116,6 @@ static inline struct clk_hw *_get_hw(struct clk_hw_omap_comp *clk, int idx)
#define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw)
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
struct clk *ti_clk_register_composite(struct ti_clk *setup)
{
struct ti_clk_composite *comp;
struct clk_hw *gate;
struct clk_hw *mux;
struct clk_hw *div;
int num_parents = 1;
const char * const *parent_names = NULL;
struct clk *clk;
int ret;
comp = setup->data;
div = ti_clk_build_component_div(comp->divider);
gate = ti_clk_build_component_gate(comp->gate);
mux = ti_clk_build_component_mux(comp->mux);
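/*
 * Choose parent names from the components present; a mux, checked
 * last, takes precedence over the single-parent divider or gate.
 */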
if (div)
parent_names = &comp->divider->parent;
if (gate)
parent_names = &comp->gate->parent;
if (mux) {
num_parents = comp->mux->num_parents;
parent_names = comp->mux->parents;
}
clk = clk_register_composite(NULL, setup->name,
parent_names, num_parents, mux,
&ti_clk_mux_ops, div,
&ti_composite_divider_ops, gate,
&ti_composite_gate_ops, 0);
ret = ti_clk_add_alias(NULL, clk, setup->name);
if (ret) {
clk_unregister(clk);
return ERR_PTR(ret);
}
return clk;
}
#endif
static void __init _register_composite(void *user,
struct device_node *node)
{

@ -203,96 +203,6 @@ cleanup:
kfree(clk_hw);
}
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
void _get_reg(u8 module, u16 offset, struct clk_omap_reg *reg)
{
reg->index = module;
reg->offset = offset;
}
struct clk *ti_clk_register_dpll(struct ti_clk *setup)
{
struct clk_hw_omap *clk_hw;
struct clk_init_data init = { NULL };
struct dpll_data *dd;
struct clk *clk;
struct ti_clk_dpll *dpll;
const struct clk_ops *ops = &omap3_dpll_ck_ops;
struct clk *clk_ref;
struct clk *clk_bypass;
dpll = setup->data;
if (dpll->num_parents < 2)
return ERR_PTR(-EINVAL);
clk_ref = clk_get_sys(NULL, dpll->parents[0]);
clk_bypass = clk_get_sys(NULL, dpll->parents[1]);
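/* Parents are not registered yet; report -EAGAIN so the caller retries */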
if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
return ERR_PTR(-EAGAIN);
dd = kzalloc(sizeof(*dd), GFP_KERNEL);
clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
if (!dd || !clk_hw) {
clk = ERR_PTR(-ENOMEM);
goto cleanup;
}
clk_hw->dpll_data = dd;
clk_hw->ops = &clkhwops_omap3_dpll;
clk_hw->hw.init = &init;
init.name = setup->name;
init.ops = ops;
init.num_parents = dpll->num_parents;
init.parent_names = dpll->parents;
_get_reg(dpll->module, dpll->control_reg, &dd->control_reg);
_get_reg(dpll->module, dpll->idlest_reg, &dd->idlest_reg);
_get_reg(dpll->module, dpll->mult_div1_reg, &dd->mult_div1_reg);
_get_reg(dpll->module, dpll->autoidle_reg, &dd->autoidle_reg);
dd->modes = dpll->modes;
dd->div1_mask = dpll->div1_mask;
dd->idlest_mask = dpll->idlest_mask;
dd->mult_mask = dpll->mult_mask;
dd->autoidle_mask = dpll->autoidle_mask;
dd->enable_mask = dpll->enable_mask;
dd->sddiv_mask = dpll->sddiv_mask;
dd->dco_mask = dpll->dco_mask;
dd->max_divider = dpll->max_divider;
dd->min_divider = dpll->min_divider;
dd->max_multiplier = dpll->max_multiplier;
dd->auto_recal_bit = dpll->auto_recal_bit;
dd->recal_en_bit = dpll->recal_en_bit;
dd->recal_st_bit = dpll->recal_st_bit;
dd->clk_ref = __clk_get_hw(clk_ref);
dd->clk_bypass = __clk_get_hw(clk_bypass);
/* init.ops was already latched above, so override it directly */
if (dpll->flags & CLKF_CORE)
init.ops = &omap3_dpll_core_ck_ops;
if (dpll->flags & CLKF_PER)
init.ops = &omap3_dpll_per_ck_ops;
if (dpll->flags & CLKF_J_TYPE)
dd->flags |= DPLL_J_TYPE;
clk = ti_clk_register(NULL, &clk_hw->hw, setup->name);
if (!IS_ERR(clk))
return clk;
cleanup:
kfree(dd);
kfree(clk_hw);
return clk;
}
#endif
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
defined(CONFIG_SOC_AM43XX)

@ -128,53 +128,6 @@ static struct clk *_register_gate(struct device *dev, const char *name,
return clk;
}
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
struct clk *ti_clk_register_gate(struct ti_clk *setup)
{
const struct clk_ops *ops = &omap_gate_clk_ops;
const struct clk_hw_omap_ops *hw_ops = NULL;
struct clk_omap_reg reg;
u32 flags = 0;
u8 clk_gate_flags = 0;
struct ti_clk_gate *gate;
gate = setup->data;
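/*
 * Interface clocks take a separate path; the remaining CLKF_*
 * flags select the matching clk_ops/hw_ops pair below.
 */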
if (gate->flags & CLKF_INTERFACE)
return ti_clk_register_interface(setup);
if (gate->flags & CLKF_SET_RATE_PARENT)
flags |= CLK_SET_RATE_PARENT;
if (gate->flags & CLKF_SET_BIT_TO_DISABLE)
clk_gate_flags |= INVERT_ENABLE;
if (gate->flags & CLKF_HSDIV) {
ops = &omap_gate_clk_hsdiv_restore_ops;
hw_ops = &clkhwops_wait;
}
if (gate->flags & CLKF_DSS)
hw_ops = &clkhwops_omap3430es2_dss_usbhost_wait;
if (gate->flags & CLKF_WAIT)
hw_ops = &clkhwops_wait;
if (gate->flags & CLKF_CLKDM)
ops = &omap_gate_clkdm_clk_ops;
if (gate->flags & CLKF_AM35XX)
hw_ops = &clkhwops_am35xx_ipss_module_wait;
reg.index = gate->module;
reg.offset = gate->reg;
reg.ptr = NULL;
return _register_gate(NULL, setup->name, gate->parent, flags,
&reg, gate->bit_shift,
clk_gate_flags, ops, hw_ops);
}
struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup)
{
struct clk_hw_omap *gate;
@ -204,7 +157,6 @@ struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup)
return &gate->hw;
}
#endif
static void __init _of_ti_gate_clk_setup(struct device_node *node,
const struct clk_ops *ops,

@ -67,38 +67,6 @@ static struct clk *_register_interface(struct device *dev, const char *name,
return clk;
}
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
struct clk *ti_clk_register_interface(struct ti_clk *setup)
{
const struct clk_hw_omap_ops *ops = &clkhwops_iclk_wait;
struct clk_omap_reg reg;
struct ti_clk_gate *gate;
gate = setup->data;
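/* Default is iclk-with-wait; CLKF_* flags select SoC-specific hw_ops */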
reg.index = gate->module;
reg.offset = gate->reg;
reg.ptr = NULL;
if (gate->flags & CLKF_NO_WAIT)
ops = &clkhwops_iclk;
if (gate->flags & CLKF_HSOTGUSB)
ops = &clkhwops_omap3430es2_iclk_hsotgusb_wait;
if (gate->flags & CLKF_DSS)
ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait;
if (gate->flags & CLKF_SSI)
ops = &clkhwops_omap3430es2_iclk_ssi_wait;
if (gate->flags & CLKF_AM35XX)
ops = &clkhwops_am35xx_ipss_wait;
return _register_interface(NULL, setup->name, gate->parent,
&reg, gate->bit_shift, ops);
}
#endif
static void __init _of_ti_interface_clk_setup(struct device_node *node,
const struct clk_hw_omap_ops *ops)
{

@ -744,6 +744,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw);
unsigned long __clk_get_flags(struct clk *clk);
unsigned long clk_hw_get_flags(const struct clk_hw *hw);
bool clk_hw_is_prepared(const struct clk_hw *hw);
bool clk_hw_rate_is_protected(const struct clk_hw *hw);
bool clk_hw_is_enabled(const struct clk_hw *hw);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);

@ -331,6 +331,38 @@ struct clk *devm_clk_get(struct device *dev, const char *id);
*/
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id);
/**
* clk_rate_exclusive_get - get exclusivity over the rate control of a
* producer
* @clk: clock source
*
* This function allows drivers to get exclusive control over the rate of a
* provider. It prevents any other consumer from executing, even indirectly,
* any operation which could alter the rate of the provider or cause glitches.
*
* If exclusivity is claimed more than once on a clock, even by the same
* driver, the rate effectively gets locked as exclusivity can't be preempted.
*
* Must not be called from within atomic context.
*
* Returns success (0) or negative errno.
*/
int clk_rate_exclusive_get(struct clk *clk);
/**
* clk_rate_exclusive_put - release exclusivity over the rate control of a
* producer
* @clk: clock source
*
* This function allows drivers to release the exclusivity they previously
* claimed with clk_rate_exclusive_get().
*
* The caller must balance the number of clk_rate_exclusive_get() and
* clk_rate_exclusive_put() calls.
*
* Must not be called from within atomic context.
*/
void clk_rate_exclusive_put(struct clk *clk);
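/*
 * Editor's sketch of the intended usage, not part of this patch: a consumer
 * that cannot tolerate rate glitches brackets its critical section with a
 * balanced get/put pair. The function and clock names are hypothetical.
 */
static int my_dev_do_transfer(struct clk *my_clk)
{
	int ret;

	/* Lock the rate: no consumer may change it until we release it */
	ret = clk_rate_exclusive_get(my_clk);
	if (ret)
		return ret;

	/* ... glitch-sensitive work runs at a stable, guaranteed rate ... */

	clk_rate_exclusive_put(my_clk);
	return 0;
}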
/**
* clk_enable - inform the system when the clock source should be running.
@ -472,6 +504,23 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
*/
int clk_set_rate(struct clk *clk, unsigned long rate);
/**
* clk_set_rate_exclusive - set the clock rate and claim exclusivity over
* clock source
* @clk: clock source
* @rate: desired clock rate in Hz
*
* This helper function allows drivers to atomically set the rate of a producer
* and claim exclusivity over the rate control of the producer.
*
* It is essentially a combination of clk_set_rate() and
* clk_rate_exclusive_get(). The caller must balance this call with a call to
* clk_rate_exclusive_put().
*
* Returns success (0) or negative errno.
*/
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);
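/*
 * Editor's sketch, hypothetical names: set a pixel clock to 148.5 MHz and
 * claim exclusivity in one call; the balancing clk_rate_exclusive_put()
 * runs when scanout stops.
 */
static int my_crtc_enable(struct clk *pix_clk)
{
	int ret;

	ret = clk_set_rate_exclusive(pix_clk, 148500000);
	if (ret)
		return ret;

	/* ... scanout proceeds at a rate no other consumer can disturb ... */
	return 0;
}

static void my_crtc_disable(struct clk *pix_clk)
{
	clk_rate_exclusive_put(pix_clk);
}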
/**
* clk_has_parent - check if a clock is a possible parent for another
* @clk: clock source
@ -583,6 +632,14 @@ static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
static inline int clk_rate_exclusive_get(struct clk *clk)
{
return 0;
}
static inline void clk_rate_exclusive_put(struct clk *clk) {}
static inline int clk_enable(struct clk *clk)
{
return 0;
@ -609,6 +666,11 @@ static inline int clk_set_rate(struct clk *clk, unsigned long rate)
return 0;
}
static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
return 0;
}
static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
return 0;