ARM: OMAP2+: clockdomain: bypass clockdomain handling when disabling unused clks

The OMAP port to the common clk framework[1] resulted in spurious WARNs
while disabling unused clocks.  This is due to _clkdm_clk_hwmod_disable
catching a clkdm->usecount of zero.  Even less desirable, it would not
allow the clkdm_clk_disable function pointer to be called, due to an
early return of -ERANGE.

This patch handles this corner case by skipping the WARN and the early
return when clkdm->usecount and clk->enable_usecount are both zero.
Presumably this can only happen during the check for unused clocks at
boot time.
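
For illustration, the logic of the new check reduces to the following
standalone sketch (a userspace model only; the struct fields and the
clkdm_clk_disable_model() helper are simplified stand-ins, not the
kernel API):

#include <stdio.h>

/* Simplified stand-ins for the kernel's clk and clockdomain structures. */
struct clk { int enable_count; };
struct clockdomain { int usecount; };

static int clkdm_clk_disable_model(struct clockdomain *clkdm, struct clk *clk)
{
	/* corner case: the disable-unused pass hands us a never-enabled clock */
	if (clk->enable_count == 0)
		return 0;		/* skip the WARN and the -ERANGE return */

	if (clkdm->usecount == 0) {
		fprintf(stderr, "WARN: clkdm usecount underflow\n");
		return -34;		/* -ERANGE */
	}

	if (--clkdm->usecount > 0)
		return 0;		/* other users remain; keep the clkdm enabled */

	printf("clockdomain disabled\n");
	return 0;
}

int main(void)
{
	struct clockdomain clkdm = { .usecount = 0 };
	struct clk unused_clk = { .enable_count = 0 };

	/* Without the corner-case check this call would WARN and return -ERANGE. */
	printf("ret = %d\n", clkdm_clk_disable_model(&clkdm, &unused_clk));
	return 0;
}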

[1] http://article.gmane.org/gmane.linux.ports.arm.omap/88824

Signed-off-by: Mike Turquette <mturquette@ti.com>
[paul@pwsan.com: split the hwmod and clock disable cases; modified the
 code to skip the clockdomain handling during the disable-unused-clocks phase;
 added COMMON_CLK ifdef; removed include of clk-private.h at Mike's request]
Signed-off-by: Paul Walmsley <paul@pwsan.com>
Mike Turquette 2012-11-09 11:28:42 -07:00, committed by Paul Walmsley
Parent: b797be1d4c
Commit: d043d87cd3
1 changed file with 56 additions and 37 deletions

@@ -22,6 +22,7 @@
 #include <linux/clk.h>
 #include <linux/limits.h>
 #include <linux/err.h>
+#include <linux/clk-provider.h>
 #include <linux/io.h>
@@ -947,35 +948,6 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
 	return 0;
 }
 
-static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
-{
-	unsigned long flags;
-
-	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
-		return -EINVAL;
-
-	spin_lock_irqsave(&clkdm->lock, flags);
-
-	if (atomic_read(&clkdm->usecount) == 0) {
-		spin_unlock_irqrestore(&clkdm->lock, flags);
-		WARN_ON(1); /* underflow */
-		return -ERANGE;
-	}
-
-	if (atomic_dec_return(&clkdm->usecount) > 0) {
-		spin_unlock_irqrestore(&clkdm->lock, flags);
-		return 0;
-	}
-
-	arch_clkdm->clkdm_clk_disable(clkdm);
-	pwrdm_state_switch(clkdm->pwrdm.ptr);
-	spin_unlock_irqrestore(&clkdm->lock, flags);
-
-	pr_debug("clockdomain: %s: disabled\n", clkdm->name);
-
-	return 0;
-}
-
 /**
  * clkdm_clk_enable - add an enabled downstream clock to this clkdm
  * @clkdm: struct clockdomain *
@@ -1018,15 +990,41 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
  */
 int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 {
-	/*
-	 * XXX Rewrite this code to maintain a list of enabled
-	 * downstream clocks for debugging purposes?
-	 */
+	unsigned long flags;
 
-	if (!clk)
+	if (!clkdm || !clk || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
 		return -EINVAL;
 
-	return _clkdm_clk_hwmod_disable(clkdm);
+	spin_lock_irqsave(&clkdm->lock, flags);
+
+#ifdef CONFIG_COMMON_CLK
+	/* corner case: disabling unused clocks */
+	if (__clk_get_enable_count(clk) == 0)
+		goto ccd_exit;
+#endif
+
+	if (atomic_read(&clkdm->usecount) == 0) {
+		spin_unlock_irqrestore(&clkdm->lock, flags);
+		WARN_ON(1); /* underflow */
+		return -ERANGE;
+	}
+
+	if (atomic_dec_return(&clkdm->usecount) > 0) {
+		spin_unlock_irqrestore(&clkdm->lock, flags);
+		return 0;
+	}
+
+	arch_clkdm->clkdm_clk_disable(clkdm);
+	pwrdm_state_switch(clkdm->pwrdm.ptr);
+
+	pr_debug("clockdomain: %s: disabled\n", clkdm->name);
+
+#ifdef CONFIG_COMMON_CLK
+ccd_exit:
+#endif
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+
+	return 0;
 }
 
 /**
@@ -1077,6 +1075,8 @@ int clkdm_hwmod_enable(struct clockdomain *clkdm, struct omap_hwmod *oh)
  */
 int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
 {
+	unsigned long flags;
+
 	/* The clkdm attribute does not exist yet prior OMAP4 */
 	if (cpu_is_omap24xx() || cpu_is_omap34xx())
 		return 0;
@@ -1086,9 +1086,28 @@ int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
 	 * downstream hwmods for debugging purposes?
 	 */
 
-	if (!oh)
+	if (!clkdm || !oh || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
 		return -EINVAL;
 
-	return _clkdm_clk_hwmod_disable(clkdm);
+	spin_lock_irqsave(&clkdm->lock, flags);
+
+	if (atomic_read(&clkdm->usecount) == 0) {
+		spin_unlock_irqrestore(&clkdm->lock, flags);
+		WARN_ON(1); /* underflow */
+		return -ERANGE;
+	}
+
+	if (atomic_dec_return(&clkdm->usecount) > 0) {
+		spin_unlock_irqrestore(&clkdm->lock, flags);
+		return 0;
+	}
+
+	arch_clkdm->clkdm_clk_disable(clkdm);
+	pwrdm_state_switch(clkdm->pwrdm.ptr);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+
+	pr_debug("clockdomain: %s: disabled\n", clkdm->name);
+
+	return 0;
 }