author     Tero Kristo <t-kristo@ti.com>     2012-09-25 20:05:32 +0400
committer  Tony Lindgren <tony@atomide.com>  2012-10-17 20:00:31 +0400
commit     64e29fd5ed42f5d0ff5b0ea9395b3abd5d43f89b (patch)
tree       2d1a162662e2279f03d17de9b2e33c5d6cb8ca54 /arch/arm/mach-omap2/clockdomain.c
parent     49c58e82023649924b1b0f38d715d94093f4044b (diff)
download   linux-64e29fd5ed42f5d0ff5b0ea9395b3abd5d43f89b.tar.xz
ARM: OMAP: clockdomain: Fix locking on _clkdm_clk_hwmod_enable / disable
Previously the code only acquired the spinlock after increasing / decreasing the usecount value, which is wrong: this leaves a small window in which a task switch may occur between the check of the usecount and the actual wakeup / sleep of the domain.

Fix this by taking the spinlock before the usecount access. The usecount is left as an atomic_t so that the value can still be read easily through atomic_read().

Signed-off-by: Tero Kristo <t-kristo@ti.com>
Acked-by: Paul Walmsley <paul@pwsan.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
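To make the race concrete, here is a minimal userspace sketch of the two orderings, using a pthread spinlock and a plain int in place of the kernel's clkdm->lock and atomic_t usecount. The names fake_clkdm, enable_broken and enable_fixed are illustrative only and not part of the patch. In the broken ordering the domain wakeup runs outside the window in which the usecount was checked; in the fixed ordering the check and the wakeup happen under the same lock, mirroring what this patch does in _clkdm_clk_hwmod_enable():

/* Illustrative userspace analog only -- not kernel code. */
#include <pthread.h>

struct fake_clkdm {
	pthread_spinlock_t lock;
	int usecount;		/* stands in for the kernel's atomic_t usecount */
	int hw_enabled;		/* stands in for the real domain wakeup state */
};

/*
 * Old ordering: the usecount is incremented and checked before the lock is
 * taken, so another task can run between the check and the wakeup below.
 * (In the kernel the increment itself was atomic; the race was between the
 * check and the hardware wakeup.)
 */
int enable_broken(struct fake_clkdm *d)
{
	if (++d->usecount > 1)
		return 0;		/* race window: wakeup may not have happened yet */

	pthread_spin_lock(&d->lock);
	d->hw_enabled = 1;		/* "wake up" the domain */
	pthread_spin_unlock(&d->lock);
	return 0;
}

/*
 * Fixed ordering, as in this patch: take the lock first, then check and
 * modify the usecount, and drop the lock on every return path.
 */
int enable_fixed(struct fake_clkdm *d)
{
	pthread_spin_lock(&d->lock);
	if (++d->usecount > 1) {
		pthread_spin_unlock(&d->lock);
		return 0;
	}
	d->hw_enabled = 1;		/* wakeup now happens under the lock */
	pthread_spin_unlock(&d->lock);
	return 0;
}

int main(void)
{
	struct fake_clkdm d = { .usecount = 0, .hw_enabled = 0 };

	pthread_spin_init(&d.lock, PTHREAD_PROCESS_PRIVATE);
	enable_fixed(&d);	/* first enable: wakes the domain under the lock */
	enable_fixed(&d);	/* nested enable: only bumps the usecount */
	pthread_spin_destroy(&d.lock);
	return 0;
}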
Diffstat (limited to 'arch/arm/mach-omap2/clockdomain.c')
-rw-r--r--  arch/arm/mach-omap2/clockdomain.c  15
1 file changed, 11 insertions, 4 deletions
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index cbb879139c51..512e79a842cb 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -925,15 +925,18 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
 	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_enable)
 		return -EINVAL;
 
+	spin_lock_irqsave(&clkdm->lock, flags);
+
 	/*
 	 * For arch's with no autodeps, clkcm_clk_enable
 	 * should be called for every clock instance or hwmod that is
 	 * enabled, so the clkdm can be force woken up.
 	 */
-	if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps)
+	if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps) {
+		spin_unlock_irqrestore(&clkdm->lock, flags);
 		return 0;
+	}
 
-	spin_lock_irqsave(&clkdm->lock, flags);
 	arch_clkdm->clkdm_clk_enable(clkdm);
 	pwrdm_state_switch(clkdm->pwrdm.ptr);
 	spin_unlock_irqrestore(&clkdm->lock, flags);
@@ -950,15 +953,19 @@ static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
 	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
 		return -EINVAL;
 
+	spin_lock_irqsave(&clkdm->lock, flags);
+
 	if (atomic_read(&clkdm->usecount) == 0) {
+		spin_unlock_irqrestore(&clkdm->lock, flags);
 		WARN_ON(1); /* underflow */
 		return -ERANGE;
 	}
 
-	if (atomic_dec_return(&clkdm->usecount) > 0)
+	if (atomic_dec_return(&clkdm->usecount) > 0) {
+		spin_unlock_irqrestore(&clkdm->lock, flags);
 		return 0;
+	}
 
-	spin_lock_irqsave(&clkdm->lock, flags);
 	arch_clkdm->clkdm_clk_disable(clkdm);
 	pwrdm_state_switch(clkdm->pwrdm.ptr);
 	spin_unlock_irqrestore(&clkdm->lock, flags);
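
On the design note in the commit message: because usecount stays an atomic_t, its value can still be read without taking clkdm->lock. A hedged sketch of what such a lock-free read could look like inside clockdomain.c follows; the helper name clkdm_usecount_peek is hypothetical and not part of this patch, and it assumes struct clockdomain and atomic_read() are in scope as they are in this file.

/* Hypothetical helper, not part of this patch: the usecount is kept as an
 * atomic_t precisely so that a read like this needs no locking. */
static int clkdm_usecount_peek(struct clockdomain *clkdm)
{
	return atomic_read(&clkdm->usecount);
}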