author      Rajendra Nayak <rnayak@ti.com>  2011-07-10 15:56:55 +0400
committer   Paul Walmsley <paul@pwsan.com>  2011-07-10 15:56:55 +0400
commit      555e74ea08bfc04a0136f976cbaa200addf1ba87
tree        afac050a2669fcf1e47d764785f9007c568b9e15
parent      b86cfb52a145d8ddad66b98c39c6764f3883cd5a
download    linux-555e74ea08bfc04a0136f976cbaa200addf1ba87.tar.xz
OMAP2+: clockdomain: Add per clkdm lock to prevent concurrent state programming
Since the clkdm state programming is now done from within the hwmod framework (which uses a per-hwmod lock) instead of being done from the clock framework (which used a global lock), there is now a need for per-clkdm locking to prevent races between different hwmods/modules belonging to the same clock domain concurrently programming the clkdm state.

Signed-off-by: Rajendra Nayak <rnayak@ti.com>
Signed-off-by: Benoit Cousson <b-cousson@ti.com>
Cc: Paul Walmsley <paul@pwsan.com>
Signed-off-by: Paul Walmsley <paul@pwsan.com>
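To make the locking pattern concrete before the diff, here is a minimal userspace model of what the patch does: each domain object carries its own lock, and the software flag update plus the hardware programming call happen inside a single critical section. All names here (struct clockdomain, hw_program_sleep, FLAG_HWSUP_ENABLED) are simplified stand-ins rather than the kernel's actual API, and a pthread mutex substitutes for the kernel's spin_lock_irqsave(); this is an illustrative sketch of the pattern, not the implementation.

/*
 * Minimal userspace model of the per-clockdomain locking pattern.
 * Hypothetical names; pthread_mutex_t stands in for the kernel
 * spinlock and printf() for the actual hardware write.
 */
#include <pthread.h>
#include <stdio.h>

#define FLAG_HWSUP_ENABLED 0x1

struct clockdomain {
	const char *name;
	unsigned int flags;     /* software view of the hw-supervised bit */
	pthread_mutex_t lock;   /* one lock per clockdomain, as in the patch */
};

/* Stand-in for arch_clkdm->clkdm_sleep(): programs the hardware. */
static void hw_program_sleep(struct clockdomain *clkdm)
{
	printf("forcing sleep on %s\n", clkdm->name);
}

/*
 * As in the patched clkdm_sleep(), the flag update and the hardware
 * write form one atomic step per domain, so two modules sharing a
 * clockdomain cannot interleave and leave 'flags' out of sync with
 * the hardware state.
 */
static int clkdm_sleep(struct clockdomain *clkdm)
{
	if (!clkdm)
		return -1;

	pthread_mutex_lock(&clkdm->lock);
	clkdm->flags &= ~FLAG_HWSUP_ENABLED;
	hw_program_sleep(clkdm);
	pthread_mutex_unlock(&clkdm->lock);
	return 0;
}

int main(void)
{
	struct clockdomain d = {
		.name = "l4_per_clkdm",
		.flags = FLAG_HWSUP_ENABLED,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	return clkdm_sleep(&d);
}

The kernel uses spin_lock_irqsave() rather than a mutex because these paths can run with interrupts disabled; the shape of the critical section is the same.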
Diffstat (limited to 'arch/arm/mach-omap2/clockdomain.c')
-rw-r--r--  arch/arm/mach-omap2/clockdomain.c | 47
1 file changed, 40 insertions(+), 7 deletions(-)
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 239b558853f5..ab7db083f97f 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -92,6 +92,8 @@ static int _clkdm_register(struct clockdomain *clkdm)
pwrdm_add_clkdm(pwrdm, clkdm);
+ spin_lock_init(&clkdm->lock);
+
pr_debug("clockdomain: registered %s\n", clkdm->name);
return 0;
@@ -690,6 +692,9 @@ int clkdm_clear_all_sleepdeps(struct clockdomain *clkdm)
*/
int clkdm_sleep(struct clockdomain *clkdm)
{
+ int ret;
+ unsigned long flags;
+
if (!clkdm)
return -EINVAL;
@@ -704,9 +709,11 @@ int clkdm_sleep(struct clockdomain *clkdm)
pr_debug("clockdomain: forcing sleep on %s\n", clkdm->name);
+ spin_lock_irqsave(&clkdm->lock, flags);
clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
-
- return arch_clkdm->clkdm_sleep(clkdm);
+ ret = arch_clkdm->clkdm_sleep(clkdm);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+ return ret;
}
/**
@@ -720,6 +727,9 @@ int clkdm_sleep(struct clockdomain *clkdm)
*/
int clkdm_wakeup(struct clockdomain *clkdm)
{
+ int ret;
+ unsigned long flags;
+
if (!clkdm)
return -EINVAL;
@@ -734,9 +744,11 @@ int clkdm_wakeup(struct clockdomain *clkdm)
pr_debug("clockdomain: forcing wakeup on %s\n", clkdm->name);
+ spin_lock_irqsave(&clkdm->lock, flags);
clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
-
- return arch_clkdm->clkdm_wakeup(clkdm);
+ ret = arch_clkdm->clkdm_wakeup(clkdm);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+ return ret;
}
/**
@@ -751,6 +763,8 @@ int clkdm_wakeup(struct clockdomain *clkdm)
*/
void clkdm_allow_idle(struct clockdomain *clkdm)
{
+ unsigned long flags;
+
if (!clkdm)
return;
@@ -766,10 +780,11 @@ void clkdm_allow_idle(struct clockdomain *clkdm)
pr_debug("clockdomain: enabling automatic idle transitions for %s\n",
clkdm->name);
+ spin_lock_irqsave(&clkdm->lock, flags);
clkdm->_flags |= _CLKDM_FLAG_HWSUP_ENABLED;
-
arch_clkdm->clkdm_allow_idle(clkdm);
pwrdm_clkdm_state_switch(clkdm);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
}
/**
@@ -783,6 +798,8 @@ void clkdm_allow_idle(struct clockdomain *clkdm)
*/
void clkdm_deny_idle(struct clockdomain *clkdm)
{
+ unsigned long flags;
+
if (!clkdm)
return;
@@ -798,9 +815,10 @@ void clkdm_deny_idle(struct clockdomain *clkdm)
pr_debug("clockdomain: disabling automatic idle transitions for %s\n",
clkdm->name);
+ spin_lock_irqsave(&clkdm->lock, flags);
clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
-
arch_clkdm->clkdm_deny_idle(clkdm);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
}
/**
@@ -816,16 +834,25 @@ void clkdm_deny_idle(struct clockdomain *clkdm)
*/
bool clkdm_in_hwsup(struct clockdomain *clkdm)
{
+ bool ret;
+ unsigned long flags;
+
if (!clkdm)
return false;
- return (clkdm->_flags & _CLKDM_FLAG_HWSUP_ENABLED) ? true : false;
+ spin_lock_irqsave(&clkdm->lock, flags);
+ ret = (clkdm->_flags & _CLKDM_FLAG_HWSUP_ENABLED) ? true : false;
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+
+ return ret;
}
/* Clockdomain-to-clock/hwmod framework interface code */
static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
{
+ unsigned long flags;
+
if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_enable)
return -EINVAL;
@@ -837,9 +864,11 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps)
return 0;
+ spin_lock_irqsave(&clkdm->lock, flags);
arch_clkdm->clkdm_clk_enable(clkdm);
pwrdm_wait_transition(clkdm->pwrdm.ptr);
pwrdm_clkdm_state_switch(clkdm);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
pr_debug("clockdomain: clkdm %s: enabled\n", clkdm->name);
@@ -848,6 +877,8 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
{
+ unsigned long flags;
+
if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
return -EINVAL;
@@ -859,8 +890,10 @@ static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
if (atomic_dec_return(&clkdm->usecount) > 0)
return 0;
+ spin_lock_irqsave(&clkdm->lock, flags);
arch_clkdm->clkdm_clk_disable(clkdm);
pwrdm_clkdm_state_switch(clkdm);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
pr_debug("clockdomain: clkdm %s: disabled\n", clkdm->name);