author	Thomas Gleixner <tglx@linutronix.de>	2009-07-25 18:43:30 +0400
committer	Ingo Molnar <mingo@elte.hu>	2011-09-13 13:11:50 +0400
commit	740969f91e950b64a18fdd0a25164cdee042abf0 (patch)
tree	b152f88baf3868a7f3df4fa712ecd6a8c811d0a4
parent	cdcc136ffd264849a943acb42c36ffe9b458f811 (diff)
locking, lib/proportions: Annotate prop_local_percpu::lock as raw
The prop_local_percpu::lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse checking
will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
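(Illustrative aside, not part of the commit: a minimal sketch of the
conversion pattern this patch applies, on a hypothetical structure. On
PREEMPT_RT a spinlock_t becomes a sleeping lock, while a raw_spinlock_t
keeps truly spinning with interrupts disabled, so it remains safe to take
in atomic context. The names snapshot_state and snapshot_update are made
up for the example.)

#include <linux/spinlock.h>

struct snapshot_state {			/* hypothetical example struct */
	unsigned long period;
	raw_spinlock_t lock;		/* was: spinlock_t lock; */
};

static void snapshot_update(struct snapshot_state *s, unsigned long period)
{
	unsigned long flags;

	/* raw variant: does not turn into a sleeping lock on -rt */
	raw_spin_lock_irqsave(&s->lock, flags);
	s->period = period;
	raw_spin_unlock_irqrestore(&s->lock, flags);
}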
-rw-r--r--	include/linux/proportions.h	|  6 +++---
-rw-r--r--	lib/proportions.c	| 12 ++++++------
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index cf793bbbd05e..ef35bb73f69b 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -58,7 +58,7 @@ struct prop_local_percpu {
 	 */
 	int shift;
 	unsigned long period;
-	spinlock_t lock;		/* protect the snapshot state */
+	raw_spinlock_t lock;		/* protect the snapshot state */
 };
 
 int prop_local_init_percpu(struct prop_local_percpu *pl);
@@ -106,11 +106,11 @@ struct prop_local_single {
 	 */
 	unsigned long period;
 	int shift;
-	spinlock_t lock;		/* protect the snapshot state */
+	raw_spinlock_t lock;		/* protect the snapshot state */
 };
 
 #define INIT_PROP_LOCAL_SINGLE(name)			\
-{	.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
+{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
 }
 
 int prop_local_init_single(struct prop_local_single *pl);
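(Illustrative aside, not part of the patch: with the updated macro, a
statically defined prop_local_single would be initialized as below;
"my_prop" is a hypothetical name chosen for the example.)

static struct prop_local_single my_prop = INIT_PROP_LOCAL_SINGLE(my_prop);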
diff --git a/lib/proportions.c b/lib/proportions.c
index d50746a79de2..05df84801b56 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
 
 int prop_local_init_percpu(struct prop_local_percpu *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 
 	/*
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 		percpu_counter_set(&pl->events, 0);
 
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd,
 
 int prop_local_init_single(struct prop_local_single *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	pl->events = 0;
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 	/*
 	 * For each missed period, we half the local counter.
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	else
 		pl->events = 0;
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*