Diffstat (limited to 'arch/powerpc/lib/qspinlock.c')
-rw-r--r--  arch/powerpc/lib/qspinlock.c | 122
1 file changed, 52 insertions(+), 70 deletions(-)
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 6dd2f46bd3ef..5de4dd549f6e 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -16,7 +16,8 @@ struct qnode {
struct qnode *next;
struct qspinlock *lock;
int cpu;
- int yield_cpu;
+ u8 sleepy; /* 1 if the previous vCPU was preempted or
+            * if the previous node was sleepy */
u8 locked; /* 1 if lock acquired */
};
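
To make the struct change above concrete: the per-waiter hint shrinks from an int CPU number (yield_cpu) to a one-byte flag (sleepy). A minimal userspace model follows; qnode_model and the main() driver are illustrative stand-ins, not the kernel's struct qnode.

#include <stdint.h>
#include <stdio.h>

struct qnode_model {
	struct qnode_model *next;
	int cpu;
	uint8_t sleepy;		/* 1 if the previous vCPU was preempted or
				 * if the previous node was sleepy */
	uint8_t locked;		/* 1 if lock acquired */
};

int main(void)
{
	struct qnode_model node = { .next = NULL, .cpu = 3 };

	/* A predecessor now hands us a boolean hint, not a CPU id. */
	node.sleepy = 1;
	printf("cpu=%d sleepy=%d locked=%d\n", node.cpu, node.sleepy, node.locked);
	return 0;
}

Shrinking the field to a u8 also means waiters no longer track which CPU to yield to at this point; that decision moves to where the lock word is re-sampled.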
@@ -43,7 +44,7 @@ static bool pv_sleepy_lock_sticky __read_mostly = false;
static u64 pv_sleepy_lock_interval_ns __read_mostly = 0;
static int pv_sleepy_lock_factor __read_mostly = 256;
static bool pv_yield_prev __read_mostly = true;
-static bool pv_yield_propagate_owner __read_mostly = true;
+static bool pv_yield_sleepy_owner __read_mostly = true;
static bool pv_prod_head __read_mostly = false;
static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
@@ -247,22 +248,18 @@ static __always_inline void seen_sleepy_lock(void)
this_cpu_write(sleepy_lock_seen_clock, sched_clock());
}
-static __always_inline void seen_sleepy_node(struct qspinlock *lock, u32 val)
+static __always_inline void seen_sleepy_node(void)
{
if (pv_sleepy_lock) {
if (pv_sleepy_lock_interval_ns)
this_cpu_write(sleepy_lock_seen_clock, sched_clock());
- if (val & _Q_LOCKED_VAL) {
- if (!(val & _Q_SLEEPY_VAL))
- try_set_sleepy(lock, val);
- }
+ /* Don't set sleepy because we likely have a stale val */
}
}
-static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
+static struct qnode *get_tail_qnode(struct qspinlock *lock, int prev_cpu)
{
- int cpu = decode_tail_cpu(val);
- struct qnodes *qnodesp = per_cpu_ptr(&qnodes, cpu);
+ struct qnodes *qnodesp = per_cpu_ptr(&qnodes, prev_cpu);
int idx;
/*
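
The refactor above has get_tail_qnode() take a pre-decoded prev_cpu, so the caller can decode the tail once and reuse the CPU number in yield_to_prev(). Below is a rough userspace sketch of the tail encoding this relies on; TAIL_CPU_OFFSET and TAIL_CPU_MASK are assumed values for illustration, not the kernel's _Q_TAIL_CPU_* constants.

#include <assert.h>
#include <stdint.h>

/* Assumed layout: the tail bits hold (cpu + 1), so an all-zero tail
 * means "no waiter queued". */
#define TAIL_CPU_OFFSET	16
#define TAIL_CPU_MASK	(0xffffu << TAIL_CPU_OFFSET)

static uint32_t encode_tail_cpu_model(int cpu)
{
	return ((uint32_t)cpu + 1) << TAIL_CPU_OFFSET;
}

static int decode_tail_cpu_model(uint32_t val)
{
	return (int)((val & TAIL_CPU_MASK) >> TAIL_CPU_OFFSET) - 1;
}

int main(void)
{
	uint32_t val = encode_tail_cpu_model(5);

	/* After this patch the caller decodes once and passes prev_cpu on. */
	assert(decode_tail_cpu_model(val) == 5);
	return 0;
}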
@@ -353,77 +350,66 @@ static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u
return __yield_to_locked_owner(lock, val, paravirt, mustq);
}
-static __always_inline void propagate_yield_cpu(struct qnode *node, u32 val, int *set_yield_cpu, bool paravirt)
+static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool paravirt)
{
struct qnode *next;
int owner;
if (!paravirt)
return;
- if (!pv_yield_propagate_owner)
- return;
-
- owner = get_owner_cpu(val);
- if (*set_yield_cpu == owner)
+ if (!pv_yield_sleepy_owner)
return;
next = READ_ONCE(node->next);
if (!next)
return;
- if (vcpu_is_preempted(owner)) {
- next->yield_cpu = owner;
- *set_yield_cpu = owner;
- } else if (*set_yield_cpu != -1) {
- next->yield_cpu = owner;
- *set_yield_cpu = owner;
- }
+ if (next->sleepy)
+ return;
+
+ owner = get_owner_cpu(val);
+ if (vcpu_is_preempted(owner))
+ next->sleepy = 1;
}
/* Called inside spin_begin() */
-static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, u32 val, bool paravirt)
+static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, int prev_cpu, bool paravirt)
{
- int prev_cpu = decode_tail_cpu(val);
u32 yield_count;
- int yield_cpu;
bool preempted = false;
if (!paravirt)
goto relax;
- if (!pv_yield_propagate_owner)
- goto yield_prev;
-
- yield_cpu = READ_ONCE(node->yield_cpu);
- if (yield_cpu == -1) {
- /* Propagate back the -1 CPU */
- if (node->next && node->next->yield_cpu != -1)
- node->next->yield_cpu = yield_cpu;
+ if (!pv_yield_sleepy_owner)
goto yield_prev;
- }
-
- yield_count = yield_count_of(yield_cpu);
- if ((yield_count & 1) == 0)
- goto yield_prev; /* owner vcpu is running */
-
- if (get_owner_cpu(READ_ONCE(lock->val)) != yield_cpu)
- goto yield_prev; /* re-sample lock owner */
- spin_end();
-
- preempted = true;
- seen_sleepy_node(lock, val);
+ /*
+ * If the previous waiter was preempted it might not be able to
+ * propagate sleepy to us, so check the lock in that case too.
+ */
+ if (node->sleepy || vcpu_is_preempted(prev_cpu)) {
+ u32 val = READ_ONCE(lock->val);
- smp_rmb();
+ if (val & _Q_LOCKED_VAL) {
+ if (node->next && !node->next->sleepy) {
+ /*
+ * Propagate sleepy to next waiter. Only if
+ * owner is preempted, which allows the queue
+ * to become "non-sleepy" if vCPU preemption
+ * ceases to occur, even if the lock remains
+ * highly contended.
+ */
+ if (vcpu_is_preempted(get_owner_cpu(val)))
+ node->next->sleepy = 1;
+ }
- if (yield_cpu == node->yield_cpu) {
- if (node->next && node->next->yield_cpu != yield_cpu)
- node->next->yield_cpu = yield_cpu;
- yield_to_preempted(yield_cpu, yield_count);
- spin_begin();
- return preempted;
+ preempted = yield_to_locked_owner(lock, val, paravirt);
+ if (preempted)
+ return preempted;
+ }
+ node->sleepy = false;
}
- spin_begin();
yield_prev:
if (!pv_yield_prev)
@@ -436,7 +422,7 @@ yield_prev:
spin_end();
preempted = true;
- seen_sleepy_node(lock, val);
+ seen_sleepy_node();
smp_rmb(); /* See __yield_to_locked_owner comment */
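
Pulling the new control flow out of the hunk above, here is a self-contained model of the sleepy check that replaces the yield_cpu propagation in yield_to_prev(). The *_model helpers are stand-ins for the kernel primitives and return fixed values only so the sketch runs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOCKED_VAL 1u	/* stand-in for _Q_LOCKED_VAL */

struct node_model {
	struct node_model *next;
	uint8_t sleepy;
};

/* Stand-ins for kernel helpers; results are hardcoded for the demo. */
static bool vcpu_is_preempted_model(int cpu) { return cpu == 7; }
static int get_owner_cpu_model(uint32_t val) { (void)val; return 7; }
static bool yield_to_locked_owner_model(uint32_t val) { (void)val; return true; }

static bool yield_to_prev_model(struct node_model *node, uint32_t lock_val,
				int prev_cpu)
{
	/* A preempted predecessor may not have propagated sleepy to us,
	 * so check the lock in that case too. */
	if (node->sleepy || vcpu_is_preempted_model(prev_cpu)) {
		if (lock_val & LOCKED_VAL) {
			/* Propagate only while the owner is preempted, so
			 * the queue turns non-sleepy once preemption stops. */
			if (node->next && !node->next->sleepy &&
			    vcpu_is_preempted_model(get_owner_cpu_model(lock_val)))
				node->next->sleepy = 1;

			if (yield_to_locked_owner_model(lock_val))
				return true;
		}
		node->sleepy = 0;
	}
	return false;
}

int main(void)
{
	struct node_model next = { 0 };
	struct node_model node = { .next = &next, .sleepy = 1 };
	bool yielded = yield_to_prev_model(&node, LOCKED_VAL, 3);

	printf("yielded=%d next.sleepy=%d\n", yielded, next.sleepy);
	return 0;
}

Note the design choice the in-tree comment spells out: the hint decays unless the owner is actually preempted, so a contended but running lock stops being treated as sleepy.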
@@ -546,7 +532,6 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
bool sleepy = false;
bool mustq = false;
int idx;
- int set_yield_cpu = -1;
int iters = 0;
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
@@ -570,7 +555,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
node->next = NULL;
node->lock = lock;
node->cpu = smp_processor_id();
- node->yield_cpu = -1;
+ node->sleepy = 0;
node->locked = 0;
tail = encode_tail_cpu(node->cpu);
@@ -587,7 +572,8 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
* head of the waitqueue.
*/
if (old & _Q_TAIL_CPU_MASK) {
- struct qnode *prev = get_tail_qnode(lock, old);
+ int prev_cpu = decode_tail_cpu(old);
+ struct qnode *prev = get_tail_qnode(lock, prev_cpu);
/* Link @node into the waitqueue. */
WRITE_ONCE(prev->next, node);
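
The hunk above is the standard MCS enqueue handshake: atomically publish yourself as the new tail, then link the old tail's ->next to your node. A C11 sketch under the simplifying assumption that the tail is a plain pointer (the kernel instead packs the tail CPU number into the 32-bit lock word, per encode_tail_cpu() above):

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	struct mcs_node *_Atomic next;
	atomic_int locked;
};

static struct mcs_node *_Atomic queue_tail;

static void mcs_enqueue(struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store(&node->next, NULL);
	atomic_store(&node->locked, 0);

	/* Atomically become the tail; whoever we displace is our prev. */
	prev = atomic_exchange(&queue_tail, node);
	if (prev) {
		/* Link @node into the waitqueue. */
		atomic_store(&prev->next, node);
		while (!atomic_load(&node->locked))
			;	/* spin; the kernel yields to prev here */
	}
}

int main(void)
{
	struct mcs_node node;

	mcs_enqueue(&node);	/* empty queue: become tail, no spinning */
	return 0;
}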
@@ -597,16 +583,12 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
while (!READ_ONCE(node->locked)) {
spec_barrier();
- if (yield_to_prev(lock, node, old, paravirt))
+ if (yield_to_prev(lock, node, prev_cpu, paravirt))
seen_preempted = true;
}
spec_barrier();
spin_end();
- /* Clear out stale propagated yield_cpu */
- if (paravirt && pv_yield_propagate_owner && node->yield_cpu != -1)
- node->yield_cpu = -1;
-
smp_rmb(); /* acquire barrier for the mcs lock */
/*
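
The smp_rmb() kept above pairs the waiter's read of node->locked with the previous owner's earlier stores, as the referenced __yield_to_locked_owner comment explains. A C11 analogue of that acquire/release pairing, assuming a simple flag-plus-payload handoff (handoff_flag and payload are illustrative names):

#include <stdatomic.h>

static atomic_int handoff_flag;
static int payload;	/* written by the releasing side before the flag */

static void publish_handoff(int value)
{
	payload = value;
	/* Release store: orders the payload write before the flag. */
	atomic_store_explicit(&handoff_flag, 1, memory_order_release);
}

static int wait_for_handoff(void)
{
	/* Acquire load: everything stored before the flag was set is
	 * visible once this loop exits. */
	while (!atomic_load_explicit(&handoff_flag, memory_order_acquire))
		;	/* spin */
	return payload;
}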
@@ -648,7 +630,7 @@ again:
}
}
- propagate_yield_cpu(node, val, &set_yield_cpu, paravirt);
+ propagate_sleepy(node, val, paravirt);
preempted = yield_head_to_locked_owner(lock, val, paravirt);
if (!maybe_stealers)
continue;
@@ -952,21 +934,21 @@ static int pv_yield_prev_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_prev, pv_yield_prev_get, pv_yield_prev_set, "%llu\n");
-static int pv_yield_propagate_owner_set(void *data, u64 val)
+static int pv_yield_sleepy_owner_set(void *data, u64 val)
{
- pv_yield_propagate_owner = !!val;
+ pv_yield_sleepy_owner = !!val;
return 0;
}
-static int pv_yield_propagate_owner_get(void *data, u64 *val)
+static int pv_yield_sleepy_owner_get(void *data, u64 *val)
{
- *val = pv_yield_propagate_owner;
+ *val = pv_yield_sleepy_owner;
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_propagate_owner, pv_yield_propagate_owner_get, pv_yield_propagate_owner_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_sleepy_owner, pv_yield_sleepy_owner_get, pv_yield_sleepy_owner_set, "%llu\n");
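
The get/set/DEFINE_SIMPLE_ATTRIBUTE triple above is the stock debugfs pattern this file uses for every knob. For reference, the same shape for a hypothetical knob; example_knob is not in the tree, and the wiring simply mirrors the lines above:

#include <linux/debugfs.h>
#include <linux/fs.h>

static bool example_knob __read_mostly = false;

static int example_knob_set(void *data, u64 val)
{
	example_knob = !!val;
	return 0;
}

static int example_knob_get(void *data, u64 *val)
{
	*val = example_knob;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_example_knob, example_knob_get, example_knob_set, "%llu\n");

The attribute is then registered with debugfs_create_file(), exactly as the init function in the next hunk does for the renamed qspl_pv_yield_sleepy_owner file.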
static int pv_prod_head_set(void *data, u64 val)
{
@@ -998,7 +980,7 @@ static __init int spinlock_debugfs_init(void)
debugfs_create_file("qspl_pv_sleepy_lock_interval_ns", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_interval_ns);
debugfs_create_file("qspl_pv_sleepy_lock_factor", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_factor);
debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev);
- debugfs_create_file("qspl_pv_yield_propagate_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_propagate_owner);
+ debugfs_create_file("qspl_pv_yield_sleepy_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_sleepy_owner);
debugfs_create_file("qspl_pv_prod_head", 0600, arch_debugfs_dir, NULL, &fops_pv_prod_head);
}
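
After the rename, the knob is exposed under the new file name. A small userspace check, assuming debugfs is mounted at /sys/kernel/debug and that arch_debugfs_dir is the powerpc subdirectory:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/powerpc/qspl_pv_yield_sleepy_owner";
	char buf[16];
	FILE *f = fopen(path, "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%s = %s", path, buf);	/* "1\n" by default */
	if (f)
		fclose(f);
	return 0;
}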