From f61ab43cc1a6146d6eef7e0713a452c3677ad13e Mon Sep 17 00:00:00 2001
From: Nicholas Piggin <npiggin@gmail.com>
Date: Sat, 26 Nov 2022 19:59:27 +1000
Subject: powerpc/qspinlock: allow lock stealing in trylock and lock fastpath

This change allows trylock to steal the lock. It also allows the
initial lock attempt to steal the lock rather than bailing out and
going to the slow path.

This gives trylock more strength: without this a continually-contended
lock will never permit a trylock to succeed. With this change, the
trylock has a small but non-zero chance.

It also gives the lock fastpath most of the benefit of passing the
reservation back through to the steal loop in the slow path without
the complexity.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221126095932.1234527-13-npiggin@gmail.com
---
 arch/powerpc/lib/qspinlock.c | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'arch/powerpc/lib')

diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 2f6c0bed25ea..8e5b8bc3f094 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -24,7 +24,11 @@ struct qnodes {
 
 /* Tuning parameters */
 static int steal_spins __read_mostly = (1 << 5);
+#if _Q_SPIN_TRY_LOCK_STEAL == 1
+static const bool maybe_stealers = true;
+#else
 static bool maybe_stealers __read_mostly = true;
+#endif
 static int head_spins __read_mostly = (1 << 8);
 
 static bool pv_yield_owner __read_mostly = true;
@@ -483,6 +487,10 @@ void pv_spinlocks_init(void)
 #include <linux/debugfs.h>
 static int steal_spins_set(void *data, u64 val)
 {
+#if _Q_SPIN_TRY_LOCK_STEAL == 1
+	/* MAYBE_STEAL remains true */
+	steal_spins = val;
+#else
 	static DEFINE_MUTEX(lock);
 
 	/*
@@ -507,6 +515,7 @@ static int steal_spins_set(void *data, u64 val)
 		steal_spins = val;
 	}
 	mutex_unlock(&lock);
+#endif
 
 	return 0;
 }
-- 
cgit v1.2.3
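
For readers following the series, a rough plain-C sketch of what "trylock can
steal" means may help. This is an illustration only, not the kernel's
implementation: the real powerpc code uses hand-written larx/stcx. inline
assembly on the actual lock-word encoding, and the word layout, helper names,
and C11 atomics below are all assumptions made for the sketch.

	/*
	 * Sketch of trylock with and without lock stealing, under an
	 * assumed lock-word layout: low bit = locked, high bits = queue
	 * tail. NOT the powerpc implementation (which uses inline asm).
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define Q_LOCKED_VAL	1u	/* hypothetical locked bit */

	struct qspinlock_sketch { _Atomic uint32_t val; };

	/*
	 * Old behaviour: succeed only if the word is entirely zero, i.e.
	 * no owner AND no queued waiters. A continually-contended lock
	 * with a populated queue makes this fail forever.
	 */
	static bool trylock_no_steal(struct qspinlock_sketch *lock)
	{
		uint32_t old = 0;

		return atomic_compare_exchange_strong(&lock->val, &old,
						      Q_LOCKED_VAL);
	}

	/*
	 * New behaviour: as long as the owner bit is clear, set it while
	 * preserving any queued-tail bits -- stealing the lock ahead of
	 * the queued waiters. Trylock now has a small but non-zero chance
	 * even under continual contention.
	 */
	static bool trylock_with_steal(struct qspinlock_sketch *lock)
	{
		uint32_t old = atomic_load(&lock->val);

		while (!(old & Q_LOCKED_VAL)) {
			/* On failure, 'old' reloads the current value. */
			if (atomic_compare_exchange_weak(&lock->val, &old,
							 old | Q_LOCKED_VAL))
				return true;
		}
		return false;	/* an owner still holds the lock */
	}

	/*
	 * The lock fastpath gets the same benefit: a steal here avoids
	 * entering the slow path's queue at all.
	 */
	static void lock_fastpath_sketch(struct qspinlock_sketch *lock)
	{
		if (trylock_with_steal(lock))
			return;
		/* ... fall through to the queued slow path ... */
	}

Note that the retry loop is naturally bounded: it only retries while the lock
is observed free, so a held lock fails fast and trylock semantics are kept.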
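
A note on the first hunk's design choice: making maybe_stealers a const under
_Q_SPIN_TRY_LOCK_STEAL lets the compiler discard the slow path's no-stealers
special cases at build time, and it is why the debugfs setter's mutex and
synchronize_rcu() transition code is compiled out -- that code exists only to
flip the flag safely at runtime, and a const cannot be assigned at all. A
minimal sketch of the effect; the function below is a hypothetical stand-in
for the slow path's !maybe_stealers cases, not code from the patch:

	#if _Q_SPIN_TRY_LOCK_STEAL == 1
	/*
	 * Compile-time constant: trylock can always steal, so stealers
	 * can never be ruled out and the flag never changes.
	 */
	static const bool maybe_stealers = true;
	#else
	/*
	 * Runtime-tunable via debugfs; flipping it needs the mutex +
	 * synchronize_rcu() dance shown in the patch.
	 */
	static bool maybe_stealers = true;
	#endif

	/* Hypothetical stand-in for a slow-path no-stealers case. */
	static int queue_head_can_assume_no_stealers(void)
	{
		if (!maybe_stealers) {
			/*
			 * With the const flag this branch is provably
			 * dead and is eliminated entirely.
			 */
			return 1;
		}
		return 0;
	}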