From 7f40c260df86b9d145a4ddb13879787c266c5232 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Thu, 30 May 2019 21:13:19 -0500 Subject: dt-bindings: hwlock: Update OMAP binding for TI K3 SoCs The TI K3 AM65x and J721E family of SoCs have a HwSpinlock IP that is similar to the existing HwSpinlock IP present in OMAP architecture based SoCs with minor differences. Update the existing OMAP HwSpinlock binding for this IP on TI K3 AM65x and J721E SoCs. The same compatible from AM65x SoCs is reused for J721E SoCs. Signed-off-by: Suman Anna Signed-off-by: Bjorn Andersson --- .../devicetree/bindings/hwlock/omap-hwspinlock.txt | 25 +++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) (limited to 'Documentation') diff --git a/Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt index 2c9804f4f4ac..8d365f89694c 100644 --- a/Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt +++ b/Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt @@ -1,12 +1,16 @@ -OMAP4+ HwSpinlock Driver -======================== +TI HwSpinlock for OMAP and K3 based SoCs +========================================= Required properties: -- compatible: Should be "ti,omap4-hwspinlock" for - OMAP44xx, OMAP54xx, AM33xx, AM43xx, DRA7xx SoCs +- compatible: Should be one of the following, + "ti,omap4-hwspinlock" for + OMAP44xx, OMAP54xx, AM33xx, AM43xx, DRA7xx SoCs + "ti,am654-hwspinlock" for + K3 AM65x and J721E SoCs - reg: Contains the hwspinlock module register address space (base address and length) - ti,hwmods: Name of the hwmod associated with the hwspinlock device + (for OMAP architecture based SoCs only) - #hwlock-cells: Should be 1. The OMAP hwspinlock users will use a 0-indexed relative hwlock number as the argument specifier value for requesting a specific hwspinlock @@ -17,10 +21,21 @@ Please look at the generic hwlock binding for usage information for consumers, Example: -/* OMAP4 */ +1. OMAP4 SoCs hwspinlock: spinlock@4a0f6000 { compatible = "ti,omap4-hwspinlock"; reg = <0x4a0f6000 0x1000>; ti,hwmods = "spinlock"; #hwlock-cells = <1>; }; + +2. AM65x SoCs and J721E SoCs +&cbass_main { + cbass_main_navss: interconnect0 { + hwspinlock: spinlock@30e00000 { + compatible = "ti,am654-hwspinlock"; + reg = <0x00 0x30e00000 0x00 0x1000>; + #hwlock-cells = <1>; + }; + }; +}; -- cgit v1.2.3 From bce6f5221374ba451a337d0a3773e6eb99dad3e8 Mon Sep 17 00:00:00 2001 From: Fabien Dessenne Date: Thu, 7 Mar 2019 16:58:22 +0100 Subject: hwspinlock: document the hwspinlock 'raw' API Document the hwspin_lock_timeout_raw(), hwspin_trylock_raw() and hwspin_unlock_raw() API. Signed-off-by: Fabien Dessenne Signed-off-by: Bjorn Andersson --- Documentation/hwspinlock.txt | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) (limited to 'Documentation') diff --git a/Documentation/hwspinlock.txt b/Documentation/hwspinlock.txt index ed640a278185..c3403f9ae27a 100644 --- a/Documentation/hwspinlock.txt +++ b/Documentation/hwspinlock.txt @@ -134,6 +134,23 @@ notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs). The function will never sleep. +:: + + int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int timeout); + +Lock a previously-assigned hwspinlock with a timeout limit (specified in +msecs). If the hwspinlock is already taken, the function will busy loop +waiting for it to be released, but give up when the timeout elapses. 
+ +Caution: User must protect the routine of getting hardware lock with mutex +or spinlock to avoid dead-lock, that will let user can do some time-consuming +or sleepable operations under the hardware lock. + +Returns 0 when successful and an appropriate error code otherwise (most +notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs). + +The function will never sleep. + :: int hwspin_trylock(struct hwspinlock *hwlock); @@ -184,6 +201,21 @@ Returns 0 on success and an appropriate error code otherwise (most notably -EBUSY if the hwspinlock was already taken). The function will never sleep. +:: + + int hwspin_trylock_raw(struct hwspinlock *hwlock); + +Attempt to lock a previously-assigned hwspinlock, but immediately fail if +it is already taken. + +Caution: User must protect the routine of getting hardware lock with mutex +or spinlock to avoid dead-lock, that will let user can do some time-consuming +or sleepable operations under the hardware lock. + +Returns 0 on success and an appropriate error code otherwise (most +notably -EBUSY if the hwspinlock was already taken). +The function will never sleep. + :: void hwspin_unlock(struct hwspinlock *hwlock); @@ -220,6 +252,16 @@ Upon a successful return from this function, preemption is reenabled, and the state of the local interrupts is restored to the state saved at the given flags. This function will never sleep. +:: + + void hwspin_unlock_raw(struct hwspinlock *hwlock); + +Unlock a previously-locked hwspinlock. + +The caller should **never** unlock an hwspinlock which is already unlocked. +Doing so is considered a bug (there is no protection against this). +This function will never sleep. + :: int hwspin_lock_get_id(struct hwspinlock *hwlock); -- cgit v1.2.3 From 360aa640a59f269b784848c0b2d6d462952750d9 Mon Sep 17 00:00:00 2001 From: Fabien Dessenne Date: Thu, 7 Mar 2019 16:58:23 +0100 Subject: hwspinlock: add the 'in_atomic' API Add the 'in_atomic' mode which can be called from an atomic context. This mode relies on the existing 'raw' mode (no lock, no preemption/irq disabling) with the difference that the timeout is not based on jiffies (jiffies won't increase when irq are disabled) but handled with busy-waiting udelay() calls. Signed-off-by: Fabien Dessenne Signed-off-by: Bjorn Andersson --- Documentation/hwspinlock.txt | 39 +++++++++++++++++++++++ drivers/hwspinlock/hwspinlock_core.c | 43 +++++++++++++++++-------- include/linux/hwspinlock.h | 61 ++++++++++++++++++++++++++++++++++-- 3 files changed, 127 insertions(+), 16 deletions(-) (limited to 'Documentation') diff --git a/Documentation/hwspinlock.txt b/Documentation/hwspinlock.txt index c3403f9ae27a..6f03713b7003 100644 --- a/Documentation/hwspinlock.txt +++ b/Documentation/hwspinlock.txt @@ -151,6 +151,22 @@ notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs). The function will never sleep. +:: + + int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to); + +Lock a previously-assigned hwspinlock with a timeout limit (specified in +msecs). If the hwspinlock is already taken, the function will busy loop +waiting for it to be released, but give up when the timeout elapses. + +This function shall be called only from an atomic context and the timeout +value shall not exceed a few msecs. + +Returns 0 when successful and an appropriate error code otherwise (most +notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs). + +The function will never sleep. 
+ :: int hwspin_trylock(struct hwspinlock *hwlock); @@ -216,6 +232,19 @@ Returns 0 on success and an appropriate error code otherwise (most notably -EBUSY if the hwspinlock was already taken). The function will never sleep. +:: + + int hwspin_trylock_in_atomic(struct hwspinlock *hwlock); + +Attempt to lock a previously-assigned hwspinlock, but immediately fail if +it is already taken. + +This function shall be called only from an atomic context. + +Returns 0 on success and an appropriate error code otherwise (most +notably -EBUSY if the hwspinlock was already taken). +The function will never sleep. + :: void hwspin_unlock(struct hwspinlock *hwlock); @@ -262,6 +291,16 @@ The caller should **never** unlock an hwspinlock which is already unlocked. Doing so is considered a bug (there is no protection against this). This function will never sleep. +:: + + void hwspin_unlock_in_atomic(struct hwspinlock *hwlock); + +Unlock a previously-locked hwspinlock. + +The caller should **never** unlock an hwspinlock which is already unlocked. +Doing so is considered a bug (there is no protection against this). +This function will never sleep. + :: int hwspin_lock_get_id(struct hwspinlock *hwlock); diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c index d806307f19c2..8862445aa858 100644 --- a/drivers/hwspinlock/hwspinlock_core.c +++ b/drivers/hwspinlock/hwspinlock_core.c @@ -9,6 +9,7 @@ #define pr_fmt(fmt) "%s: " fmt, __func__ +#include #include #include #include @@ -23,6 +24,9 @@ #include "hwspinlock_internal.h" +/* retry delay used in atomic context */ +#define HWSPINLOCK_RETRY_DELAY_US 100 + /* radix tree tags */ #define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */ @@ -68,11 +72,11 @@ static DEFINE_MUTEX(hwspinlock_tree_lock); * user need some time-consuming or sleepable operations under the hardware * lock, they need one sleepable lock (like mutex) to protect the operations. * - * If the mode is not HWLOCK_RAW, upon a successful return from this function, - * preemption (and possibly interrupts) is disabled, so the caller must not - * sleep, and is advised to release the hwspinlock as soon as possible. This is - * required in order to minimize remote cores polling on the hardware - * interconnect. + * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful + * return from this function, preemption (and possibly interrupts) is disabled, + * so the caller must not sleep, and is advised to release the hwspinlock as + * soon as possible. This is required in order to minimize remote cores polling + * on the hardware interconnect. * * The user decides whether local interrupts are disabled or not, and if yes, * whether he wants their previous state to be saved. It is up to the user @@ -112,6 +116,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags) ret = spin_trylock_irq(&hwlock->lock); break; case HWLOCK_RAW: + case HWLOCK_IN_ATOMIC: ret = 1; break; default: @@ -136,6 +141,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags) spin_unlock_irq(&hwlock->lock); break; case HWLOCK_RAW: + case HWLOCK_IN_ATOMIC: /* Nothing to do */ break; default: @@ -179,11 +185,14 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock); * user need some time-consuming or sleepable operations under the hardware * lock, they need one sleepable lock (like mutex) to protect the operations. 
* - * If the mode is not HWLOCK_RAW, upon a successful return from this function, - * preemption is disabled (and possibly local interrupts, too), so the caller - * must not sleep, and is advised to release the hwspinlock as soon as possible. - * This is required in order to minimize remote cores polling on the - * hardware interconnect. + * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout + * is handled with busy-waiting delays, hence shall not exceed few msecs. + * + * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful + * return from this function, preemption (and possibly interrupts) is disabled, + * so the caller must not sleep, and is advised to release the hwspinlock as + * soon as possible. This is required in order to minimize remote cores polling + * on the hardware interconnect. * * The user decides whether local interrupts are disabled or not, and if yes, * whether he wants their previous state to be saved. It is up to the user @@ -198,7 +207,7 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to, int mode, unsigned long *flags) { int ret; - unsigned long expire; + unsigned long expire, atomic_delay = 0; expire = msecs_to_jiffies(to) + jiffies; @@ -212,8 +221,15 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to, * The lock is already taken, let's check if the user wants * us to try again */ - if (time_is_before_eq_jiffies(expire)) - return -ETIMEDOUT; + if (mode == HWLOCK_IN_ATOMIC) { + udelay(HWSPINLOCK_RETRY_DELAY_US); + atomic_delay += HWSPINLOCK_RETRY_DELAY_US; + if (atomic_delay > to * 1000) + return -ETIMEDOUT; + } else { + if (time_is_before_eq_jiffies(expire)) + return -ETIMEDOUT; + } /* * Allow platform-specific relax handlers to prevent @@ -276,6 +292,7 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) spin_unlock_irq(&hwlock->lock); break; case HWLOCK_RAW: + case HWLOCK_IN_ATOMIC: /* Nothing to do */ break; default: diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h index 0afe693be5f4..bfe7c1f1ac6d 100644 --- a/include/linux/hwspinlock.h +++ b/include/linux/hwspinlock.h @@ -14,9 +14,10 @@ #include /* hwspinlock mode argument */ -#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */ -#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ -#define HWLOCK_RAW 0x03 +#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */ +#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ +#define HWLOCK_RAW 0x03 +#define HWLOCK_IN_ATOMIC 0x04 /* Called while in atomic context */ struct device; struct device_node; @@ -222,6 +223,23 @@ static inline int hwspin_trylock_raw(struct hwspinlock *hwlock) return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL); } +/** + * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock + * @hwlock: an hwspinlock which we want to trylock + * + * This function attempts to lock an hwspinlock, and will immediately fail + * if the hwspinlock is already taken. + * + * This function shall be called only from an atomic context. + * + * Returns 0 if we successfully locked the hwspinlock, -EBUSY if + * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. 
+ */ +static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock) +{ + return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL); +} + /** * hwspin_trylock() - attempt to lock a specific hwspinlock * @hwlock: an hwspinlock which we want to trylock @@ -312,6 +330,28 @@ int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to) return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL); } +/** + * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit + * @hwlock: the hwspinlock to be locked + * @to: timeout value in msecs + * + * This function locks the underlying @hwlock. If the @hwlock + * is already taken, the function will busy loop waiting for it to + * be released, but give up when @timeout msecs have elapsed. + * + * This function shall be called only from an atomic context and the timeout + * value shall not exceed a few msecs. + * + * Returns 0 when the @hwlock was successfully taken, and an appropriate + * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still + * busy after @timeout msecs). The function will never sleep. + */ +static inline +int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to) +{ + return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL); +} + /** * hwspin_lock_timeout() - lock an hwspinlock with timeout limit * @hwlock: the hwspinlock to be locked @@ -386,6 +426,21 @@ static inline void hwspin_unlock_raw(struct hwspinlock *hwlock) __hwspin_unlock(hwlock, HWLOCK_RAW, NULL); } +/** + * hwspin_unlock_in_atomic() - unlock hwspinlock + * @hwlock: a previously-acquired hwspinlock which we want to unlock + * + * This function will unlock a specific hwspinlock. + * + * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling + * this function: it is a bug to call unlock on a @hwlock that is already + * unlocked. + */ +static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock) +{ + __hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL); +} + /** * hwspin_unlock() - unlock hwspinlock * @hwlock: a previously-acquired hwspinlock which we want to unlock -- cgit v1.2.3
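
Taken together, the patches above give hwspinlock users two extra locking modes: a 'raw' mode for process context, where the caller serializes local access itself (see the Caution notes), and an 'in_atomic' mode whose timeout is busy-waited in HWSPINLOCK_RETRY_DELAY_US steps so it can be used while interrupts are disabled. A minimal driver-side sketch of how the two modes might be combined is shown below; the hwspinlock id, the lock names and the surrounding driver structure are illustrative assumptions, not something taken from the patches:

::

  /*
   * Hypothetical consumer sketch (not part of the commits above).
   * The hwspinlock id 7, cfg_mutex, irq_lock and the function names
   * are made up for illustration.
   */
  #include <linux/hwspinlock.h>
  #include <linux/interrupt.h>
  #include <linux/mutex.h>
  #include <linux/spinlock.h>

  static struct hwspinlock *hwlock;
  static DEFINE_MUTEX(cfg_mutex);     /* local serialization, per the Caution note */
  static DEFINE_SPINLOCK(irq_lock);

  static int example_init(void)
  {
          /* id 7 is arbitrary here; real drivers usually get it from DT */
          hwlock = hwspin_lock_request_specific(7);
          return hwlock ? 0 : -EBUSY;
  }

  /* Process context: raw mode, caller provides its own local locking */
  static int example_update_shared_config(void)
  {
          int ret;

          mutex_lock(&cfg_mutex);
          ret = hwspin_lock_timeout_raw(hwlock, 100);     /* 100 ms timeout */
          if (ret) {
                  mutex_unlock(&cfg_mutex);
                  return ret;
          }

          /* ... possibly lengthy access to the resource shared with the remote core ... */

          hwspin_unlock_raw(hwlock);
          mutex_unlock(&cfg_mutex);
          return 0;
  }

  /* Atomic context (here under a spinlock): in_atomic mode, short timeout */
  static irqreturn_t example_irq_handler(int irq, void *data)
  {
          unsigned long flags;

          spin_lock_irqsave(&irq_lock, flags);
          if (hwspin_lock_timeout_in_atomic(hwlock, 2)) { /* a few ms at most */
                  spin_unlock_irqrestore(&irq_lock, flags);
                  return IRQ_NONE;
          }

          /* ... short, non-sleeping access to the shared resource ... */

          hwspin_unlock_in_atomic(hwlock);
          spin_unlock_irqrestore(&irq_lock, flags);
          return IRQ_HANDLED;
  }

Keeping the in_atomic timeout down to a couple of milliseconds matches the implementation above, which polls in 100 us udelay() steps instead of checking jiffies (jiffies do not advance while interrupts are disabled).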