mirror of
https://github.com/lkl/linux.git
synced 2025-12-19 16:13:19 +09:00
hwspinlock: add the 'in_atomic' API
Add the 'in_atomic' mode which can be called from an atomic context. This mode relies on the existing 'raw' mode (no lock, no preemption/irq disabling) with the difference that the timeout is not based on jiffies (jiffies won't increase when irq are disabled) but handled with busy-waiting udelay() calls. Signed-off-by: Fabien Dessenne <fabien.dessenne@st.com> Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
This commit is contained in:
committed by
Bjorn Andersson
parent
bce6f52213
commit
360aa640a5
@@ -14,9 +14,10 @@
|
||||
#include <linux/sched.h>
|
||||
|
||||
/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE		0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ		0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW		0x03
#define HWLOCK_IN_ATOMIC	0x04 /* Called while in atomic context */
|
||||
struct device;
|
||||
struct device_node;
|
||||
@@ -222,6 +223,23 @@ static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
|
||||
return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
|
||||
* @hwlock: an hwspinlock which we want to trylock
|
||||
*
|
||||
* This function attempts to lock an hwspinlock, and will immediately fail
|
||||
* if the hwspinlock is already taken.
|
||||
*
|
||||
* This function shall be called only from an atomic context.
|
||||
*
|
||||
* Returns 0 if we successfully locked the hwspinlock, -EBUSY if
|
||||
* the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
|
||||
*/
|
||||
static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
|
||||
{
|
||||
return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* hwspin_trylock() - attempt to lock a specific hwspinlock
|
||||
* @hwlock: an hwspinlock which we want to trylock
|
||||
@@ -312,6 +330,28 @@ int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
|
||||
return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
|
||||
* @hwlock: the hwspinlock to be locked
|
||||
* @to: timeout value in msecs
|
||||
*
|
||||
* This function locks the underlying @hwlock. If the @hwlock
|
||||
* is already taken, the function will busy loop waiting for it to
|
||||
* be released, but give up when @timeout msecs have elapsed.
|
||||
*
|
||||
* This function shall be called only from an atomic context and the timeout
|
||||
* value shall not exceed a few msecs.
|
||||
*
|
||||
* Returns 0 when the @hwlock was successfully taken, and an appropriate
|
||||
* error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
|
||||
* busy after @timeout msecs). The function will never sleep.
|
||||
*/
|
||||
static inline
|
||||
int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
|
||||
{
|
||||
return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* hwspin_lock_timeout() - lock an hwspinlock with timeout limit
|
||||
* @hwlock: the hwspinlock to be locked
|
||||
@@ -386,6 +426,21 @@ static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
|
||||
__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* hwspin_unlock_in_atomic() - unlock hwspinlock
|
||||
* @hwlock: a previously-acquired hwspinlock which we want to unlock
|
||||
*
|
||||
* This function will unlock a specific hwspinlock.
|
||||
*
|
||||
* @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
|
||||
* this function: it is a bug to call unlock on a @hwlock that is already
|
||||
* unlocked.
|
||||
*/
|
||||
static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
|
||||
{
|
||||
__hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* hwspin_unlock() - unlock hwspinlock
|
||||
* @hwlock: a previously-acquired hwspinlock which we want to unlock
|
||||
|
||||
Reference in New Issue
Block a user