author     Baolin Wang <baolin.wang@linaro.org>  2018-04-08 11:06:57 +0800
committer  Bjorn Andersson <bjorn.andersson@linaro.org>  2018-04-17 14:49:21 -0700
commit     1e6c06a7e88c251d8a30271ad5206fbd967a4576 (patch)
tree       42921ec0798d2d306cec575c10f7b9317cac9b56 /drivers/hwspinlock
parent     66742b19e5e0d01297d355cc445d4920b648821b (diff)
hwspinlock: Introduce one new mode for hwspinlock
In some scenarios, users need to perform time-consuming or sleepable operations while holding a hardware spinlock that synchronizes multiple subsystems. For example, the Spreadtrum platform has a PMIC efuse that must be accessed under a hardware lock, and a single efuse operation can take almost 5 ms; interrupts or preemption cannot be disabled for that long. Introduce a new mode that only acquires the hardware lock, without disabling interrupts or preemption, and require users of this mode to protect the hardware lock with a mutex or spinlock of their own to avoid deadlock.

Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
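As a rough illustration of the call pattern this commit enables (not part of the patch itself): a minimal sketch in C, assuming the HWLOCK_RAW mode from this series and using the __hwspin_lock_timeout()/__hwspin_unlock() core entry points touched below. The names pmic_efuse_mutex, efuse_hwlock and pmic_efuse_read_blocking() are hypothetical.

#include <linux/hwspinlock.h>
#include <linux/mutex.h>

/* Hypothetical driver-local mutex guarding the HWLOCK_RAW critical section. */
static DEFINE_MUTEX(pmic_efuse_mutex);

static int pmic_efuse_read_locked(struct hwspinlock *efuse_hwlock)
{
	int ret;

	/* Take the local mutex first so only one task here holds the hwlock. */
	mutex_lock(&pmic_efuse_mutex);

	/* HWLOCK_RAW: the core does not disable preemption or interrupts. */
	ret = __hwspin_lock_timeout(efuse_hwlock, 100, HWLOCK_RAW, NULL);
	if (ret) {
		mutex_unlock(&pmic_efuse_mutex);
		return ret;
	}

	/*
	 * Sleeping is allowed here (e.g. an efuse transfer of almost 5 ms),
	 * since no spinlock is held and interrupts stay enabled.
	 */
	ret = pmic_efuse_read_blocking();	/* hypothetical helper */

	__hwspin_unlock(efuse_hwlock, HWLOCK_RAW, NULL);
	mutex_unlock(&pmic_efuse_mutex);

	return ret;
}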
Diffstat (limited to 'drivers/hwspinlock')
-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c  34
1 file changed, 27 insertions(+), 7 deletions(-)
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index f4a59f5631e4..5278d0560a4a 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -71,10 +71,16 @@ static DEFINE_MUTEX(hwspinlock_tree_lock);
* This function attempts to lock an hwspinlock, and will immediately
* fail if the hwspinlock is already taken.
*
- * Upon a successful return from this function, preemption (and possibly
- * interrupts) is disabled, so the caller must not sleep, and is advised to
- * release the hwspinlock as soon as possible. This is required in order to
- * minimize remote cores polling on the hardware interconnect.
+ * Caution: If the mode is HWLOCK_RAW, the caller must serialize access to the
+ * hardware lock with a mutex or spinlock of their own. This mode exists for
+ * scenarios where time-consuming or sleepable operations are performed under
+ * the hardware lock, which therefore require a sleepable lock (e.g. a mutex).
+ *
+ * If the mode is not HWLOCK_RAW, upon a successful return from this function,
+ * preemption (and possibly interrupts) is disabled, so the caller must not
+ * sleep, and is advised to release the hwspinlock as soon as possible. This is
+ * required in order to minimize remote cores polling on the hardware
+ * interconnect.
*
* The user decides whether local interrupts are disabled or not, and if yes,
* whether he wants their previous state to be saved. It is up to the user
@@ -113,6 +119,9 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
case HWLOCK_IRQ:
ret = spin_trylock_irq(&hwlock->lock);
break;
+ case HWLOCK_RAW:
+ ret = 1;
+ break;
default:
ret = spin_trylock(&hwlock->lock);
break;
@@ -134,6 +143,9 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
case HWLOCK_IRQ:
spin_unlock_irq(&hwlock->lock);
break;
+ case HWLOCK_RAW:
+ /* Nothing to do */
+ break;
default:
spin_unlock(&hwlock->lock);
break;
@@ -170,9 +182,14 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock);
* is already taken, the function will busy loop waiting for it to
* be released, but give up after @timeout msecs have elapsed.
*
- * Upon a successful return from this function, preemption is disabled
- * (and possibly local interrupts, too), so the caller must not sleep,
- * and is advised to release the hwspinlock as soon as possible.
+ * Caution: If the mode is HWLOCK_RAW, the caller must serialize access to the
+ * hardware lock with a mutex or spinlock of their own. This mode exists for
+ * scenarios where time-consuming or sleepable operations are performed under
+ * the hardware lock, which therefore require a sleepable lock (e.g. a mutex).
+ *
+ * If the mode is not HWLOCK_RAW, upon a successful return from this function,
+ * preemption is disabled (and possibly local interrupts, too), so the caller
+ * must not sleep, and is advised to release the hwspinlock as soon as possible.
* This is required in order to minimize remote cores polling on the
* hardware interconnect.
*
@@ -266,6 +283,9 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
case HWLOCK_IRQ:
spin_unlock_irq(&hwlock->lock);
break;
+ case HWLOCK_RAW:
+ /* Nothing to do */
+ break;
default:
spin_unlock(&hwlock->lock);
break;
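For the non-blocking path, a similarly hedged sketch (lock and helper names are hypothetical; the caller is assumed to already hold its own mutex, as the new comments above require):

/* Returns true if the hardware lock was grabbed; HWLOCK_RAW leaves IRQs and preemption untouched. */
static bool pmic_efuse_try_grab(struct hwspinlock *efuse_hwlock)
{
	return __hwspin_trylock(efuse_hwlock, HWLOCK_RAW, NULL) == 0;
}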