author     Jonathan Corbet <corbet@lwn.net>  2008-05-19 00:27:41 +0400
committer  Jonathan Corbet <corbet@lwn.net>  2008-06-21 00:05:53 +0400
commit     0b2806768899dba5967bcd4a3b93eaed9a1dc4f3 (patch)
tree       c9e7762d9b236883f4a23a170919317a6e7c93c7 /include/linux/smp_lock.h
parent     6606470dd1d628878383c96d10b52a77986ddac7 (diff)
download   linux-0b2806768899dba5967bcd4a3b93eaed9a1dc4f3.tar.xz
Add cycle_kernel_lock()
A number of driver functions are so obviously trivial that they do not need the big kernel lock - at least not overtly. It turns out that the acquisition of the BKL in driver open() functions can perform a sort of poor-hacker's serialization function, delaying the open operation until the driver is certain to have completed its initialization.

Add a simple cycle_kernel_lock() function for these cases to make it clear that there is no need to *hold* the BKL, just to be sure that we can acquire it.

Signed-off-by: Jonathan Corbet <corbet@lwn.net>
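For illustration only (not part of this patch): a minimal sketch of how a trivial driver open() might use the new helper, assuming a hypothetical "foo" driver whose open() touches no shared state of its own but should still wait until any BKL-serialized initialization has finished. The foo_open() name and the file_operations wiring are invented for the example.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

static int foo_open(struct inode *inode, struct file *file)
{
	/*
	 * Nothing here needs the BKL held; cycling it simply makes
	 * open() wait until any BKL-holding initialization path has
	 * completed, without keeping the lock across this function.
	 */
	cycle_kernel_lock();
	return 0;
}

static const struct file_operations foo_fops = {
	.owner = THIS_MODULE,
	.open  = foo_open,
};

The point of the helper is documentation of intent: the caller only needs to know that the BKL *became* available, not to hold it.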
Diffstat (limited to 'include/linux/smp_lock.h')
-rw-r--r--  include/linux/smp_lock.h  13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index aab3a4cff4e1..813be59bf345 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -27,11 +27,24 @@ static inline int reacquire_kernel_lock(struct task_struct *task)
 extern void __lockfunc lock_kernel(void) __acquires(kernel_lock);
 extern void __lockfunc unlock_kernel(void) __releases(kernel_lock);
 
+/*
+ * Various legacy drivers don't really need the BKL in a specific
+ * function, but they *do* need to know that the BKL became available.
+ * This function just avoids wrapping a bunch of lock/unlock pairs
+ * around code which doesn't really need it.
+ */
+static inline void cycle_kernel_lock(void)
+{
+	lock_kernel();
+	unlock_kernel();
+}
+
 #else
 
 #define lock_kernel() do { } while(0)
 #define unlock_kernel() do { } while(0)
 #define release_kernel_lock(task) do { } while(0)
+#define cycle_kernel_lock() do { } while(0)
 #define reacquire_kernel_lock(task) 0
 #define kernel_locked() 1
 