author    Linus Torvalds <torvalds@linux-foundation.org>  2015-06-25 01:09:40 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-06-25 01:09:40 +0300
commit    98ec21a01896751b673b6c731ca8881daa8b2c6d (patch)
tree      9d6d780675436efc894878475284c70f766126dd /include
parent    a262948335bc5359b82f0ed5ef35f6e82ca44d16 (diff)
parent    cbce1a686700595de65ee363b9b3283ae85d8fc5 (diff)
download  linux-98ec21a01896751b673b6c731ca8881daa8b2c6d.tar.xz
Merge branch 'sched-hrtimers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Thomas Gleixner:
 "This series of scheduler updates depends on sched/core and
  timers/core branches, which are already in your tree:

   - Scheduler balancing overhaul to plug a hard to trigger race which
     causes an oops in the balancer (Peter Zijlstra)

   - Lockdep updates which are related to the balancing updates (Peter
     Zijlstra)"

* 'sched-hrtimers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched,lockdep: Employ lock pinning
  lockdep: Implement lock pinning
  lockdep: Simplify lock_release()
  sched: Streamline the task migration locking a little
  sched: Move code around
  sched,dl: Fix sched class hopping CBS hole
  sched, dl: Convert switched_{from, to}_dl() / prio_changed_dl() to balance callbacks
  sched,dl: Remove return value from pull_dl_task()
  sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks
  sched,rt: Remove return value from pull_rt_task()
  sched: Allow balance callbacks for check_class_changed()
  sched: Use replace normalize_task() with __sched_setscheduler()
  sched: Replace post_schedule with a balance callback list
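For context, the lock pinning API added by this merge lets a lock holder assert that a lock stays held across a region of code: once pinned, any release of the lock (even a temporary unlock/relock) trips a lockdep warning. Below is a minimal sketch of the intended usage pattern; struct rq and run_balance_work() are illustrative stand-ins, not the kernel's actual scheduler code.

	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	/* Hypothetical stand-in for the scheduler's runqueue. */
	struct rq {
		raw_spinlock_t lock;
	};

	/*
	 * Caller holds rq->lock. Pinning it asserts it stays held for
	 * the whole region: if anything called from here silently drops
	 * rq->lock, lockdep complains instead of the race going unnoticed.
	 */
	static void run_balance_work(struct rq *rq)
	{
		lockdep_pin_lock(&rq->lock);

		/* ... balancing work that must not drop rq->lock ... */

		lockdep_unpin_lock(&rq->lock);
	}

With !CONFIG_LOCKDEP both macros compile away to no-ops, as the hunks below show, so the annotation costs nothing in production builds.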
Diffstat (limited to 'include')
-rw-r--r--  include/linux/lockdep.h | 10
1 file changed, 10 insertions, 0 deletions
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2722111591a3..70400dc7660f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -255,6 +255,7 @@ struct held_lock {
unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
+ unsigned int pin_count;
};
/*
@@ -354,6 +355,9 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
+extern void lock_pin_lock(struct lockdep_map *lock);
+extern void lock_unpin_lock(struct lockdep_map *lock);
+
# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
@@ -368,6 +372,9 @@ extern void lockdep_trace_alloc(gfp_t mask);
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
+#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
+#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map)
+
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_off(void)
@@ -420,6 +427,9 @@ struct lock_class_key { };
#define lockdep_recursing(tsk) (0)
+#define lockdep_pin_lock(l) do { (void)(l); } while (0)
+#define lockdep_unpin_lock(l) do { (void)(l); } while (0)
+
#endif /* !LOCKDEP */
#ifdef CONFIG_LOCK_STAT
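The pin_count field added to struct held_lock above carries the pin state per held lock. A hedged sketch of the semantics follows; find_held_lock() is a hypothetical helper (the real implementation open-codes a walk of the current task's held_locks array inside kernel/locking/lockdep.c), so this shows the intent, not the exact code.

	#include <linux/lockdep.h>
	#include <linux/sched.h>
	#include <linux/bug.h>

	/*
	 * Hypothetical helper: locate the held_lock entry for 'lock' on
	 * the current task's lock stack, or NULL if it is not held.
	 */
	extern struct held_lock *find_held_lock(struct task_struct *curr,
						struct lockdep_map *lock);

	void lock_pin_lock(struct lockdep_map *lock)
	{
		struct held_lock *hlock = find_held_lock(current, lock);

		if (hlock)
			hlock->pin_count++;	/* pinned: releasing now warns */
		else
			WARN(1, "pinning an unheld lock\n");
	}

	void lock_unpin_lock(struct lockdep_map *lock)
	{
		struct held_lock *hlock = find_held_lock(current, lock);

		if (hlock && hlock->pin_count)
			hlock->pin_count--;
		else
			WARN(1, "unpinning an unpinned lock\n");
	}

Under this scheme, lock_release() is expected to warn when asked to release a lock whose pin_count is non-zero, which is exactly the property the scheduler balancing rework relies on.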