author     Davidlohr Bueso <dave@stgolabs.net>             2016-12-15 02:06:43 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-12-15 03:04:08 +0300
commit     f150f02cfbc7b6b980e260856555abd73235a6b0 (patch)
tree       d7872d6961c6c9f72598089c716aab16404085d0 /ipc/sem.c
parent     4663d3e8f21652f33c698fcc2bf20f61499d9c3e (diff)
download   linux-f150f02cfbc7b6b980e260856555abd73235a6b0.tar.xz
ipc/sem: use proper list api for pending_list wakeups
... saves some LoC and looks cleaner than re-implementing the calls.

Link: http://lkml.kernel.org/r/1474225896-10066-6-git-send-email-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'ipc/sem.c')
-rw-r--r--  ipc/sem.c  |  38
1 file changed, 13 insertions(+), 25 deletions(-)
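For context, a minimal userspace sketch of the list API pattern this patch adopts: list_for_each_entry_safe() caches the next node before the loop body runs, so the current entry can be unlinked mid-walk, which is exactly what the open-coded 'walk' pointer in wake_const_ops() and update_queue() did by hand. The simplified list macros, struct pending_op and its error field below are illustrative stand-ins, not kernel code.

/* Sketch only: simplified restatements of the <linux/list.h> helpers, plus a
 * made-up struct pending_op standing in for struct sem_queue.  Not kernel code.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_del(struct list_head *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_entry(ptr, type, member)	container_of(ptr, type, member)

/* Safe against removal of the current entry: 'tmp' is advanced before the
 * body runs, just like the hand-rolled 'walk = walk->next' in the old code.
 */
#define list_for_each_entry_safe(pos, tmp, head, member)			\
	for (pos = list_entry((head)->next, __typeof__(*pos), member),		\
	     tmp = list_entry(pos->member.next, __typeof__(*pos), member);	\
	     &pos->member != (head);						\
	     pos = tmp, tmp = list_entry(tmp->member.next, __typeof__(*pos), member))

struct pending_op {		/* illustrative stand-in for struct sem_queue */
	int error;		/* <= 0: operation completed, > 0: still blocked */
	struct list_head list;
};

int main(void)
{
	struct list_head pending = LIST_HEAD_INIT(pending);
	struct pending_op ops[] = { { .error = 0 }, { .error = 1 }, { .error = -1 } };
	struct pending_op *q, *tmp;
	size_t i;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
		list_add_tail(&ops[i].list, &pending);

	/* Same shape as the patched wake_const_ops(): skip entries that still
	 * block, unlink and "wake" the ones that completed.
	 */
	list_for_each_entry_safe(q, tmp, &pending, list) {
		if (q->error > 0)
			continue;
		list_del(&q->list);	/* safe: tmp already points past q */
		printf("woke entry with error %d\n", q->error);
	}
	return 0;
}

Note that the _safe variant only guards against the loop body unlinking the current entry; it does not make the walk safe against concurrent writers, which ipc/sem.c serializes separately with its semaphore locks around these scans.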
diff --git a/ipc/sem.c b/ipc/sem.c
index 084e1f55ca72..4f5af6e7d630 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -813,8 +813,7 @@ static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
 static int wake_const_ops(struct sem_array *sma, int semnum,
 			  struct wake_q_head *wake_q)
 {
-	struct sem_queue *q;
-	struct list_head *walk;
+	struct sem_queue *q, *tmp;
 	struct list_head *pending_list;
 	int semop_completed = 0;
 
@@ -823,25 +822,19 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
 	else
 		pending_list = &sma->sem_base[semnum].pending_const;
 
-	walk = pending_list->next;
-	while (walk != pending_list) {
-		int error;
-
-		q = container_of(walk, struct sem_queue, list);
-		walk = walk->next;
-
-		error = perform_atomic_semop(sma, q);
-
-		if (error <= 0) {
-			/* operation completed, remove from queue & wakeup */
+	list_for_each_entry_safe(q, tmp, pending_list, list) {
+		int error = perform_atomic_semop(sma, q);
 
-			unlink_queue(sma, q);
+		if (error > 0)
+			continue;
+		/* operation completed, remove from queue & wakeup */
+		unlink_queue(sma, q);
 
-			wake_up_sem_queue_prepare(q, error, wake_q);
-			if (error == 0)
-				semop_completed = 1;
-		}
+		wake_up_sem_queue_prepare(q, error, wake_q);
+		if (error == 0)
+			semop_completed = 1;
 	}
+
 	return semop_completed;
 }
 
@@ -914,8 +907,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
  */
 static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
 {
-	struct sem_queue *q;
-	struct list_head *walk;
+	struct sem_queue *q, *tmp;
 	struct list_head *pending_list;
 	int semop_completed = 0;
 
@@ -925,13 +917,9 @@ static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *w
 		pending_list = &sma->sem_base[semnum].pending_alter;
 
 again:
-	walk = pending_list->next;
-	while (walk != pending_list) {
+	list_for_each_entry_safe(q, tmp, pending_list, list) {
 		int error, restart;
 
-		q = container_of(walk, struct sem_queue, list);
-		walk = walk->next;
-
 		/* If we are scanning the single sop, per-semaphore list of
 		 * one semaphore and that semaphore is 0, then it is not
 		 * necessary to scan further: simple increments