author    Eric Dumazet <edumazet@google.com>  2015-02-26 18:20:34 +0300
committer David S. Miller <davem@davemloft.net>  2015-02-28 01:55:14 +0300
commit    5beb5c90c1f54d745da040aa05634a5830ba4a4c (patch)
tree      4626f1033a05b15b4cd046d066de1011c7a7f67d /lib/rhashtable.c
parent    061c1a6e367855a9ed1110ba059bc2e7634fd429 (diff)
download  linux-5beb5c90c1f54d745da040aa05634a5830ba4a4c.tar.xz
rhashtable: use cond_resched()
If a hash table has 128 slots and 16384 elems, expanding it to 256 slots takes more than one second. For larger sets, a soft lockup is detected.

Holding the CPU for that long, even in a work queue, is a show stopper for non-preemptible kernels.

Add cond_resched() at strategic points to allow the process scheduler to reschedule us.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
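For context, the pattern the patch applies is sketched below in plain C. This is a minimal illustration under assumed names (struct my_table, my_rehash_bucket() and my_rehash_workfn() are hypothetical and not part of rhashtable); it only shows the idea of a deferred-work loop that drops its per-bucket lock and yields the CPU once per iteration, so a non-preemptible kernel can reschedule and the soft-lockup watchdog stays quiet.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical table, used only to illustrate the pattern. */
struct my_table {
	unsigned int		nbuckets;
	spinlock_t		*locks;
	struct work_struct	work;
};

/* Placeholder for the per-bucket relinking work. */
static void my_rehash_bucket(struct my_table *tbl, unsigned int i)
{
	/* ... move entries of bucket i to their new positions ... */
}

static void my_rehash_workfn(struct work_struct *work)
{
	struct my_table *tbl = container_of(work, struct my_table, work);
	unsigned int i;

	for (i = 0; i < tbl->nbuckets; i++) {
		spin_lock_bh(&tbl->locks[i]);
		my_rehash_bucket(tbl, i);
		spin_unlock_bh(&tbl->locks[i]);

		/* No locks held here and we are in process context,
		 * so it is safe to let the scheduler run something
		 * else before the next bucket.
		 */
		cond_resched();
	}
}

Note that cond_resched() is called only after the bucket lock is released, which matches where the patch below places it relative to unlock_buckets().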
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c  4
1 file changed, 4 insertions, 0 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 090641db4c0d..b5344ef4c684 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
@@ -412,6 +413,7 @@ int rhashtable_expand(struct rhashtable *ht)
 			}
 		}
 		unlock_buckets(new_tbl, old_tbl, new_hash);
+		cond_resched();
 	}
 
 	/* Unzip interleaved hash chains */
@@ -435,6 +437,7 @@ int rhashtable_expand(struct rhashtable *ht)
 				complete = false;
 
 			unlock_buckets(new_tbl, old_tbl, old_hash);
+			cond_resched();
 		}
 	}
@@ -493,6 +496,7 @@ int rhashtable_shrink(struct rhashtable *ht)
 				   tbl->buckets[new_hash + new_tbl->size]);
 
 		unlock_buckets(new_tbl, tbl, new_hash);
+		cond_resched();
 	}
 
 	/* Publish the new, valid hash table */