Diffstat (limited to 'lib')
-rw-r--r--  lib/842/842_decompress.c             14
-rw-r--r--  lib/Kconfig.debug                    37
-rw-r--r--  lib/Makefile                          1
-rw-r--r--  lib/atomic64_test.c                 124
-rw-r--r--  lib/btree.c                           2
-rw-r--r--  lib/dma-debug.c                       4
-rw-r--r--  lib/dynamic_debug.c                  11
-rw-r--r--  lib/iov_iter.c                       11
-rw-r--r--  lib/list_debug.c                      2
-rw-r--r--  lib/mpi/mpicoder.c                   21
-rw-r--r--  lib/netdev-notifier-error-inject.c   55
-rw-r--r--  lib/proportions.c                     2
-rw-r--r--  lib/rhashtable.c                     73
-rw-r--r--  lib/test_bpf.c                      120
-rw-r--r--  lib/test_rhashtable.c                76
-rw-r--r--  lib/vsprintf.c                       29
16 files changed, 455 insertions(+), 127 deletions(-)
diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c
index 8881dad2a6a0..a7f278d2ed8f 100644
--- a/lib/842/842_decompress.c
+++ b/lib/842/842_decompress.c
@@ -69,7 +69,7 @@ struct sw842_param {
((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \
(s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \
(s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \
- WARN(1, "pr_debug param err invalid size %x\n", s))
+ 0)
static int next_bits(struct sw842_param *p, u64 *d, u8 n);
@@ -202,10 +202,14 @@ static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize)
return -EINVAL;
}
- pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n",
- size, (unsigned long)index, (unsigned long)(index * size),
- (unsigned long)offset, (unsigned long)total,
- (unsigned long)beN_to_cpu(&p->ostart[offset], size));
+ if (size != 2 && size != 4 && size != 8)
+ WARN(1, "__do_index invalid size %x\n", size);
+ else
+ pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n",
+ size, (unsigned long)index,
+ (unsigned long)(index * size), (unsigned long)offset,
+ (unsigned long)total,
+ (unsigned long)beN_to_cpu(&p->ostart[offset], size));
memcpy(p->out, &p->ostart[offset], size);
p->out += size;
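For reference, the beN_to_cpu dispatch touched above is equivalent to the following sketch (the helper name is hypothetical; sizes other than 2, 4 and 8 now evaluate to 0 and are reported by the explicit WARN in __do_index):

    /* Hypothetical helper equivalent to the beN_to_cpu macro: */
    static inline u64 beN_to_cpu_sketch(const void *d, u8 s)
    {
            switch (s) {
            case 2: return be16_to_cpu(get_unaligned((__be16 *)d));
            case 4: return be32_to_cpu(get_unaligned((__be32 *)d));
            case 8: return be64_to_cpu(get_unaligned((__be64 *)d));
            default: return 0; /* invalid size; __do_index WARNs */
            }
    }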
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8c15b29d5adc..e2a236a7341f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -812,6 +812,17 @@ config BOOTPARAM_HUNG_TASK_PANIC_VALUE
default 0 if !BOOTPARAM_HUNG_TASK_PANIC
default 1 if BOOTPARAM_HUNG_TASK_PANIC
+config WQ_WATCHDOG
+ bool "Detect Workqueue Stalls"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here to enable stall detection on workqueues. If a
+ worker pool doesn't make forward progress on a pending work
+ item within a given amount of time (30s by default), a
+ warning message is printed along with a dump of workqueue
+ state. This can be configured through the kernel parameter
+ "workqueue.watchdog_thresh" and its sysfs counterpart.
+
endmenu # "Debug lockups and hangs"
config PANIC_ON_OOPS
@@ -1484,6 +1495,29 @@ config OF_RECONFIG_NOTIFIER_ERROR_INJECT
If unsure, say N.
+config NETDEV_NOTIFIER_ERROR_INJECT
+ tristate "Netdev notifier error injection module"
+ depends on NET && NOTIFIER_ERROR_INJECTION
+ help
+ This option provides the ability to inject artificial errors into
+ netdevice notifier chain callbacks. It is controlled through the
+ debugfs interface /sys/kernel/debug/notifier-error-inject/netdev
+
+ To make the notifier call chain fail for a given event, write the
+ error code to "actions/<notifier event>/error".
+
+ Example: Inject netdevice mtu change error (-22 = -EINVAL)
+
+ # cd /sys/kernel/debug/notifier-error-inject/netdev
+ # echo -22 > actions/NETDEV_CHANGEMTU/error
+ # ip link set eth0 mtu 1024
+ RTNETLINK answers: Invalid argument
+
+ To compile this code as a module, choose M here: the module will
+ be called netdev-notifier-error-inject.
+
+ If unsure, say N.
+
config FAULT_INJECTION
bool "Fault-injection framework"
depends on DEBUG_KERNEL
@@ -1523,8 +1557,7 @@ config FAIL_IO_TIMEOUT
config FAIL_MMC_REQUEST
bool "Fault-injection capability for MMC IO"
- select DEBUG_FS
- depends on FAULT_INJECTION && MMC
+ depends on FAULT_INJECTION_DEBUG_FS && MMC
help
Provide fault-injection capability for MMC IO.
This will make the mmc core return data errors. This is
diff --git a/lib/Makefile b/lib/Makefile
index 7f1de26613d2..180dd4d0dd41 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -120,6 +120,7 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
+obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
of-reconfig-notifier-error-inject.o
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 83c33a5bcffb..d62de8bf022d 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -16,6 +16,10 @@
#include <linux/kernel.h>
#include <linux/atomic.h>
+#ifdef CONFIG_X86
+#include <asm/processor.h> /* for boot_cpu_has below */
+#endif
+
#define TEST(bit, op, c_op, val) \
do { \
atomic##bit##_set(&v, v0); \
@@ -27,6 +31,65 @@ do { \
(unsigned long long)r); \
} while (0)
+/*
+ * Test for an atomic operation family;
+ * @test should be a macro accepting parameters (bit, op, ...).
+ */
+
+#define FAMILY_TEST(test, bit, op, args...) \
+do { \
+ test(bit, op, ##args); \
+ test(bit, op##_acquire, ##args); \
+ test(bit, op##_release, ##args); \
+ test(bit, op##_relaxed, ##args); \
+} while (0)
+
+#define TEST_RETURN(bit, op, c_op, val) \
+do { \
+ atomic##bit##_set(&v, v0); \
+ r = v0; \
+ r c_op val; \
+ BUG_ON(atomic##bit##_##op(val, &v) != r); \
+ BUG_ON(atomic##bit##_read(&v) != r); \
+} while (0)
+
+#define RETURN_FAMILY_TEST(bit, op, c_op, val) \
+do { \
+ FAMILY_TEST(TEST_RETURN, bit, op, c_op, val); \
+} while (0)
+
+#define TEST_ARGS(bit, op, init, ret, expect, args...) \
+do { \
+ atomic##bit##_set(&v, init); \
+ BUG_ON(atomic##bit##_##op(&v, ##args) != ret); \
+ BUG_ON(atomic##bit##_read(&v) != expect); \
+} while (0)
+
+#define XCHG_FAMILY_TEST(bit, init, new) \
+do { \
+ FAMILY_TEST(TEST_ARGS, bit, xchg, init, init, new, new); \
+} while (0)
+
+#define CMPXCHG_FAMILY_TEST(bit, init, new, wrong) \
+do { \
+ FAMILY_TEST(TEST_ARGS, bit, cmpxchg, \
+ init, init, new, init, new); \
+ FAMILY_TEST(TEST_ARGS, bit, cmpxchg, \
+ init, init, init, wrong, new); \
+} while (0)
+
+#define INC_RETURN_FAMILY_TEST(bit, i) \
+do { \
+ FAMILY_TEST(TEST_ARGS, bit, inc_return, \
+ i, (i) + one, (i) + one); \
+} while (0)
+
+#define DEC_RETURN_FAMILY_TEST(bit, i) \
+do { \
+ FAMILY_TEST(TEST_ARGS, bit, dec_return, \
+ i, (i) - one, (i) - one); \
+} while (0)
+
static __init void test_atomic(void)
{
int v0 = 0xaaa31337;
@@ -45,6 +108,18 @@ static __init void test_atomic(void)
TEST(, and, &=, v1);
TEST(, xor, ^=, v1);
TEST(, andnot, &= ~, v1);
+
+ RETURN_FAMILY_TEST(, add_return, +=, onestwos);
+ RETURN_FAMILY_TEST(, add_return, +=, -one);
+ RETURN_FAMILY_TEST(, sub_return, -=, onestwos);
+ RETURN_FAMILY_TEST(, sub_return, -=, -one);
+
+ INC_RETURN_FAMILY_TEST(, v0);
+ DEC_RETURN_FAMILY_TEST(, v0);
+
+ XCHG_FAMILY_TEST(, v0, v1);
+ CMPXCHG_FAMILY_TEST(, v0, v1, onestwos);
+
}
#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
@@ -74,25 +149,10 @@ static __init void test_atomic64(void)
TEST(64, xor, ^=, v1);
TEST(64, andnot, &= ~, v1);
- INIT(v0);
- r += onestwos;
- BUG_ON(atomic64_add_return(onestwos, &v) != r);
- BUG_ON(v.counter != r);
-
- INIT(v0);
- r += -one;
- BUG_ON(atomic64_add_return(-one, &v) != r);
- BUG_ON(v.counter != r);
-
- INIT(v0);
- r -= onestwos;
- BUG_ON(atomic64_sub_return(onestwos, &v) != r);
- BUG_ON(v.counter != r);
-
- INIT(v0);
- r -= -one;
- BUG_ON(atomic64_sub_return(-one, &v) != r);
- BUG_ON(v.counter != r);
+ RETURN_FAMILY_TEST(64, add_return, +=, onestwos);
+ RETURN_FAMILY_TEST(64, add_return, +=, -one);
+ RETURN_FAMILY_TEST(64, sub_return, -=, onestwos);
+ RETURN_FAMILY_TEST(64, sub_return, -=, -one);
INIT(v0);
atomic64_inc(&v);
@@ -100,33 +160,15 @@ static __init void test_atomic64(void)
BUG_ON(v.counter != r);
INIT(v0);
- r += one;
- BUG_ON(atomic64_inc_return(&v) != r);
- BUG_ON(v.counter != r);
-
- INIT(v0);
atomic64_dec(&v);
r -= one;
BUG_ON(v.counter != r);
- INIT(v0);
- r -= one;
- BUG_ON(atomic64_dec_return(&v) != r);
- BUG_ON(v.counter != r);
+ INC_RETURN_FAMILY_TEST(64, v0);
+ DEC_RETURN_FAMILY_TEST(64, v0);
- INIT(v0);
- BUG_ON(atomic64_xchg(&v, v1) != v0);
- r = v1;
- BUG_ON(v.counter != r);
-
- INIT(v0);
- BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
- r = v1;
- BUG_ON(v.counter != r);
-
- INIT(v0);
- BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
- BUG_ON(v.counter != r);
+ XCHG_FAMILY_TEST(64, v0, v1);
+ CMPXCHG_FAMILY_TEST(64, v0, v1, v2);
INIT(v0);
BUG_ON(atomic64_add_unless(&v, one, v0));
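The macro layering is easiest to read as a hand expansion; as a sketch, one invocation of RETURN_FAMILY_TEST from test_atomic64() above expands to:

    /* RETURN_FAMILY_TEST(64, add_return, +=, onestwos) covers all
     * four memory-ordering variants: */
    TEST_RETURN(64, add_return, +=, onestwos);
    TEST_RETURN(64, add_return_acquire, +=, onestwos);
    TEST_RETURN(64, add_return_release, +=, onestwos);
    TEST_RETURN(64, add_return_relaxed, +=, onestwos);

    /* ...and each TEST_RETURN checks both the return value and the
     * stored value: */
    atomic64_set(&v, v0);
    r = v0;
    r += onestwos;
    BUG_ON(atomic64_add_return(onestwos, &v) != r);
    BUG_ON(atomic64_read(&v) != r);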
diff --git a/lib/btree.c b/lib/btree.c
index 4264871ea1a0..f93a945274af 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -5,7 +5,7 @@
*
* Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
* Bits and pieces stolen from Peter Zijlstra's code, which is
- * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright 2007, Red Hat Inc. Peter Zijlstra
* GPLv2
*
* see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 8855f019ebe8..d34bd24c2c84 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1464,7 +1464,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
entry->type = dma_debug_coherent;
entry->dev = dev;
entry->pfn = page_to_pfn(virt_to_page(virt));
- entry->offset = (size_t) virt & PAGE_MASK;
+ entry->offset = (size_t) virt & ~PAGE_MASK;
entry->size = size;
entry->dev_addr = dma_addr;
entry->direction = DMA_BIDIRECTIONAL;
@@ -1480,7 +1480,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
.type = dma_debug_coherent,
.dev = dev,
.pfn = page_to_pfn(virt_to_page(virt)),
- .offset = (size_t) virt & PAGE_MASK,
+ .offset = (size_t) virt & ~PAGE_MASK,
.dev_addr = addr,
.size = size,
.direction = DMA_BIDIRECTIONAL,
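The two hunks above fix an inverted mask: PAGE_MASK selects the page-aligned part of an address, so the offset within the page needs the complement. A worked example with 4 KiB pages (PAGE_MASK == ~0xfffUL; the address value is hypothetical):

    void *virt = (void *)0xffff880012345678UL;
    (size_t)virt & PAGE_MASK;   /* 0xffff880012345000: page base (old, wrong) */
    (size_t)virt & ~PAGE_MASK;  /* 0x678: offset within the page (new, right) */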
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index e3952e9c8ec0..fe42b6ec3f0c 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -657,14 +657,9 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
pr_warn("expected <%d bytes into control\n", USER_BUF_PAGE);
return -E2BIG;
}
- tmpbuf = kmalloc(len + 1, GFP_KERNEL);
- if (!tmpbuf)
- return -ENOMEM;
- if (copy_from_user(tmpbuf, ubuf, len)) {
- kfree(tmpbuf);
- return -EFAULT;
- }
- tmpbuf[len] = '\0';
+ tmpbuf = memdup_user_nul(ubuf, len);
+ if (IS_ERR(tmpbuf))
+ return PTR_ERR(tmpbuf);
vpr_info("read %d bytes from userspace\n", (int)len);
ret = ddebug_exec_queries(tmpbuf, NULL);
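memdup_user_nul() folds the removed allocate/copy/NUL-terminate sequence into one helper and reports failure via ERR_PTR; it behaves roughly like this sketch (not the real implementation):

    static void *memdup_user_nul_sketch(const void __user *src, size_t len)
    {
            char *p = kmalloc(len + 1, GFP_KERNEL);

            if (!p)
                    return ERR_PTR(-ENOMEM);
            if (copy_from_user(p, src, len)) {
                    kfree(p);
                    return ERR_PTR(-EFAULT);
            }
            p[len] = '\0';
            return p;
    }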
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 75232ad0a5e7..5fecddc32b1b 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -369,7 +369,7 @@ static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t
kunmap_atomic(from);
}
-static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
+static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
char *to = kmap_atomic(page);
memcpy(to + offset, from, len);
@@ -383,9 +383,9 @@ static void memzero_page(struct page *page, size_t offset, size_t len)
kunmap_atomic(addr);
}
-size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
+size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- char *from = addr;
+ const char *from = addr;
if (unlikely(bytes > i->count))
bytes = i->count;
@@ -704,10 +704,10 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
-size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum,
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
struct iov_iter *i)
{
- char *from = addr;
+ const char *from = addr;
__wsum sum, next;
size_t off = 0;
if (unlikely(bytes > i->count))
@@ -849,3 +849,4 @@ int import_single_range(int rw, void __user *buf, size_t len,
iov_iter_init(i, rw, iov, 1, len);
return 0;
}
+EXPORT_SYMBOL(import_single_range);
diff --git a/lib/list_debug.c b/lib/list_debug.c
index c24c2f7e296f..3859bf63561c 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -37,7 +37,7 @@ void __list_add(struct list_head *new,
next->prev = new;
new->next = next;
new->prev = prev;
- prev->next = new;
+ WRITE_ONCE(prev->next, new);
}
EXPORT_SYMBOL(__list_add);
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 3db76b8c1115..ec533a6c77b5 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -135,7 +135,9 @@ EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
* @buf: buffer to which the output will be written. Needs to be at
* least mpi_get_size(a) long.
* @buf_len: size of the buf.
- * @nbytes: receives the actual length of the data written.
+ * @nbytes: receives the actual length of the data written on success,
+ * or the required length on -EOVERFLOW in case buf_len was
+ * too small.
* @sign: if not NULL, it will be set to the sign of a.
*
* Return: 0 on success or error code in case of error
@@ -148,7 +150,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
unsigned int n = mpi_get_size(a);
int i, lzeros = 0;
- if (buf_len < n || !buf || !nbytes)
+ if (!buf || !nbytes)
return -EINVAL;
if (sign)
@@ -163,6 +165,11 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
break;
}
+ if (buf_len < n - lzeros) {
+ *nbytes = n - lzeros;
+ return -EOVERFLOW;
+ }
+
p = buf;
*nbytes = n - lzeros;
@@ -332,7 +339,8 @@ EXPORT_SYMBOL_GPL(mpi_set_buffer);
* @nbytes: in/out param - it has to be set to the maximum number of
* bytes that can be written to sgl. This has to be at least
* the size of the integer a. On return it receives the actual
- * length of the data written.
+ * length of the data written on success, or the length that
+ * would be needed if the buffer was too small.
* @sign: if not NULL, it will be set to the sign of a.
*
* Return: 0 on success or error code in case of error
@@ -345,7 +353,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
unsigned int n = mpi_get_size(a);
int i, x, y = 0, lzeros = 0, buf_len;
- if (!nbytes || *nbytes < n)
+ if (!nbytes)
return -EINVAL;
if (sign)
@@ -360,6 +368,11 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
break;
}
+ if (*nbytes < n - lzeros) {
+ *nbytes = n - lzeros;
+ return -EOVERFLOW;
+ }
+
*nbytes = n - lzeros;
buf_len = sgl->length;
p2 = sg_virt(sgl);
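With the new -EOVERFLOW convention, a caller can probe with a small buffer and retry at the reported size; a hypothetical caller might do:

    u8 small[16], *big;
    unsigned int nbytes;
    int ret;

    ret = mpi_read_buffer(a, small, sizeof(small), &nbytes, NULL);
    if (ret == -EOVERFLOW) {
            /* nbytes now holds the required length */
            big = kmalloc(nbytes, GFP_KERNEL);
            if (!big)
                    return -ENOMEM;
            ret = mpi_read_buffer(a, big, nbytes, &nbytes, NULL);
    }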
diff --git a/lib/netdev-notifier-error-inject.c b/lib/netdev-notifier-error-inject.c
new file mode 100644
index 000000000000..13e9c62e216f
--- /dev/null
+++ b/lib/netdev-notifier-error-inject.c
@@ -0,0 +1,55 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include "notifier-error-inject.h"
+
+static int priority;
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify netdevice notifier priority");
+
+static struct notifier_err_inject netdev_notifier_err_inject = {
+ .actions = {
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_REGISTER) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEMTU) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGENAME) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_UP) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_TYPE_CHANGE) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_POST_INIT) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEMTU) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEUPPER) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEUPPER) },
+ {}
+ }
+};
+
+static struct dentry *dir;
+
+static int netdev_err_inject_init(void)
+{
+ int err;
+
+ dir = notifier_err_inject_init("netdev", notifier_err_inject_dir,
+ &netdev_notifier_err_inject, priority);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ err = register_netdevice_notifier(&netdev_notifier_err_inject.nb);
+ if (err)
+ debugfs_remove_recursive(dir);
+
+ return err;
+}
+
+static void netdev_err_inject_exit(void)
+{
+ unregister_netdevice_notifier(&netdev_notifier_err_inject.nb);
+ debugfs_remove_recursive(dir);
+}
+
+module_init(netdev_err_inject_init);
+module_exit(netdev_err_inject_exit);
+
+MODULE_DESCRIPTION("Netdevice notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nikolay Aleksandrov <razor@blackwall.org>");
diff --git a/lib/proportions.c b/lib/proportions.c
index 6f724298f67a..efa54f259ea9 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -1,7 +1,7 @@
/*
* Floating proportions
*
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* Description:
*
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a54ff8949f91..cc808707d1cf 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -231,9 +231,6 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
*/
rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
- /* Ensure the new table is visible to readers. */
- smp_wmb();
-
spin_unlock_bh(old_tbl->locks);
return 0;
@@ -389,33 +386,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht,
return false;
}
-int rhashtable_insert_rehash(struct rhashtable *ht)
+int rhashtable_insert_rehash(struct rhashtable *ht,
+ struct bucket_table *tbl)
{
struct bucket_table *old_tbl;
struct bucket_table *new_tbl;
- struct bucket_table *tbl;
unsigned int size;
int err;
old_tbl = rht_dereference_rcu(ht->tbl, ht);
- tbl = rhashtable_last_table(ht, old_tbl);
size = tbl->size;
+ err = -EBUSY;
+
if (rht_grow_above_75(ht, tbl))
size *= 2;
/* Do not schedule more than one rehash */
else if (old_tbl != tbl)
- return -EBUSY;
+ goto fail;
+
+ err = -ENOMEM;
new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
- if (new_tbl == NULL) {
- /* Schedule async resize/rehash to try allocation
- * non-atomic context.
- */
- schedule_work(&ht->run_work);
- return -ENOMEM;
- }
+ if (new_tbl == NULL)
+ goto fail;
err = rhashtable_rehash_attach(ht, tbl, new_tbl);
if (err) {
@@ -426,12 +421,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
schedule_work(&ht->run_work);
return err;
+
+fail:
+ /* Do not fail the insert if someone else did a rehash. */
+ if (likely(rcu_dereference_raw(tbl->future_tbl)))
+ return 0;
+
+ /* Schedule async rehash to retry allocation in process context. */
+ if (err == -ENOMEM)
+ schedule_work(&ht->run_work);
+
+ return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
-int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
- struct rhash_head *obj,
- struct bucket_table *tbl)
+struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
+ const void *key,
+ struct rhash_head *obj,
+ struct bucket_table *tbl)
{
struct rhash_head *head;
unsigned int hash;
@@ -467,7 +474,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
exit:
spin_unlock(rht_bucket_lock(tbl, hash));
- return err;
+ if (err == 0)
+ return NULL;
+ else if (err == -EAGAIN)
+ return tbl;
+ else
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
@@ -503,10 +515,11 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
if (!iter->walker)
return -ENOMEM;
- mutex_lock(&ht->mutex);
- iter->walker->tbl = rht_dereference(ht->tbl, ht);
+ spin_lock(&ht->lock);
+ iter->walker->tbl =
+ rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
list_add(&iter->walker->list, &iter->walker->tbl->walkers);
- mutex_unlock(&ht->mutex);
+ spin_unlock(&ht->lock);
return 0;
}
@@ -520,10 +533,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
*/
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
- mutex_lock(&iter->ht->mutex);
+ spin_lock(&iter->ht->lock);
if (iter->walker->tbl)
list_del(&iter->walker->list);
- mutex_unlock(&iter->ht->mutex);
+ spin_unlock(&iter->ht->lock);
kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
@@ -547,14 +560,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
{
struct rhashtable *ht = iter->ht;
- mutex_lock(&ht->mutex);
+ rcu_read_lock();
+ spin_lock(&ht->lock);
if (iter->walker->tbl)
list_del(&iter->walker->list);
-
- rcu_read_lock();
-
- mutex_unlock(&ht->mutex);
+ spin_unlock(&ht->lock);
if (!iter->walker->tbl) {
iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
@@ -723,9 +734,6 @@ int rhashtable_init(struct rhashtable *ht,
if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
return -EINVAL;
- if (params->nelem_hint)
- size = rounded_hashtable_size(params);
-
memset(ht, 0, sizeof(*ht));
mutex_init(&ht->mutex);
spin_lock_init(&ht->lock);
@@ -745,6 +753,9 @@ int rhashtable_init(struct rhashtable *ht,
ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
+ if (params->nelem_hint)
+ size = rounded_hashtable_size(&ht->p);
+
/* The maximum (not average) chain length grows with the
* size of the hash table, at a rate of (log N)/(log log N).
* The value of 16 is selected so that even if the hash
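The changed rhashtable_insert_slow() packs three outcomes into one return value: NULL for success, the passed-in table for -EAGAIN, and ERR_PTR() for hard errors. A hypothetical caller unwraps it like this:

    struct bucket_table *new_tbl;

    new_tbl = rhashtable_insert_slow(ht, key, obj, tbl);
    if (!new_tbl)
            return 0;                  /* inserted */
    if (IS_ERR(new_tbl))
            return PTR_ERR(new_tbl);   /* hard failure */
    tbl = new_tbl;                     /* -EAGAIN: retry on this table */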
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 10cd1860e5b0..27a7a26b1ece 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1685,6 +1685,126 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x35d97ef2 } }
},
+ { /* Mainly checking JIT here. */
+ "MOV REG64",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+ BPF_MOV64_REG(R1, R0),
+ BPF_MOV64_REG(R2, R1),
+ BPF_MOV64_REG(R3, R2),
+ BPF_MOV64_REG(R4, R3),
+ BPF_MOV64_REG(R5, R4),
+ BPF_MOV64_REG(R6, R5),
+ BPF_MOV64_REG(R7, R6),
+ BPF_MOV64_REG(R8, R7),
+ BPF_MOV64_REG(R9, R8),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_ALU64_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_IMM(BPF_MOV, R2, 0),
+ BPF_ALU64_IMM(BPF_MOV, R3, 0),
+ BPF_ALU64_IMM(BPF_MOV, R4, 0),
+ BPF_ALU64_IMM(BPF_MOV, R5, 0),
+ BPF_ALU64_IMM(BPF_MOV, R6, 0),
+ BPF_ALU64_IMM(BPF_MOV, R7, 0),
+ BPF_ALU64_IMM(BPF_MOV, R8, 0),
+ BPF_ALU64_IMM(BPF_MOV, R9, 0),
+ BPF_ALU64_REG(BPF_ADD, R0, R0),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_ALU64_REG(BPF_ADD, R0, R2),
+ BPF_ALU64_REG(BPF_ADD, R0, R3),
+ BPF_ALU64_REG(BPF_ADD, R0, R4),
+ BPF_ALU64_REG(BPF_ADD, R0, R5),
+ BPF_ALU64_REG(BPF_ADD, R0, R6),
+ BPF_ALU64_REG(BPF_ADD, R0, R7),
+ BPF_ALU64_REG(BPF_ADD, R0, R8),
+ BPF_ALU64_REG(BPF_ADD, R0, R9),
+ BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfefe } }
+ },
+ { /* Mainly checking JIT here. */
+ "MOV REG32",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+ BPF_MOV64_REG(R1, R0),
+ BPF_MOV64_REG(R2, R1),
+ BPF_MOV64_REG(R3, R2),
+ BPF_MOV64_REG(R4, R3),
+ BPF_MOV64_REG(R5, R4),
+ BPF_MOV64_REG(R6, R5),
+ BPF_MOV64_REG(R7, R6),
+ BPF_MOV64_REG(R8, R7),
+ BPF_MOV64_REG(R9, R8),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU32_IMM(BPF_MOV, R2, 0),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0),
+ BPF_ALU32_IMM(BPF_MOV, R4, 0),
+ BPF_ALU32_IMM(BPF_MOV, R5, 0),
+ BPF_ALU32_IMM(BPF_MOV, R6, 0),
+ BPF_ALU32_IMM(BPF_MOV, R7, 0),
+ BPF_ALU32_IMM(BPF_MOV, R8, 0),
+ BPF_ALU32_IMM(BPF_MOV, R9, 0),
+ BPF_ALU64_REG(BPF_ADD, R0, R0),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_ALU64_REG(BPF_ADD, R0, R2),
+ BPF_ALU64_REG(BPF_ADD, R0, R3),
+ BPF_ALU64_REG(BPF_ADD, R0, R4),
+ BPF_ALU64_REG(BPF_ADD, R0, R5),
+ BPF_ALU64_REG(BPF_ADD, R0, R6),
+ BPF_ALU64_REG(BPF_ADD, R0, R7),
+ BPF_ALU64_REG(BPF_ADD, R0, R8),
+ BPF_ALU64_REG(BPF_ADD, R0, R9),
+ BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfefe } }
+ },
+ { /* Mainly checking JIT here. */
+ "LD IMM64",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+ BPF_MOV64_REG(R1, R0),
+ BPF_MOV64_REG(R2, R1),
+ BPF_MOV64_REG(R3, R2),
+ BPF_MOV64_REG(R4, R3),
+ BPF_MOV64_REG(R5, R4),
+ BPF_MOV64_REG(R6, R5),
+ BPF_MOV64_REG(R7, R6),
+ BPF_MOV64_REG(R8, R7),
+ BPF_MOV64_REG(R9, R8),
+ BPF_LD_IMM64(R0, 0x0LL),
+ BPF_LD_IMM64(R1, 0x0LL),
+ BPF_LD_IMM64(R2, 0x0LL),
+ BPF_LD_IMM64(R3, 0x0LL),
+ BPF_LD_IMM64(R4, 0x0LL),
+ BPF_LD_IMM64(R5, 0x0LL),
+ BPF_LD_IMM64(R6, 0x0LL),
+ BPF_LD_IMM64(R7, 0x0LL),
+ BPF_LD_IMM64(R8, 0x0LL),
+ BPF_LD_IMM64(R9, 0x0LL),
+ BPF_ALU64_REG(BPF_ADD, R0, R0),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_ALU64_REG(BPF_ADD, R0, R2),
+ BPF_ALU64_REG(BPF_ADD, R0, R3),
+ BPF_ALU64_REG(BPF_ADD, R0, R4),
+ BPF_ALU64_REG(BPF_ADD, R0, R5),
+ BPF_ALU64_REG(BPF_ADD, R0, R6),
+ BPF_ALU64_REG(BPF_ADD, R0, R7),
+ BPF_ALU64_REG(BPF_ADD, R0, R8),
+ BPF_ALU64_REG(BPF_ADD, R0, R9),
+ BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfefe } }
+ },
{
"INT: ALU MIX",
.u.insns_int = {
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 8c1ad1ced72c..270bf7289b1e 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -36,9 +36,9 @@ static int runs = 4;
module_param(runs, int, 0);
MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
-static int max_size = 65536;
+static int max_size = 0;
module_param(max_size, int, 0);
-MODULE_PARM_DESC(runs, "Maximum table size (default: 65536)");
+MODULE_PARM_DESC(runs, "Maximum table size (default: calculated)");
static bool shrinking = false;
module_param(shrinking, bool, 0);
@@ -52,6 +52,10 @@ static int tcount = 10;
module_param(tcount, int, 0);
MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");
+static bool enomem_retry = false;
+module_param(enomem_retry, bool, 0);
+MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)");
+
struct test_obj {
int value;
struct rhash_head node;
@@ -76,6 +80,28 @@ static struct rhashtable_params test_rht_params = {
static struct semaphore prestart_sem;
static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
+static int insert_retry(struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ int err, retries = -1, enomem_retries = 0;
+
+ do {
+ retries++;
+ cond_resched();
+ err = rhashtable_insert_fast(ht, obj, params);
+ if (err == -ENOMEM && enomem_retry) {
+ enomem_retries++;
+ err = -EBUSY;
+ }
+ } while (err == -EBUSY);
+
+ if (enomem_retries)
+ pr_info(" %u insertions retried after -ENOMEM\n",
+ enomem_retries);
+
+ return err ? : retries;
+}
+
static int __init test_rht_lookup(struct rhashtable *ht)
{
unsigned int i;
@@ -157,7 +183,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
{
struct test_obj *obj;
int err;
- unsigned int i, insert_fails = 0;
+ unsigned int i, insert_retries = 0;
s64 start, end;
/*
@@ -170,22 +196,16 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
struct test_obj *obj = &array[i];
obj->value = i * 2;
-
- err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
- if (err == -ENOMEM || err == -EBUSY) {
- /* Mark failed inserts but continue */
- obj->value = TEST_INSERT_FAIL;
- insert_fails++;
- } else if (err) {
+ err = insert_retry(ht, &obj->node, test_rht_params);
+ if (err > 0)
+ insert_retries += err;
+ else if (err)
return err;
- }
-
- cond_resched();
}
- if (insert_fails)
- pr_info(" %u insertions failed due to memory pressure\n",
- insert_fails);
+ if (insert_retries)
+ pr_info(" %u insertions retried due to memory pressure\n",
+ insert_retries);
test_bucket_stats(ht);
rcu_read_lock();
@@ -236,13 +256,15 @@ static int thread_lookup_test(struct thread_data *tdata)
obj->value, key);
err++;
}
+
+ cond_resched();
}
return err;
}
static int threadfunc(void *data)
{
- int i, step, err = 0, insert_fails = 0;
+ int i, step, err = 0, insert_retries = 0;
struct thread_data *tdata = data;
up(&prestart_sem);
@@ -251,20 +273,18 @@ static int threadfunc(void *data)
for (i = 0; i < entries; i++) {
tdata->objs[i].value = (tdata->id << 16) | i;
- err = rhashtable_insert_fast(&ht, &tdata->objs[i].node,
- test_rht_params);
- if (err == -ENOMEM || err == -EBUSY) {
- tdata->objs[i].value = TEST_INSERT_FAIL;
- insert_fails++;
+ err = insert_retry(&ht, &tdata->objs[i].node, test_rht_params);
+ if (err > 0) {
+ insert_retries += err;
} else if (err) {
pr_err(" thread[%d]: rhashtable_insert_fast failed\n",
tdata->id);
goto out;
}
}
- if (insert_fails)
- pr_info(" thread[%d]: %d insert failures\n",
- tdata->id, insert_fails);
+ if (insert_retries)
+ pr_info(" thread[%d]: %u insertions retried due to memory pressure\n",
+ tdata->id, insert_retries);
err = thread_lookup_test(tdata);
if (err) {
@@ -285,6 +305,8 @@ static int threadfunc(void *data)
goto out;
}
tdata->objs[i].value = TEST_INSERT_FAIL;
+
+ cond_resched();
}
err = thread_lookup_test(tdata);
if (err) {
@@ -311,7 +333,7 @@ static int __init test_rht_init(void)
entries = min(entries, MAX_ENTRIES);
test_rht_params.automatic_shrinking = shrinking;
- test_rht_params.max_size = max_size;
+ test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries);
test_rht_params.nelem_hint = size;
pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
@@ -357,6 +379,8 @@ static int __init test_rht_init(void)
return -ENOMEM;
}
+ test_rht_params.max_size = max_size ? :
+ roundup_pow_of_two(tcount * entries);
err = rhashtable_init(&ht, &test_rht_params);
if (err < 0) {
pr_warn("Test failed: Unable to initialize hashtable: %d\n",
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index f9cee8e1233c..ac3f9476b776 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -31,6 +31,9 @@
#include <linux/dcache.h>
#include <linux/cred.h>
#include <net/addrconf.h>
+#ifdef CONFIG_BLOCK
+#include <linux/blkdev.h>
+#endif
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/sections.h> /* for dereference_function_descriptor() */
@@ -613,6 +616,26 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp
return buf;
}
+#ifdef CONFIG_BLOCK
+static noinline_for_stack
+char *bdev_name(char *buf, char *end, struct block_device *bdev,
+ struct printf_spec spec, const char *fmt)
+{
+ struct gendisk *hd = bdev->bd_disk;
+
+ buf = string(buf, end, hd->disk_name, spec);
+ if (bdev->bd_part->partno) {
+ if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
+ if (buf < end)
+ *buf = 'p';
+ buf++;
+ }
+ buf = number(buf, end, bdev->bd_part->partno, spec);
+ }
+ return buf;
+}
+#endif
+
static noinline_for_stack
char *symbol_string(char *buf, char *end, void *ptr,
struct printf_spec spec, const char *fmt)
@@ -1443,6 +1466,7 @@ int kptr_restrict __read_mostly;
* (default assumed to be phys_addr_t, passed by reference)
* - 'd[234]' For a dentry name (optionally 2-4 last components)
* - 'D[234]' Same as 'd' but for a struct file
+ * - 'g' For block_device name (gendisk + partition number)
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
* - 'Cn' For a clock, it prints the name (Common Clock Framework) or address
@@ -1600,6 +1624,11 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return dentry_name(buf, end,
((const struct file *)ptr)->f_path.dentry,
spec, fmt);
+#ifdef CONFIG_BLOCK
+ case 'g':
+ return bdev_name(buf, end, ptr, spec, fmt);
+#endif
+
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
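From C code the new specifier is used like any other %p extension; for example (hypothetical call site):

    /* Prints e.g. "sda1", or "mmcblk0p1" for disks whose name ends in a
     * digit (the 'p' separator inserted by bdev_name() above): */
    pr_info("opened block device %pg\n", bdev);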