Diffstat (limited to 'lib')
-rw-r--r--  lib/842/842_debugfs.h        |   5
-rw-r--r--  lib/Kconfig.debug            |  25
-rw-r--r--  lib/dynamic_debug.c          |  12
-rw-r--r--  lib/fault-inject.c           |  73
-rw-r--r--  lib/fonts/fonts.c            | 103
-rw-r--r--  lib/genalloc.c               | 125
-rw-r--r--  lib/iov_iter.c               |  15
-rw-r--r--  lib/kobject.c                |   4
-rw-r--r--  lib/notifier-error-inject.c  |  13
-rw-r--r--  lib/raid6/Makefile           |  98
-rw-r--r--  lib/scatterlist.c            |  36
-rw-r--r--  lib/sg_pool.c                |  39
-rw-r--r--  lib/test_kasan.c             |  98
13 files changed, 384 insertions(+), 262 deletions(-)
diff --git a/lib/842/842_debugfs.h b/lib/842/842_debugfs.h
index 277e403e8701..4469407c3e0d 100644
--- a/lib/842/842_debugfs.h
+++ b/lib/842/842_debugfs.h
@@ -22,8 +22,6 @@ static int __init sw842_debugfs_create(void)
return -ENODEV;
sw842_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL);
- if (IS_ERR(sw842_debugfs_root))
- return PTR_ERR(sw842_debugfs_root);
for (i = 0; i < ARRAY_SIZE(template_count); i++) {
char name[32];
@@ -46,8 +44,7 @@ static int __init sw842_debugfs_create(void)
static void __exit sw842_debugfs_remove(void)
{
- if (sw842_debugfs_root && !IS_ERR(sw842_debugfs_root))
- debugfs_remove_recursive(sw842_debugfs_root);
+ debugfs_remove_recursive(sw842_debugfs_root);
}
#endif
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d4c8c9323aa4..4ac4ca21a30a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -305,19 +305,26 @@ config DEBUG_FS
If unsure, say N.
-config HEADERS_CHECK
- bool "Run 'make headers_check' when building vmlinux"
+config HEADERS_INSTALL
+ bool "Install uapi headers to usr/include"
depends on !UML
help
- This option will extract the user-visible kernel headers whenever
- building the kernel, and will run basic sanity checks on them to
- ensure that exported files do not attempt to include files which
- were not exported, etc.
+ This option will install uapi headers (headers exported to user-space)
+ into the usr/include directory for use during the kernel build.
+ This is unneeded for building the kernel itself, but needed for some
+ user-space program samples. It is also needed by some features such
+ as uapi header sanity checks.
+
+config HEADERS_CHECK
+ bool "Run sanity checks on uapi headers when building 'all'"
+ depends on HEADERS_INSTALL
+ help
+ This option will run basic sanity checks on uapi headers when
+ building the 'all' target, for example, ensure that they do not
+ attempt to include files which were not exported, etc.
If you're making modifications to header files which are
- relevant for userspace, say 'Y', and check the headers
- exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
- your build tree), to make sure they're suitable.
+ relevant for userspace, say 'Y'.
config OPTIMIZE_INLINING
bool "Allow compiler to uninline functions marked 'inline'"
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 8a16c2d498e9..c60409138e13 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -993,20 +993,14 @@ static __initdata int ddebug_init_success;
static int __init dynamic_debug_init_debugfs(void)
{
- struct dentry *dir, *file;
+ struct dentry *dir;
if (!ddebug_init_success)
return -ENODEV;
dir = debugfs_create_dir("dynamic_debug", NULL);
- if (!dir)
- return -ENOMEM;
- file = debugfs_create_file("control", 0644, dir, NULL,
- &ddebug_proc_fops);
- if (!file) {
- debugfs_remove(dir);
- return -ENOMEM;
- }
+ debugfs_create_file("control", 0644, dir, NULL, &ddebug_proc_fops);
+
return 0;
}
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 3cb21b2bf088..8186ca84910b 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -166,10 +166,10 @@ static int debugfs_ul_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
-static struct dentry *debugfs_create_ul(const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value)
+static void debugfs_create_ul(const char *name, umode_t mode,
+ struct dentry *parent, unsigned long *value)
{
- return debugfs_create_file(name, mode, parent, value, &fops_ul);
+ debugfs_create_file(name, mode, parent, value, &fops_ul);
}
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
@@ -185,12 +185,11 @@ static int debugfs_stacktrace_depth_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
debugfs_stacktrace_depth_set, "%llu\n");
-static struct dentry *debugfs_create_stacktrace_depth(
- const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value)
+static void debugfs_create_stacktrace_depth(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value)
{
- return debugfs_create_file(name, mode, parent, value,
- &fops_stacktrace_depth);
+ debugfs_create_file(name, mode, parent, value, &fops_stacktrace_depth);
}
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
@@ -202,51 +201,31 @@ struct dentry *fault_create_debugfs_attr(const char *name,
struct dentry *dir;
dir = debugfs_create_dir(name, parent);
- if (!dir)
- return ERR_PTR(-ENOMEM);
-
- if (!debugfs_create_ul("probability", mode, dir, &attr->probability))
- goto fail;
- if (!debugfs_create_ul("interval", mode, dir, &attr->interval))
- goto fail;
- if (!debugfs_create_atomic_t("times", mode, dir, &attr->times))
- goto fail;
- if (!debugfs_create_atomic_t("space", mode, dir, &attr->space))
- goto fail;
- if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose))
- goto fail;
- if (!debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
- &attr->ratelimit_state.interval))
- goto fail;
- if (!debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
- &attr->ratelimit_state.burst))
- goto fail;
- if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
- goto fail;
+ if (IS_ERR(dir))
+ return dir;
+
+ debugfs_create_ul("probability", mode, dir, &attr->probability);
+ debugfs_create_ul("interval", mode, dir, &attr->interval);
+ debugfs_create_atomic_t("times", mode, dir, &attr->times);
+ debugfs_create_atomic_t("space", mode, dir, &attr->space);
+ debugfs_create_ul("verbose", mode, dir, &attr->verbose);
+ debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
+ &attr->ratelimit_state.interval);
+ debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
+ &attr->ratelimit_state.burst);
+ debugfs_create_bool("task-filter", mode, dir, &attr->task_filter);
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-
- if (!debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
- &attr->stacktrace_depth))
- goto fail;
- if (!debugfs_create_ul("require-start", mode, dir,
- &attr->require_start))
- goto fail;
- if (!debugfs_create_ul("require-end", mode, dir, &attr->require_end))
- goto fail;
- if (!debugfs_create_ul("reject-start", mode, dir, &attr->reject_start))
- goto fail;
- if (!debugfs_create_ul("reject-end", mode, dir, &attr->reject_end))
- goto fail;
-
+ debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
+ &attr->stacktrace_depth);
+ debugfs_create_ul("require-start", mode, dir, &attr->require_start);
+ debugfs_create_ul("require-end", mode, dir, &attr->require_end);
+ debugfs_create_ul("reject-start", mode, dir, &attr->reject_start);
+ debugfs_create_ul("reject-end", mode, dir, &attr->reject_end);
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
attr->dname = dget(dir);
return dir;
-fail:
- debugfs_remove_recursive(dir);
-
- return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);
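For reference, a minimal caller sketch (not part of the patch; the module and attribute names are hypothetical) showing that, after this change, only the directory returned by fault_create_debugfs_attr() needs checking, since the attribute files are created best-effort:

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fault-inject.h>

static DECLARE_FAULT_ATTR(fail_example);

static int __init example_fault_init(void)
{
	struct dentry *dir;

	/* Attribute file creation no longer reports failures; only the
	 * directory itself can fail. */
	dir = fault_create_debugfs_attr("fail_example", NULL, &fail_example);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	return 0;
}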
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 9969358a7af5..e7258d8c252b 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -20,56 +20,42 @@
#endif
#include <linux/font.h>
-#define NO_FONTS
-
static const struct font_desc *fonts[] = {
#ifdef CONFIG_FONT_8x8
-#undef NO_FONTS
- &font_vga_8x8,
+ &font_vga_8x8,
#endif
#ifdef CONFIG_FONT_8x16
-#undef NO_FONTS
- &font_vga_8x16,
+ &font_vga_8x16,
#endif
#ifdef CONFIG_FONT_6x11
-#undef NO_FONTS
- &font_vga_6x11,
+ &font_vga_6x11,
#endif
#ifdef CONFIG_FONT_7x14
-#undef NO_FONTS
- &font_7x14,
+ &font_7x14,
#endif
#ifdef CONFIG_FONT_SUN8x16
-#undef NO_FONTS
- &font_sun_8x16,
+ &font_sun_8x16,
#endif
#ifdef CONFIG_FONT_SUN12x22
-#undef NO_FONTS
- &font_sun_12x22,
+ &font_sun_12x22,
#endif
#ifdef CONFIG_FONT_10x18
-#undef NO_FONTS
- &font_10x18,
+ &font_10x18,
#endif
#ifdef CONFIG_FONT_ACORN_8x8
-#undef NO_FONTS
- &font_acorn_8x8,
+ &font_acorn_8x8,
#endif
#ifdef CONFIG_FONT_PEARL_8x8
-#undef NO_FONTS
- &font_pearl_8x8,
+ &font_pearl_8x8,
#endif
#ifdef CONFIG_FONT_MINI_4x6
-#undef NO_FONTS
- &font_mini_4x6,
+ &font_mini_4x6,
#endif
#ifdef CONFIG_FONT_6x10
-#undef NO_FONTS
- &font_6x10,
+ &font_6x10,
#endif
#ifdef CONFIG_FONT_TER16x32
-#undef NO_FONTS
- &font_ter_16x32,
+ &font_ter_16x32,
#endif
};
@@ -90,16 +76,17 @@ static const struct font_desc *fonts[] = {
* specified font.
*
*/
-
const struct font_desc *find_font(const char *name)
{
- unsigned int i;
+ unsigned int i;
- for (i = 0; i < num_fonts; i++)
- if (!strcmp(fonts[i]->name, name))
- return fonts[i];
- return NULL;
+ BUILD_BUG_ON(!num_fonts);
+ for (i = 0; i < num_fonts; i++)
+ if (!strcmp(fonts[i]->name, name))
+ return fonts[i];
+ return NULL;
}
+EXPORT_SYMBOL(find_font);
/**
@@ -116,44 +103,46 @@ const struct font_desc *find_font(const char *name)
* chosen font.
*
*/
-
const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
u32 font_h)
{
- int i, c, cc;
- const struct font_desc *f, *g;
-
- g = NULL;
- cc = -10000;
- for(i=0; i<num_fonts; i++) {
- f = fonts[i];
- c = f->pref;
+ int i, c, cc, res;
+ const struct font_desc *f, *g;
+
+ g = NULL;
+ cc = -10000;
+ for (i = 0; i < num_fonts; i++) {
+ f = fonts[i];
+ c = f->pref;
#if defined(__mc68000__)
#ifdef CONFIG_FONT_PEARL_8x8
- if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
- c = 100;
+ if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
+ c = 100;
#endif
#ifdef CONFIG_FONT_6x11
- if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX)
- c = 100;
+ if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX)
+ c = 100;
#endif
#endif
- if ((yres < 400) == (f->height <= 8))
- c += 1000;
+ if ((yres < 400) == (f->height <= 8))
+ c += 1000;
+
+ /* prefer a bigger font for high resolution */
+ res = (xres / f->width) * (yres / f->height) / 1000;
+ if (res > 20)
+ c += 20 - res;
- if ((font_w & (1 << (f->width - 1))) &&
- (font_h & (1 << (f->height - 1))))
- c += 1000;
+ if ((font_w & (1 << (f->width - 1))) &&
+ (font_h & (1 << (f->height - 1))))
+ c += 1000;
- if (c > cc) {
- cc = c;
- g = f;
+ if (c > cc) {
+ cc = c;
+ g = f;
+ }
}
- }
- return g;
+ return g;
}
-
-EXPORT_SYMBOL(find_font);
EXPORT_SYMBOL(get_default_font);
MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
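A worked example of the new scoring term (illustrative only, not from the patch): on a 1920x1080 console an 8x16 font gives res = (1920/8) * (1080/16) / 1000 = 16, so no penalty, while a 4x6 font gives res = 86 and a penalty of 20 - 86 = -66, so get_default_font() now leans toward larger glyphs at high resolutions:

	/* Hypothetical fbcon-style lookup; the -1 masks accept any glyph size. */
	const struct font_desc *font;

	font = get_default_font(1920, 1080, -1, -1);
	if (!font)
		font = find_font("VGA8x16");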
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 5257f74fccf3..9fc31292cfa1 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -327,21 +327,45 @@ EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
* gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
* @pool: pool to allocate from
* @size: number of bytes to allocate from the pool
- * @dma: dma-view physical address return value. Use NULL if unneeded.
+ * @dma: dma-view physical address return value. Use %NULL if unneeded.
*
* Allocate the requested number of bytes from the specified pool.
* Uses the pool allocation function (with first-fit algorithm by default).
* Can not be used in NMI handler on architectures without
* NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
*/
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
+ return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc);
+
+/**
+ * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
+ * usage with the given pool algorithm
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of bytes from the specified pool. Uses the
+ * given pool allocation function. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
+ */
+void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data)
+{
unsigned long vaddr;
if (!pool)
return NULL;
- vaddr = gen_pool_alloc(pool, size);
+ vaddr = gen_pool_alloc_algo(pool, size, algo, data);
if (!vaddr)
return NULL;
@@ -350,7 +374,102 @@ void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
return (void *)vaddr;
}
-EXPORT_SYMBOL(gen_pool_dma_alloc);
+EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
+
+/**
+ * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
+ * usage with the given alignment
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @align: alignment in bytes for starting address
+ *
+ * Allocate the requested number of bytes from the specified pool, with the given
+ * alignment restriction. Can not be used in NMI handler on architectures
+ * without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
+ */
+void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align)
+{
+ struct genpool_data_align data = { .align = align };
+
+ return gen_pool_dma_alloc_algo(pool, size, dma,
+ gen_pool_first_fit_align, &data);
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc_align);
+
+/**
+ * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
+ * DMA usage
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: dma-view physical address return value. Use %NULL if unneeded.
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
+{
+ return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc);
+
+/**
+ * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
+ * DMA usage with the given pool algorithm
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool. Uses
+ * the given pool allocation function. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data)
+{
+ void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);
+
+ if (vaddr)
+ memset(vaddr, 0, size);
+
+ return vaddr;
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);
+
+/**
+ * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
+ * DMA usage with the given alignment
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @align: alignment in bytes for starting address
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool,
+ * with the given alignment restriction. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align)
+{
+ struct genpool_data_align data = { .align = align };
+
+ return gen_pool_dma_zalloc_algo(pool, size, dma,
+ gen_pool_first_fit_align, &data);
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
/**
* gen_pool_free - free allocated special memory back to the pool
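A minimal usage sketch for the new aligned helper (assumes an already-created genpool named "pool"; not from the patch):

	dma_addr_t dma;
	void *buf;

	/* 256 zeroed bytes, 64-byte aligned, from an existing genpool */
	buf = gen_pool_dma_zalloc_align(pool, 256, &dma, 64);
	if (!buf)
		return -ENOMEM;
	/* ... hand dma to the device, use buf from the CPU ... */
	gen_pool_free(pool, (unsigned long)buf, 256);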
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f99c41d4eb54..f1e0569b4539 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1634,9 +1634,9 @@ EXPORT_SYMBOL(dup_iter);
* on-stack array was used or not (and regardless of whether this function
* returns an error or not).
*
- * Return: 0 on success or negative error code on error.
+ * Return: Negative error code on error, bytes imported on success
*/
-int import_iovec(int type, const struct iovec __user * uvector,
+ssize_t import_iovec(int type, const struct iovec __user * uvector,
unsigned nr_segs, unsigned fast_segs,
struct iovec **iov, struct iov_iter *i)
{
@@ -1652,16 +1652,17 @@ int import_iovec(int type, const struct iovec __user * uvector,
}
iov_iter_init(i, type, p, nr_segs, n);
*iov = p == *iov ? NULL : p;
- return 0;
+ return n;
}
EXPORT_SYMBOL(import_iovec);
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
-int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i)
+ssize_t compat_import_iovec(int type,
+ const struct compat_iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i)
{
ssize_t n;
struct iovec *p;
@@ -1675,7 +1676,7 @@ int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
}
iov_iter_init(i, type, p, nr_segs, n);
*iov = p == *iov ? NULL : p;
- return 0;
+ return n;
}
#endif
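Callers now receive the total byte count rather than 0 on success; a hedged sketch of the updated calling convention (uvec and nr_segs are placeholders):

	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
	if (ret < 0)
		return ret;
	/* ret is the number of bytes covered, i.e. iov_iter_count(&iter) */
	kfree(iov);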
diff --git a/lib/kobject.c b/lib/kobject.c
index f2ccdbac8ed9..83198cb37d8d 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -498,8 +498,10 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
- if (!kobj->parent)
+ if (!kobj->parent) {
+ kobject_put(kobj);
return -EINVAL;
+ }
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 3d2ba7cf83f4..21016b32d313 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -59,33 +59,22 @@ struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
err_inject->nb.priority = priority;
dir = debugfs_create_dir(name, parent);
- if (!dir)
- return ERR_PTR(-ENOMEM);
actions_dir = debugfs_create_dir("actions", dir);
- if (!actions_dir)
- goto fail;
for (action = err_inject->actions; action->name; action++) {
struct dentry *action_dir;
action_dir = debugfs_create_dir(action->name, actions_dir);
- if (!action_dir)
- goto fail;
/*
* Create debugfs r/w file containing action->error. If
* notifier call chain is called with action->val, it will
* fail with the error code
*/
- if (!debugfs_create_errno("error", mode, action_dir,
- &action->error))
- goto fail;
+ debugfs_create_errno("error", mode, action_dir, &action->error);
}
return dir;
-fail:
- debugfs_remove_recursive(dir);
- return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(notifier_err_inject_init);
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index e723eacf7868..42695bc8d451 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -12,9 +12,6 @@ raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
hostprogs-y += mktables
-quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) < $< > $@
-
ifeq ($(CONFIG_ALTIVEC),y)
altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
@@ -26,7 +23,6 @@ CFLAGS_REMOVE_altivec1.o += -msoft-float
CFLAGS_REMOVE_altivec2.o += -msoft-float
CFLAGS_REMOVE_altivec4.o += -msoft-float
CFLAGS_REMOVE_altivec8.o += -msoft-float
-CFLAGS_REMOVE_altivec8.o += -msoft-float
CFLAGS_REMOVE_vpermxor1.o += -msoft-float
CFLAGS_REMOVE_vpermxor2.o += -msoft-float
CFLAGS_REMOVE_vpermxor4.o += -msoft-float
@@ -51,111 +47,39 @@ CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only
endif
endif
-targets += int1.c
-$(obj)/int1.c: UNROLL := 1
-$(obj)/int1.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int2.c
-$(obj)/int2.c: UNROLL := 2
-$(obj)/int2.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int4.c
-$(obj)/int4.c: UNROLL := 4
-$(obj)/int4.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int8.c
-$(obj)/int8.c: UNROLL := 8
-$(obj)/int8.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int16.c
-$(obj)/int16.c: UNROLL := 16
-$(obj)/int16.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
+quiet_cmd_unroll = UNROLL $@
+ cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$* < $< > $@
-targets += int32.c
-$(obj)/int32.c: UNROLL := 32
-$(obj)/int32.c: $(src)/int.uc $(src)/unroll.awk FORCE
+targets += int1.c int2.c int4.c int8.c int16.c int32.c
+$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_altivec1.o += $(altivec_flags)
-targets += altivec1.c
-$(obj)/altivec1.c: UNROLL := 1
-$(obj)/altivec1.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_altivec2.o += $(altivec_flags)
-targets += altivec2.c
-$(obj)/altivec2.c: UNROLL := 2
-$(obj)/altivec2.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_altivec4.o += $(altivec_flags)
-targets += altivec4.c
-$(obj)/altivec4.c: UNROLL := 4
-$(obj)/altivec4.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_altivec8.o += $(altivec_flags)
-targets += altivec8.c
-$(obj)/altivec8.c: UNROLL := 8
-$(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
+targets += altivec1.c altivec2.c altivec4.c altivec8.c
+$(obj)/altivec%.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_vpermxor1.o += $(altivec_flags)
-targets += vpermxor1.c
-$(obj)/vpermxor1.c: UNROLL := 1
-$(obj)/vpermxor1.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_vpermxor2.o += $(altivec_flags)
-targets += vpermxor2.c
-$(obj)/vpermxor2.c: UNROLL := 2
-$(obj)/vpermxor2.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_vpermxor4.o += $(altivec_flags)
-targets += vpermxor4.c
-$(obj)/vpermxor4.c: UNROLL := 4
-$(obj)/vpermxor4.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_vpermxor8.o += $(altivec_flags)
-targets += vpermxor8.c
-$(obj)/vpermxor8.c: UNROLL := 8
-$(obj)/vpermxor8.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c
+$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_neon1.o += $(NEON_FLAGS)
-targets += neon1.c
-$(obj)/neon1.c: UNROLL := 1
-$(obj)/neon1.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_neon2.o += $(NEON_FLAGS)
-targets += neon2.c
-$(obj)/neon2.c: UNROLL := 2
-$(obj)/neon2.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_neon4.o += $(NEON_FLAGS)
-targets += neon4.c
-$(obj)/neon4.c: UNROLL := 4
-$(obj)/neon4.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_neon8.o += $(NEON_FLAGS)
-targets += neon8.c
-$(obj)/neon8.c: UNROLL := 8
-$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE
+targets += neon1.c neon2.c neon4.c neon8.c
+$(obj)/neon%.c: $(src)/neon.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
targets += s390vx8.c
-$(obj)/s390vx8.c: UNROLL := 8
-$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
+$(obj)/s390vx%.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
quiet_cmd_mktable = TABLE $@
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index eacb82468437..c2cf2c311b7d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -179,7 +179,8 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
* __sg_free_table - Free a previously mapped sg table
* @table: The sg table header to use
* @max_ents: The maximum number of entries per single scatterlist
- * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
+ * @nents_first_chunk: Number of entries in the (preallocated) first
+ * scatterlist chunk, 0 means no such preallocated first chunk
* @free_fn: Free function
*
* Description:
@@ -189,9 +190,10 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
*
**/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
- bool skip_first_chunk, sg_free_fn *free_fn)
+ unsigned int nents_first_chunk, sg_free_fn *free_fn)
{
struct scatterlist *sgl, *next;
+ unsigned curr_max_ents = nents_first_chunk ?: max_ents;
if (unlikely(!table->sgl))
return;
@@ -207,9 +209,9 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
* sg_size is then one less than alloc size, since the last
* element is the chain pointer.
*/
- if (alloc_size > max_ents) {
- next = sg_chain_ptr(&sgl[max_ents - 1]);
- alloc_size = max_ents;
+ if (alloc_size > curr_max_ents) {
+ next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
+ alloc_size = curr_max_ents;
sg_size = alloc_size - 1;
} else {
sg_size = alloc_size;
@@ -217,11 +219,12 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
}
table->orig_nents -= sg_size;
- if (skip_first_chunk)
- skip_first_chunk = false;
+ if (nents_first_chunk)
+ nents_first_chunk = 0;
else
free_fn(sgl, alloc_size);
sgl = next;
+ curr_max_ents = max_ents;
}
table->sgl = NULL;
@@ -244,6 +247,8 @@ EXPORT_SYMBOL(sg_free_table);
* @table: The sg table header to use
* @nents: Number of entries in sg list
* @max_ents: The maximum number of entries the allocator returns per call
+ * @nents_first_chunk: Number of entries in the (preallocated) first
+ * scatterlist chunk, 0 means no such preallocated chunk provided by user
* @gfp_mask: GFP allocation mask
* @alloc_fn: Allocator to use
*
@@ -260,10 +265,13 @@ EXPORT_SYMBOL(sg_free_table);
**/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
unsigned int max_ents, struct scatterlist *first_chunk,
- gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
+ unsigned int nents_first_chunk, gfp_t gfp_mask,
+ sg_alloc_fn *alloc_fn)
{
struct scatterlist *sg, *prv;
unsigned int left;
+ unsigned curr_max_ents = nents_first_chunk ?: max_ents;
+ unsigned prv_max_ents;
memset(table, 0, sizeof(*table));
@@ -279,8 +287,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
do {
unsigned int sg_size, alloc_size = left;
- if (alloc_size > max_ents) {
- alloc_size = max_ents;
+ if (alloc_size > curr_max_ents) {
+ alloc_size = curr_max_ents;
sg_size = alloc_size - 1;
} else
sg_size = alloc_size;
@@ -314,7 +322,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
* If this is not the first mapping, chain previous part.
*/
if (prv)
- sg_chain(prv, max_ents, sg);
+ sg_chain(prv, prv_max_ents, sg);
else
table->sgl = sg;
@@ -325,6 +333,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
sg_mark_end(&sg[sg_size - 1]);
prv = sg;
+ prv_max_ents = curr_max_ents;
+ curr_max_ents = max_ents;
} while (left);
return 0;
@@ -347,9 +357,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
int ret;
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
- NULL, gfp_mask, sg_kmalloc);
+ NULL, 0, gfp_mask, sg_kmalloc);
if (unlikely(ret))
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
return ret;
}
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index cff20df2695e..db29e5c1f790 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -70,18 +70,27 @@ static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
/**
* sg_free_table_chained - Free a previously mapped sg table
* @table: The sg table header to use
- * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained?
+ * @nents_first_chunk: size of the first_chunk SGL passed to
+ * sg_alloc_table_chained
*
* Description:
* Free an sg table previously allocated and setup with
* sg_alloc_table_chained().
*
+ * @nents_first_chunk has to be the same value that was passed to
+ * sg_alloc_table_chained().
+ *
**/
-void sg_free_table_chained(struct sg_table *table, bool first_chunk)
+void sg_free_table_chained(struct sg_table *table,
+ unsigned nents_first_chunk)
{
- if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE)
+ if (table->orig_nents <= nents_first_chunk)
return;
- __sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free);
+
+ if (nents_first_chunk == 1)
+ nents_first_chunk = 0;
+
+ __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free);
}
EXPORT_SYMBOL_GPL(sg_free_table_chained);
@@ -90,31 +99,41 @@ EXPORT_SYMBOL_GPL(sg_free_table_chained);
* @table: The sg table header to use
* @nents: Number of entries in sg list
* @first_chunk: first SGL
+ * @nents_first_chunk: number of entries in @first_chunk
*
* Description:
* Allocate and chain SGLs in an sg table. If @nents@ is larger than
- * SG_CHUNK_SIZE a chained sg table will be setup.
+ * @nents_first_chunk a chained sg table will be set up. @first_chunk is
+ * ignored if nents_first_chunk <= 1 because the caller expects the SGL to
+ * point to a non-chained SGL.
*
**/
int sg_alloc_table_chained(struct sg_table *table, int nents,
- struct scatterlist *first_chunk)
+ struct scatterlist *first_chunk, unsigned nents_first_chunk)
{
int ret;
BUG_ON(!nents);
- if (first_chunk) {
- if (nents <= SG_CHUNK_SIZE) {
+ if (first_chunk && nents_first_chunk) {
+ if (nents <= nents_first_chunk) {
table->nents = table->orig_nents = nents;
sg_init_table(table->sgl, nents);
return 0;
}
}
+ /* The caller expects the first SGL to hold real entries */
+ if (nents_first_chunk <= 1) {
+ first_chunk = NULL;
+ nents_first_chunk = 0;
+ }
+
ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
- first_chunk, GFP_ATOMIC, sg_pool_alloc);
+ first_chunk, nents_first_chunk,
+ GFP_ATOMIC, sg_pool_alloc);
if (unlikely(ret))
- sg_free_table_chained(table, (bool)first_chunk);
+ sg_free_table_chained(table, nents_first_chunk);
return ret;
}
EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
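A hedged caller sketch for the new signature, embedding a small inline first chunk (struct and function names are hypothetical):

	struct example_cmd {
		struct sg_table table;
		struct scatterlist inline_sgl[4];
	};

	static int example_alloc_sgl(struct example_cmd *cmd, int nents)
	{
		/* Pass the inline SGL and its size as nents_first_chunk */
		return sg_alloc_table_chained(&cmd->table, nents,
					      cmd->inline_sgl,
					      ARRAY_SIZE(cmd->inline_sgl));
	}

	static void example_free_sgl(struct example_cmd *cmd)
	{
		/* Must match the nents_first_chunk used at allocation time */
		sg_free_table_chained(&cmd->table, ARRAY_SIZE(cmd->inline_sgl));
	}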
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index e3c593c38eff..b63b367a94e8 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -7,16 +7,17 @@
#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
+#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/kasan.h>
#include <linux/kernel.h>
-#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/kasan.h>
/*
* Note: test functions are marked noinline so that their names appear in
@@ -619,6 +620,95 @@ static noinline void __init kasan_strings(void)
strnlen(ptr, 1);
}
+static noinline void __init kasan_bitops(void)
+{
+ /*
+ * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
+ * this way we do not actually corrupt other memory.
+ */
+ long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
+ if (!bits)
+ return;
+
+ /*
+ * Below calls try to access bit within allocated memory; however, the
+ * below accesses are still out-of-bounds, since bitops are defined to
+ * operate on the whole long the bit is in.
+ */
+ pr_info("out-of-bounds in set_bit\n");
+ set_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __set_bit\n");
+ __set_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in clear_bit\n");
+ clear_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __clear_bit\n");
+ __clear_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in clear_bit_unlock\n");
+ clear_bit_unlock(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __clear_bit_unlock\n");
+ __clear_bit_unlock(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in change_bit\n");
+ change_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __change_bit\n");
+ __change_bit(BITS_PER_LONG, bits);
+
+ /*
+ * Below calls try to access bit beyond allocated memory.
+ */
+ pr_info("out-of-bounds in test_and_set_bit\n");
+ test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_set_bit\n");
+ __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_set_bit_lock\n");
+ test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_clear_bit\n");
+ test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_clear_bit\n");
+ __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_change_bit\n");
+ test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_change_bit\n");
+ __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_bit\n");
+ (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+#if defined(clear_bit_unlock_is_negative_byte)
+ pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
+ clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits);
+#endif
+ kfree(bits);
+}
+
+static noinline void __init kmalloc_double_kzfree(void)
+{
+ char *ptr;
+ size_t size = 16;
+
+ pr_info("double-free (kzfree)\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ kzfree(ptr);
+ kzfree(ptr);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -660,6 +750,8 @@ static int __init kmalloc_tests_init(void)
kasan_memchr();
kasan_memcmp();
kasan_strings();
+ kasan_bitops();
+ kmalloc_double_kzfree();
kasan_restore_multi_shot(multishot);