From e21ab36a80c2e011a14149fecdd654b8b47d3e8c Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 28 Oct 2014 15:11:43 -0700 Subject: test: bpf: add a testcase reduced from nmap nmap generates classic BPF programs to filter ARP packets with given target MAC which triggered a bug in eBPF x64 JIT. The bug was fixed in commit e0ee9c12157d ("x86: bpf_jit: fix two bugs in eBPF JIT compiler") This patch is adding a testcase in eBPF instructions (those that were generated by classic->eBPF converter) to be processed by JIT. The test is primarily targeting JIT compiler. Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- lib/test_bpf.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'lib') diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 23e070bcf72d..3f167d2eeb94 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -1756,6 +1756,49 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } } }, + { + "nmap reduced", + .u.insns_int = { + BPF_MOV64_REG(R6, R1), + BPF_LD_ABS(BPF_H, 12), + BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28), + BPF_LD_ABS(BPF_H, 12), + BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26), + BPF_MOV32_IMM(R0, 18), + BPF_STX_MEM(BPF_W, R10, R0, -64), + BPF_LDX_MEM(BPF_W, R7, R10, -64), + BPF_LD_IND(BPF_W, R7, 14), + BPF_STX_MEM(BPF_W, R10, R0, -60), + BPF_MOV32_IMM(R0, 280971478), + BPF_STX_MEM(BPF_W, R10, R0, -56), + BPF_LDX_MEM(BPF_W, R7, R10, -56), + BPF_LDX_MEM(BPF_W, R0, R10, -60), + BPF_ALU32_REG(BPF_SUB, R0, R7), + BPF_JMP_IMM(BPF_JNE, R0, 0, 15), + BPF_LD_ABS(BPF_H, 12), + BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13), + BPF_MOV32_IMM(R0, 22), + BPF_STX_MEM(BPF_W, R10, R0, -56), + BPF_LDX_MEM(BPF_W, R7, R10, -56), + BPF_LD_IND(BPF_H, R7, 14), + BPF_STX_MEM(BPF_W, R10, R0, -52), + BPF_MOV32_IMM(R0, 17366), + BPF_STX_MEM(BPF_W, R10, R0, -48), + BPF_LDX_MEM(BPF_W, R7, R10, -48), + BPF_LDX_MEM(BPF_W, R0, R10, -52), + BPF_ALU32_REG(BPF_SUB, R0, R7), + BPF_JMP_IMM(BPF_JNE, R0, 0, 2), + BPF_MOV32_IMM(R0, 256), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6}, + { { 38, 256 } } + }, }; static struct net_device dev; -- cgit v1.2.3 From e5a2c899957659cd1a9f789bc462f9c0b35f5150 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Wed, 5 Nov 2014 00:23:04 +0100 Subject: fast_hash: avoid indirect function calls By default the arch_fast_hash hashing function pointers are initialized to jhash(2). If during boot-up a CPU with SSE4.2 is detected they get updated to the CRC32 ones. This dispatching scheme incurs a function pointer lookup and indirect call for every hashing operation. rhashtable as a user of arch_fast_hash e.g. stores pointers to hashing functions in its structure, too, causing two indirect branches per hashing operation. Using alternative_call we can get away with one of those indirect branches. Acked-by: Daniel Borkmann Cc: Thomas Graf Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- arch/x86/include/asm/hash.h | 51 ++++++++++++++++++++++++++++++++++++++++----- arch/x86/lib/hash.c | 29 +++++++++++++++----------- include/asm-generic/hash.h | 36 ++++++++++++++++++++++++++++++-- include/linux/hash.h | 34 ------------------------------ lib/Makefile | 2 +- lib/hash.c | 39 ---------------------------------- 6 files changed, 98 insertions(+), 93 deletions(-) delete mode 100644 lib/hash.c (limited to 'lib') diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h index e8c58f88b1d4..a881d784f044 100644 --- a/arch/x86/include/asm/hash.h +++ b/arch/x86/include/asm/hash.h @@ -1,7 +1,48 @@ -#ifndef _ASM_X86_HASH_H -#define _ASM_X86_HASH_H +#ifndef __ASM_X86_HASH_H +#define __ASM_X86_HASH_H -struct fast_hash_ops; -extern void setup_arch_fast_hash(struct fast_hash_ops *ops); +#include +#include -#endif /* _ASM_X86_HASH_H */ +u32 __intel_crc4_2_hash(const void *data, u32 len, u32 seed); +u32 __intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed); + +/* + * non-inline versions of jhash so gcc does not need to generate + * duplicate code in every object file + */ +u32 __jhash(const void *data, u32 len, u32 seed); +u32 __jhash2(const u32 *data, u32 len, u32 seed); + +/* + * for documentation of these functions please look into + * + */ + +static inline u32 arch_fast_hash(const void *data, u32 len, u32 seed) +{ + u32 hash; + + alternative_call(__jhash, __intel_crc4_2_hash, X86_FEATURE_XMM4_2, +#ifdef CONFIG_X86_64 + "=a" (hash), "D" (data), "S" (len), "d" (seed)); +#else + "=a" (hash), "a" (data), "d" (len), "c" (seed)); +#endif + return hash; +} + +static inline u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed) +{ + u32 hash; + + alternative_call(__jhash2, __intel_crc4_2_hash2, X86_FEATURE_XMM4_2, +#ifdef CONFIG_X86_64 + "=a" (hash), "D" (data), "S" (len), "d" (seed)); +#else + "=a" (hash), "a" (data), "d" (len), "c" (seed)); +#endif + return hash; +} + +#endif /* __ASM_X86_HASH_H */ diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c index ff4fa51a5b1f..e14327198835 100644 --- a/arch/x86/lib/hash.c +++ b/arch/x86/lib/hash.c @@ -31,13 +31,13 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
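For readers unfamiliar with alternative_call(), the difference between the two dispatch schemes can be sketched in user space. The following is an illustration only, not kernel code: hash_generic() and hash_crc() are hypothetical stand-ins for __jhash() and __intel_crc4_2_hash(), and resolving one function pointer once at startup models what alternative_call() achieves by patching the call site at boot (after patching, the kernel version has no pointer left at all).

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for __jhash() / __intel_crc4_2_hash(). */
	static uint32_t hash_generic(const void *data, uint32_t len, uint32_t seed)
	{
		const unsigned char *p = data;

		while (len--)
			seed = seed * 31 + *p++;
		return seed;
	}

	static uint32_t hash_crc(const void *data, uint32_t len, uint32_t seed)
	{
		return hash_generic(data, len, seed ^ 0xdeadbeef);
	}

	/*
	 * One function pointer, resolved once at startup.  The old
	 * fast_hash_ops scheme paid this indirection on every call, plus a
	 * second one when callers (e.g. rhashtable) stored the hash function
	 * in their own structure.
	 */
	static uint32_t (*fast_hash)(const void *, uint32_t, uint32_t);

	static void hash_init(void)
	{
	#if defined(__x86_64__) || defined(__i386__)
		fast_hash = __builtin_cpu_supports("sse4.2") ? hash_crc
							     : hash_generic;
	#else
		fast_hash = hash_generic;
	#endif
	}

	int main(void)
	{
		hash_init();
		printf("%08x\n", (unsigned)fast_hash("abc", 3, 0));
		return 0;
	}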
*/ -#include -#include - #include #include #include +#include +#include + static inline u32 crc32_u32(u32 crc, u32 val) { #ifdef CONFIG_AS_CRC32 @@ -48,7 +48,7 @@ static inline u32 crc32_u32(u32 crc, u32 val) return crc; } -static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed) +u32 __intel_crc4_2_hash(const void *data, u32 len, u32 seed) { const u32 *p32 = (const u32 *) data; u32 i, tmp = 0; @@ -71,22 +71,27 @@ static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed) return seed; } +EXPORT_SYMBOL(__intel_crc4_2_hash); -static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed) +u32 __intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed) { - const u32 *p32 = (const u32 *) data; u32 i; for (i = 0; i < len; i++) - seed = crc32_u32(seed, *p32++); + seed = crc32_u32(seed, *data++); return seed; } +EXPORT_SYMBOL(__intel_crc4_2_hash2); -void __init setup_arch_fast_hash(struct fast_hash_ops *ops) +u32 __jhash(const void *data, u32 len, u32 seed) { - if (cpu_has_xmm4_2) { - ops->hash = intel_crc4_2_hash; - ops->hash2 = intel_crc4_2_hash2; - } + return jhash(data, len, seed); +} +EXPORT_SYMBOL(__jhash); + +u32 __jhash2(const u32 *data, u32 len, u32 seed) +{ + return jhash2(data, len, seed); } +EXPORT_SYMBOL(__jhash2); diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h index b6312843dbd9..3c82760ff2a4 100644 --- a/include/asm-generic/hash.h +++ b/include/asm-generic/hash.h @@ -1,9 +1,41 @@ #ifndef __ASM_GENERIC_HASH_H #define __ASM_GENERIC_HASH_H -struct fast_hash_ops; -static inline void setup_arch_fast_hash(struct fast_hash_ops *ops) +#include + +/** + * arch_fast_hash - Caclulates a hash over a given buffer that can have + * arbitrary size. This function will eventually use an + * architecture-optimized hashing implementation if + * available, and trades off distribution for speed. + * + * @data: buffer to hash + * @len: length of buffer in bytes + * @seed: start seed + * + * Returns 32bit hash. + */ +static inline u32 arch_fast_hash(const void *data, u32 len, u32 seed) +{ + return jhash(data, len, seed); +} + +/** + * arch_fast_hash2 - Caclulates a hash over a given buffer that has a + * size that is of a multiple of 32bit words. This + * function will eventually use an architecture- + * optimized hashing implementation if available, + * and trades off distribution for speed. + * + * @data: buffer to hash (must be 32bit padded) + * @len: number of 32bit words + * @seed: start seed + * + * Returns 32bit hash. + */ +static inline u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed) { + return jhash2(data, len, seed); } #endif /* __ASM_GENERIC_HASH_H */ diff --git a/include/linux/hash.h b/include/linux/hash.h index d0494c399392..6e8fb028848c 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -84,38 +84,4 @@ static inline u32 hash32_ptr(const void *ptr) return (u32)val; } -struct fast_hash_ops { - u32 (*hash)(const void *data, u32 len, u32 seed); - u32 (*hash2)(const u32 *data, u32 len, u32 seed); -}; - -/** - * arch_fast_hash - Caclulates a hash over a given buffer that can have - * arbitrary size. This function will eventually use an - * architecture-optimized hashing implementation if - * available, and trades off distribution for speed. - * - * @data: buffer to hash - * @len: length of buffer in bytes - * @seed: start seed - * - * Returns 32bit hash. 
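For reference, the CRC-based hash above can be modeled in portable user-space C. A sketch, not the kernel implementation: crc32c_u32() emulates the crc32l instruction in software (reflected CRC32C polynomial), and crc_hash() mirrors the whole-word loop and trailing-byte switch of __intel_crc4_2_hash(), assuming 4-byte-aligned input.

	#include <stdint.h>
	#include <stdio.h>

	/* Software CRC32C step standing in for the crc32l instruction. */
	static uint32_t crc32c_u32(uint32_t crc, uint32_t val)
	{
		int i;

		crc ^= val;
		for (i = 0; i < 32; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
		return crc;
	}

	/* Whole 32-bit words first, then fold the 1-3 trailing bytes. */
	static uint32_t crc_hash(const void *data, uint32_t len, uint32_t seed)
	{
		const uint32_t *p32 = data;	/* assumes 4-byte aligned input */
		uint32_t i, tmp = 0;

		for (i = 0; i < len / 4; i++)
			seed = crc32c_u32(seed, *p32++);

		switch (len & 3) {
		case 3:
			tmp |= *((const uint8_t *)p32 + 2) << 16;
			/* fallthrough */
		case 2:
			tmp |= *((const uint8_t *)p32 + 1) << 8;
			/* fallthrough */
		case 1:
			tmp |= *((const uint8_t *)p32);
			seed = crc32c_u32(seed, tmp);
			break;
		}
		return seed;
	}

	int main(void)
	{
		uint32_t buf[2] = { 0x08060000, 0x12345678 };

		/* hash 7 bytes: one full word plus a 3-byte tail */
		printf("%08x\n", (unsigned)crc_hash(buf, sizeof(buf) - 1, 0));
		return 0;
	}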
- */ -extern u32 arch_fast_hash(const void *data, u32 len, u32 seed); - -/** - * arch_fast_hash2 - Caclulates a hash over a given buffer that has a - * size that is of a multiple of 32bit words. This - * function will eventually use an architecture- - * optimized hashing implementation if available, - * and trades off distribution for speed. - * - * @data: buffer to hash (must be 32bit padded) - * @len: number of 32bit words - * @seed: start seed - * - * Returns 32bit hash. - */ -extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed); - #endif /* _LINUX_HASH_H */ diff --git a/lib/Makefile b/lib/Makefile index 7512dc978f18..04e53dd16070 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ - percpu-refcount.o percpu_ida.o hash.o rhashtable.o + percpu-refcount.o percpu_ida.o rhashtable.o obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o obj-y += kstrtox.o diff --git a/lib/hash.c b/lib/hash.c deleted file mode 100644 index fea973f4bd57..000000000000 --- a/lib/hash.c +++ /dev/null @@ -1,39 +0,0 @@ -/* General purpose hashing library - * - * That's a start of a kernel hashing library, which can be extended - * with further algorithms in future. arch_fast_hash{2,}() will - * eventually resolve to an architecture optimized implementation. - * - * Copyright 2013 Francesco Fusco - * Copyright 2013 Daniel Borkmann - * Copyright 2013 Thomas Graf - * Licensed under the GNU General Public License, version 2.0 (GPLv2) - */ - -#include -#include -#include - -static struct fast_hash_ops arch_hash_ops __read_mostly = { - .hash = jhash, - .hash2 = jhash2, -}; - -u32 arch_fast_hash(const void *data, u32 len, u32 seed) -{ - return arch_hash_ops.hash(data, len, seed); -} -EXPORT_SYMBOL_GPL(arch_fast_hash); - -u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed) -{ - return arch_hash_ops.hash2(data, len, seed); -} -EXPORT_SYMBOL_GPL(arch_fast_hash2); - -static int __init hashlib_init(void) -{ - setup_arch_fast_hash(&arch_hash_ops); - return 0; -} -early_initcall(hashlib_init); -- cgit v1.2.3 From 1b2f309d70daf04b6a97b3753e375654532f6207 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 13 Nov 2014 18:11:20 +0800 Subject: rhashtable: Move mutex_is_held under PROVE_LOCKING The rhashtable function mutex_is_held is only used when PROVE_LOCKING is enabled. This patch makes the mutex_is_held field in rhashtable optional depending on PROVE_LOCKING. Signed-off-by: Herbert Xu Signed-off-by: David S. 
Miller --- include/linux/rhashtable.h | 2 ++ lib/rhashtable.c | 8 ++++++++ 2 files changed, 10 insertions(+) (limited to 'lib') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index fb298e9d6d3a..96ce8ceff554 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -65,7 +65,9 @@ struct rhashtable_params { size_t new_size); bool (*shrink_decision)(const struct rhashtable *ht, size_t new_size); +#ifdef CONFIG_PROVE_LOCKING int (*mutex_is_held)(void); +#endif }; /** diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 081be3ba9ea8..c7654b6f5f64 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -532,7 +532,9 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), * .hashfn = arch_fast_hash, + * #ifdef CONFIG_PROVE_LOCKING * .mutex_is_held = &my_mutex_is_held, + * #endif * }; * * Configuration Example 2: Variable length keys @@ -552,7 +554,9 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .head_offset = offsetof(struct test_obj, node), * .hashfn = arch_fast_hash, * .obj_hashfn = my_hash_fn, + * #ifdef CONFIG_PROVE_LOCKING * .mutex_is_held = &my_mutex_is_held, + * #endif * }; */ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) @@ -613,10 +617,12 @@ EXPORT_SYMBOL_GPL(rhashtable_destroy); #define TEST_PTR ((void *) 0xdeadbeef) #define TEST_NEXPANDS 4 +#ifdef CONFIG_PROVE_LOCKING static int test_mutex_is_held(void) { return 1; } +#endif struct test_obj { void *ptr; @@ -767,7 +773,9 @@ static int __init test_rht_init(void) .key_offset = offsetof(struct test_obj, value), .key_len = sizeof(int), .hashfn = arch_fast_hash, +#ifdef CONFIG_PROVE_LOCKING .mutex_is_held = &test_mutex_is_held, +#endif .grow_decision = rht_grow_above_75, .shrink_decision = rht_shrink_below_30, }; -- cgit v1.2.3 From 7b4ce2353467fdab6e003be7a3129fb09b09deac Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 13 Nov 2014 18:11:22 +0800 Subject: rhashtable: Add parent argument to mutex_is_held Currently mutex_is_held can only test locks that are global since it takes no arguments. This prevents rhashtable from being used in places where locks are local, e.g., per-namespace locks. This patch adds a parent field to mutex_is_held and rhashtable_params so that local locks can be used (and tested). Signed-off-by: Herbert Xu Signed-off-by: David S.
Miller --- include/linux/rhashtable.h | 3 ++- lib/rhashtable.c | 4 ++-- net/netfilter/nft_hash.c | 2 +- net/netlink/af_netlink.c | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 96ce8ceff554..473e26bdb91d 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -66,7 +66,8 @@ struct rhashtable_params { bool (*shrink_decision)(const struct rhashtable *ht, size_t new_size); #ifdef CONFIG_PROVE_LOCKING - int (*mutex_is_held)(void); + int (*mutex_is_held)(void *parent); + void *parent; #endif }; diff --git a/lib/rhashtable.c b/lib/rhashtable.c index c7654b6f5f64..4b4b53bfa08b 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -32,7 +32,7 @@ #ifdef CONFIG_PROVE_LOCKING int lockdep_rht_mutex_is_held(const struct rhashtable *ht) { - return ht->p.mutex_is_held(); + return ht->p.mutex_is_held(ht->p.parent); } EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); #endif @@ -618,7 +618,7 @@ EXPORT_SYMBOL_GPL(rhashtable_destroy); #define TEST_NEXPANDS 4 #ifdef CONFIG_PROVE_LOCKING -static int test_mutex_is_held(void) +static int test_mutex_is_held(void *parent) { return 1; } diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index b86305c86048..3f75aaaf9d06 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -154,7 +154,7 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[]) } #ifdef CONFIG_PROVE_LOCKING -static int lockdep_nfnl_lock_is_held(void) +static int lockdep_nfnl_lock_is_held(void *parent) { return lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES); } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 53b8ea793191..9e0628cfdf67 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -115,7 +115,7 @@ DEFINE_MUTEX(nl_sk_hash_lock); EXPORT_SYMBOL_GPL(nl_sk_hash_lock); #ifdef CONFIG_PROVE_LOCKING -static int lockdep_nl_sk_hash_is_held(void) +static int lockdep_nl_sk_hash_is_held(void *parent) { if (debug_locks) return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock); -- cgit v1.2.3 From 6eba82248ef47fd478f940a418429e3ec95cb3db Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Thu, 13 Nov 2014 13:45:46 +0100 Subject: rhashtable: Drop gfp_flags arg in insert/remove functions Reallocation is only required for shrinking and expanding and both rely on a mutex for synchronization and callers of rhashtable_init() are in non atomic context. Therefore, no reason to continue passing allocation hints through the API. Instead, use GFP_KERNEL and add __GFP_NOWARN | __GFP_NORETRY to allow for silent fall back to vzalloc() without the OOM killer jumping in as pointed out by Eric Dumazet and Eric W. Biederman. Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller --- include/linux/rhashtable.h | 10 +++++----- lib/rhashtable.c | 41 +++++++++++++++++------------------------ net/netfilter/nft_hash.c | 4 ++-- net/netlink/af_netlink.c | 4 ++-- 4 files changed, 26 insertions(+), 33 deletions(-) (limited to 'lib') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 473e26bdb91d..b93fd89b2e5e 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -99,16 +99,16 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); -void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t); -bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); +void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); +bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, - struct rhash_head __rcu **pprev, gfp_t flags); + struct rhash_head __rcu **pprev); bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); -int rhashtable_expand(struct rhashtable *ht, gfp_t flags); -int rhashtable_shrink(struct rhashtable *ht, gfp_t flags); +int rhashtable_expand(struct rhashtable *ht); +int rhashtable_shrink(struct rhashtable *ht); void *rhashtable_lookup(const struct rhashtable *ht, const void *key); void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 4b4b53bfa08b..25e4c213b08a 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -107,13 +107,13 @@ static u32 head_hashfn(const struct rhashtable *ht, return obj_hashfn(ht, rht_obj(ht, he), hsize); } -static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags) +static struct bucket_table *bucket_table_alloc(size_t nbuckets) { struct bucket_table *tbl; size_t size; size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); - tbl = kzalloc(size, flags); + tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); if (tbl == NULL) tbl = vzalloc(size); @@ -200,7 +200,6 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, /** * rhashtable_expand - Expand hash table while allowing concurrent lookups * @ht: the hash table to expand - * @flags: allocation flags * * A secondary bucket array is allocated and the hash entries are migrated * while keeping them on both lists until the end of the RCU grace period. @@ -211,7 +210,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, * The caller must ensure that no concurrent table mutations take place. * It is however valid to have concurrent lookups if they are RCU protected. 
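The resizing policy referenced throughout this series is simple to state: expansion doubles the bucket array, shrinking halves it, and both decisions are driven by load factor. A minimal user-space sketch, assuming the 75%/30% thresholds implied by the names rht_grow_above_75() and rht_shrink_below_30() (the kernel versions take the rhashtable itself rather than a bare count):

	#include <stddef.h>
	#include <stdio.h>

	/*
	 * Doubling and halving keep the table size a power of two, so a
	 * hash can be masked into a bucket index instead of modded.
	 */
	static int grow_above_75(size_t nelems, size_t size)
	{
		return nelems > size / 4 * 3;	/* load factor above 75% */
	}

	static int shrink_below_30(size_t nelems, size_t size)
	{
		return nelems < size * 3 / 10;	/* load factor below 30% */
	}

	int main(void)
	{
		size_t size = 64, nelems;

		for (nelems = 0; nelems <= 100; nelems++) {
			if (grow_above_75(nelems, size))
				size *= 2;	/* rhashtable_expand() */
			else if (shrink_below_30(nelems, size))
				size /= 2;	/* rhashtable_shrink() */
		}
		printf("final size: %zu for %zu entries\n", size, nelems - 1);
		return 0;
	}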
*/ -int rhashtable_expand(struct rhashtable *ht, gfp_t flags) +int rhashtable_expand(struct rhashtable *ht) { struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); struct rhash_head *he; @@ -223,7 +222,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags) if (ht->p.max_shift && ht->shift >= ht->p.max_shift) return 0; - new_tbl = bucket_table_alloc(old_tbl->size * 2, flags); + new_tbl = bucket_table_alloc(old_tbl->size * 2); if (new_tbl == NULL) return -ENOMEM; @@ -281,7 +280,6 @@ EXPORT_SYMBOL_GPL(rhashtable_expand); /** * rhashtable_shrink - Shrink hash table while allowing concurrent lookups * @ht: the hash table to shrink - * @flags: allocation flags * * This function may only be called in a context where it is safe to call * synchronize_rcu(), e.g. not within a rcu_read_lock() section. @@ -289,7 +287,7 @@ EXPORT_SYMBOL_GPL(rhashtable_expand); * The caller must ensure that no concurrent table mutations take place. * It is however valid to have concurrent lookups if they are RCU protected. */ -int rhashtable_shrink(struct rhashtable *ht, gfp_t flags) +int rhashtable_shrink(struct rhashtable *ht) { struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht); struct rhash_head __rcu **pprev; @@ -300,7 +298,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags) if (ht->shift <= ht->p.min_shift) return 0; - ntbl = bucket_table_alloc(tbl->size / 2, flags); + ntbl = bucket_table_alloc(tbl->size / 2); if (ntbl == NULL) return -ENOMEM; @@ -341,7 +339,6 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink); * rhashtable_insert - insert object into hash hash table * @ht: hash table * @obj: pointer to hash head inside object - * @flags: allocation flags (table expansion) * * Will automatically grow the table via rhashtable_expand() if the the * grow_decision function specified at rhashtable_init() returns true. @@ -349,8 +346,7 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink); * The caller must ensure that no concurrent table mutations occur. It is * however valid to have concurrent lookups if they are RCU protected. */ -void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, - gfp_t flags) +void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) { struct bucket_table *tbl = rht_dereference(ht->tbl, ht); u32 hash; @@ -363,7 +359,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, ht->nelems++; if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) - rhashtable_expand(ht, flags); + rhashtable_expand(ht); } EXPORT_SYMBOL_GPL(rhashtable_insert); @@ -372,14 +368,13 @@ EXPORT_SYMBOL_GPL(rhashtable_insert); * @ht: hash table * @obj: pointer to hash head inside object * @pprev: pointer to previous element - * @flags: allocation flags (table expansion) * * Identical to rhashtable_remove() but caller is alreayd aware of the element * in front of the element to be deleted. This is in particular useful for * deletion when combined with walking or lookup. 
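The pprev convention documented here is the classic removal idiom for singly linked chains: keep a pointer to the previous "next" slot while walking, and a single store unlinks the victim. A self-contained user-space sketch; struct node and remove_key() are illustrative names, not kernel API:

	#include <stdlib.h>

	struct node {
		int key;
		struct node *next;
	};

	/*
	 * Walk the chain via the address of each "next" slot.  With the
	 * pprev variant above, the caller already holds that slot from an
	 * earlier walk or lookup, so no re-walk is needed at all.
	 */
	static int remove_key(struct node **head, int key)
	{
		struct node **pprev;

		for (pprev = head; *pprev; pprev = &(*pprev)->next) {
			if ((*pprev)->key == key) {
				struct node *victim = *pprev;

				*pprev = victim->next;	/* unlink */
				free(victim);
				return 1;
			}
		}
		return 0;
	}

	int main(void)
	{
		struct node *head = NULL;
		int i;

		for (i = 0; i < 3; i++) {
			struct node *n = malloc(sizeof(*n));

			n->key = i;
			n->next = head;
			head = n;
		}
		return remove_key(&head, 1) ? 0 : 1;
	}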
*/ void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, - struct rhash_head __rcu **pprev, gfp_t flags) + struct rhash_head __rcu **pprev) { struct bucket_table *tbl = rht_dereference(ht->tbl, ht); @@ -390,7 +385,7 @@ void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) - rhashtable_shrink(ht, flags); + rhashtable_shrink(ht); } EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); @@ -398,7 +393,6 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); * rhashtable_remove - remove object from hash table * @ht: hash table * @obj: pointer to hash head inside object - * @flags: allocation flags (table expansion) * * Since the hash chain is single linked, the removal operation needs to * walk the bucket chain upon removal. The removal operation is thus @@ -410,8 +404,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); * The caller must ensure that no concurrent table mutations occur. It is * however valid to have concurrent lookups if they are RCU protected. */ -bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj, - gfp_t flags) +bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) { struct bucket_table *tbl = rht_dereference(ht->tbl, ht); struct rhash_head __rcu **pprev; @@ -429,7 +422,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj, continue; } - rhashtable_remove_pprev(ht, he, pprev, flags); + rhashtable_remove_pprev(ht, he, pprev); return true; } @@ -576,7 +569,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) if (params->nelem_hint) size = rounded_hashtable_size(params); - tbl = bucket_table_alloc(size, GFP_KERNEL); + tbl = bucket_table_alloc(size); if (tbl == NULL) return -ENOMEM; @@ -713,7 +706,7 @@ static int __init test_rhashtable(struct rhashtable *ht) obj->ptr = TEST_PTR; obj->value = i * 2; - rhashtable_insert(ht, &obj->node, GFP_KERNEL); + rhashtable_insert(ht, &obj->node); } rcu_read_lock(); @@ -724,7 +717,7 @@ static int __init test_rhashtable(struct rhashtable *ht) for (i = 0; i < TEST_NEXPANDS; i++) { pr_info(" Table expansion iteration %u...\n", i); - rhashtable_expand(ht, GFP_KERNEL); + rhashtable_expand(ht); rcu_read_lock(); pr_info(" Verifying lookups...\n"); @@ -734,7 +727,7 @@ static int __init test_rhashtable(struct rhashtable *ht) for (i = 0; i < TEST_NEXPANDS; i++) { pr_info(" Table shrinkage iteration %u...\n", i); - rhashtable_shrink(ht, GFP_KERNEL); + rhashtable_shrink(ht); rcu_read_lock(); pr_info(" Verifying lookups...\n"); @@ -749,7 +742,7 @@ static int __init test_rhashtable(struct rhashtable *ht) obj = rhashtable_lookup(ht, &key); BUG_ON(!obj); - rhashtable_remove(ht, &obj->node, GFP_KERNEL); + rhashtable_remove(ht, &obj->node); kfree(obj); } diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 3f75aaaf9d06..1e316ce4cb5d 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -65,7 +65,7 @@ static int nft_hash_insert(const struct nft_set *set, if (set->flags & NFT_SET_MAP) nft_data_copy(he->data, &elem->data); - rhashtable_insert(priv, &he->node, GFP_KERNEL); + rhashtable_insert(priv, &he->node); return 0; } @@ -88,7 +88,7 @@ static void nft_hash_remove(const struct nft_set *set, pprev = elem->cookie; he = rht_dereference((*pprev), priv); - rhashtable_remove_pprev(priv, he, pprev, GFP_KERNEL); + rhashtable_remove_pprev(priv, he, pprev); synchronize_rcu(); kfree(he); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 
9e0628cfdf67..a491c1a4861f 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1092,7 +1092,7 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid) nlk_sk(sk)->portid = portid; sock_hold(sk); - rhashtable_insert(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL); + rhashtable_insert(&table->hash, &nlk_sk(sk)->node); err = 0; err: mutex_unlock(&nl_sk_hash_lock); @@ -1105,7 +1105,7 @@ static void netlink_remove(struct sock *sk) mutex_lock(&nl_sk_hash_lock); table = &nl_table[sk->sk_protocol]; - if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL)) { + if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) { WARN_ON(atomic_read(&sk->sk_refcnt) == 1); __sock_put(sk); } -- cgit v1.2.3 From a77f9c5dcdf8480a93332792c336fa2bf9d31229 Mon Sep 17 00:00:00 2001 From: Jay Vosburgh Date: Fri, 14 Nov 2014 11:05:06 -0800 Subject: Revert "fast_hash: avoid indirect function calls" This reverts commit e5a2c899957659cd1a9f789bc462f9c0b35f5150. Commit e5a2c899 introduced an alternative_call, arch_fast_hash2, that selects between __jhash2 and __intel_crc4_2_hash based on the X86_FEATURE_XMM4_2. Unfortunately, the alternative_call system does not appear to be suitable for use with C functions, as register usage is not handled properly for the called functions. The __jhash2 function in particular clobbers registers that are not preserved when called via alternative_call, resulting in a panic for direct callers of arch_fast_hash2 on older CPUs lacking sse4_2. It is possible that __intel_crc4_2_hash works merely by chance because it uses fewer registers. This commit was suggested as the source of the problem by Jesse Gross . Signed-off-by: Jay Vosburgh Signed-off-by: David S. Miller --- arch/x86/include/asm/hash.h | 51 +++++---------------------------------------- arch/x86/lib/hash.c | 29 +++++++++++--------------- include/asm-generic/hash.h | 36 ++------------------------------ include/linux/hash.h | 34 ++++++++++++++++++++++++++++++ lib/Makefile | 2 +- lib/hash.c | 39 ++++++++++++++++++++++++++++++++++ 6 files changed, 93 insertions(+), 98 deletions(-) create mode 100644 lib/hash.c (limited to 'lib') diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h index a881d784f044..e8c58f88b1d4 100644 --- a/arch/x86/include/asm/hash.h +++ b/arch/x86/include/asm/hash.h @@ -1,48 +1,7 @@ -#ifndef __ASM_X86_HASH_H -#define __ASM_X86_HASH_H +#ifndef _ASM_X86_HASH_H +#define _ASM_X86_HASH_H -#include -#include +struct fast_hash_ops; +extern void setup_arch_fast_hash(struct fast_hash_ops *ops); -u32 __intel_crc4_2_hash(const void *data, u32 len, u32 seed); -u32 __intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed); - -/* - * non-inline versions of jhash so gcc does not need to generate - * duplicate code in every object file - */ -u32 __jhash(const void *data, u32 len, u32 seed); -u32 __jhash2(const u32 *data, u32 len, u32 seed); - -/* - * for documentation of these functions please look into - * - */ - -static inline u32 arch_fast_hash(const void *data, u32 len, u32 seed) -{ - u32 hash; - - alternative_call(__jhash, __intel_crc4_2_hash, X86_FEATURE_XMM4_2, -#ifdef CONFIG_X86_64 - "=a" (hash), "D" (data), "S" (len), "d" (seed)); -#else - "=a" (hash), "a" (data), "d" (len), "c" (seed)); -#endif - return hash; -} - -static inline u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed) -{ - u32 hash; - - alternative_call(__jhash2, __intel_crc4_2_hash2, X86_FEATURE_XMM4_2, -#ifdef CONFIG_X86_64 - "=a" (hash), "D" (data), "S" (len), "d" (seed)); -#else - "=a" 
(hash), "a" (data), "d" (len), "c" (seed)); -#endif - return hash; -} - -#endif /* __ASM_X86_HASH_H */ +#endif /* _ASM_X86_HASH_H */ diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c index e14327198835..ff4fa51a5b1f 100644 --- a/arch/x86/lib/hash.c +++ b/arch/x86/lib/hash.c @@ -31,13 +31,13 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include +#include + #include #include #include -#include -#include - static inline u32 crc32_u32(u32 crc, u32 val) { #ifdef CONFIG_AS_CRC32 @@ -48,7 +48,7 @@ static inline u32 crc32_u32(u32 crc, u32 val) return crc; } -u32 __intel_crc4_2_hash(const void *data, u32 len, u32 seed) +static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed) { const u32 *p32 = (const u32 *) data; u32 i, tmp = 0; @@ -71,27 +71,22 @@ u32 __intel_crc4_2_hash(const void *data, u32 len, u32 seed) return seed; } -EXPORT_SYMBOL(__intel_crc4_2_hash); -u32 __intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed) +static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed) { + const u32 *p32 = (const u32 *) data; u32 i; for (i = 0; i < len; i++) - seed = crc32_u32(seed, *data++); + seed = crc32_u32(seed, *p32++); return seed; } -EXPORT_SYMBOL(__intel_crc4_2_hash2); -u32 __jhash(const void *data, u32 len, u32 seed) +void __init setup_arch_fast_hash(struct fast_hash_ops *ops) { - return jhash(data, len, seed); -} -EXPORT_SYMBOL(__jhash); - -u32 __jhash2(const u32 *data, u32 len, u32 seed) -{ - return jhash2(data, len, seed); + if (cpu_has_xmm4_2) { + ops->hash = intel_crc4_2_hash; + ops->hash2 = intel_crc4_2_hash2; + } } -EXPORT_SYMBOL(__jhash2); diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h index 3c82760ff2a4..b6312843dbd9 100644 --- a/include/asm-generic/hash.h +++ b/include/asm-generic/hash.h @@ -1,41 +1,9 @@ #ifndef __ASM_GENERIC_HASH_H #define __ASM_GENERIC_HASH_H -#include - -/** - * arch_fast_hash - Caclulates a hash over a given buffer that can have - * arbitrary size. This function will eventually use an - * architecture-optimized hashing implementation if - * available, and trades off distribution for speed. - * - * @data: buffer to hash - * @len: length of buffer in bytes - * @seed: start seed - * - * Returns 32bit hash. - */ -static inline u32 arch_fast_hash(const void *data, u32 len, u32 seed) -{ - return jhash(data, len, seed); -} - -/** - * arch_fast_hash2 - Caclulates a hash over a given buffer that has a - * size that is of a multiple of 32bit words. This - * function will eventually use an architecture- - * optimized hashing implementation if available, - * and trades off distribution for speed. - * - * @data: buffer to hash (must be 32bit padded) - * @len: number of 32bit words - * @seed: start seed - * - * Returns 32bit hash. - */ -static inline u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed) +struct fast_hash_ops; +static inline void setup_arch_fast_hash(struct fast_hash_ops *ops) { - return jhash2(data, len, seed); } #endif /* __ASM_GENERIC_HASH_H */ diff --git a/include/linux/hash.h b/include/linux/hash.h index 6e8fb028848c..d0494c399392 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -84,4 +84,38 @@ static inline u32 hash32_ptr(const void *ptr) return (u32)val; } +struct fast_hash_ops { + u32 (*hash)(const void *data, u32 len, u32 seed); + u32 (*hash2)(const u32 *data, u32 len, u32 seed); +}; + +/** + * arch_fast_hash - Caclulates a hash over a given buffer that can have + * arbitrary size. 
This function will eventually use an + * architecture-optimized hashing implementation if + * available, and trades off distribution for speed. + * + * @data: buffer to hash + * @len: length of buffer in bytes + * @seed: start seed + * + * Returns 32bit hash. + */ +extern u32 arch_fast_hash(const void *data, u32 len, u32 seed); + +/** + * arch_fast_hash2 - Caclulates a hash over a given buffer that has a + * size that is of a multiple of 32bit words. This + * function will eventually use an architecture- + * optimized hashing implementation if available, + * and trades off distribution for speed. + * + * @data: buffer to hash (must be 32bit padded) + * @len: number of 32bit words + * @seed: start seed + * + * Returns 32bit hash. + */ +extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed); + #endif /* _LINUX_HASH_H */ diff --git a/lib/Makefile b/lib/Makefile index 04e53dd16070..7512dc978f18 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ - percpu-refcount.o percpu_ida.o rhashtable.o + percpu-refcount.o percpu_ida.o hash.o rhashtable.o obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o obj-y += kstrtox.o diff --git a/lib/hash.c b/lib/hash.c new file mode 100644 index 000000000000..fea973f4bd57 --- /dev/null +++ b/lib/hash.c @@ -0,0 +1,39 @@ +/* General purpose hashing library + * + * That's a start of a kernel hashing library, which can be extended + * with further algorithms in future. arch_fast_hash{2,}() will + * eventually resolve to an architecture optimized implementation. + * + * Copyright 2013 Francesco Fusco + * Copyright 2013 Daniel Borkmann + * Copyright 2013 Thomas Graf + * Licensed under the GNU General Public License, version 2.0 (GPLv2) + */ + +#include +#include +#include + +static struct fast_hash_ops arch_hash_ops __read_mostly = { + .hash = jhash, + .hash2 = jhash2, +}; + +u32 arch_fast_hash(const void *data, u32 len, u32 seed) +{ + return arch_hash_ops.hash(data, len, seed); +} +EXPORT_SYMBOL_GPL(arch_fast_hash); + +u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed) +{ + return arch_hash_ops.hash2(data, len, seed); +} +EXPORT_SYMBOL_GPL(arch_fast_hash2); + +static int __init hashlib_init(void) +{ + setup_arch_fast_hash(&arch_hash_ops); + return 0; +} +early_initcall(hashlib_init); -- cgit v1.2.3 From 3e7b2ec4fe8ef4b05b33db3e84d1b1fbccde250e Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Mon, 24 Nov 2014 12:37:58 +0100 Subject: rhashtable: Check for count mismatch while iterating in selftest Verify whether both the lock and RCU protected iterators see all test entries before and after expanding and shrinking has been performed. Also verify whether the number of entries in the hashtable remains stable during expansion and shrinking. Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller --- lib/rhashtable.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index e5f5e69c7a7b..c7e987ab3361 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -653,15 +653,15 @@ static int __init test_rht_lookup(struct rhashtable *ht) return 0; } -static void test_bucket_stats(struct rhashtable *ht, - struct bucket_table *tbl, - bool quiet) +static void test_bucket_stats(struct rhashtable *ht, bool quiet) { - unsigned int cnt, i, total = 0; + unsigned int cnt, rcu_cnt, i, total = 0; struct test_obj *obj; + struct bucket_table *tbl; + tbl = rht_dereference_rcu(ht->tbl, ht); for (i = 0; i < tbl->size; i++) { - cnt = 0; + rcu_cnt = cnt = 0; if (!quiet) pr_info(" [%#4x/%zu]", i, tbl->size); @@ -673,6 +673,13 @@ static void test_bucket_stats(struct rhashtable *ht, pr_cont(" [%p],", obj); } + rht_for_each_entry_rcu(obj, tbl->buckets[i], node) + rcu_cnt++; + + if (rcu_cnt != cnt) + pr_warn("Test failed: Chain count mismach %d != %d", + cnt, rcu_cnt); + if (!quiet) pr_cont("\n [%#x] first element: %p, chain length: %u\n", i, tbl->buckets[i], cnt); @@ -680,6 +687,9 @@ static void test_bucket_stats(struct rhashtable *ht, pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n", total, ht->nelems, TEST_ENTRIES); + + if (total != ht->nelems || total != TEST_ENTRIES) + pr_warn("Test failed: Total count mismatch ^^^"); } static int __init test_rhashtable(struct rhashtable *ht) @@ -710,8 +720,7 @@ static int __init test_rhashtable(struct rhashtable *ht) } rcu_read_lock(); - tbl = rht_dereference_rcu(ht->tbl, ht); - test_bucket_stats(ht, tbl, true); + test_bucket_stats(ht, true); test_rht_lookup(ht); rcu_read_unlock(); @@ -735,6 +744,10 @@ static int __init test_rhashtable(struct rhashtable *ht) rcu_read_unlock(); } + rcu_read_lock(); + test_bucket_stats(ht, true); + rcu_read_unlock(); + pr_info(" Deleting %d keys\n", TEST_ENTRIES); for (i = 0; i < TEST_ENTRIES; i++) { u32 key = i * 2; -- cgit v1.2.3 From 6867b17b26d80cfd419e491141feb75082915979 Mon Sep 17 00:00:00 2001 From: Denis Kirjanov Date: Mon, 1 Dec 2014 13:12:25 +0300 Subject: test: bpf: expand DIV_KX to DIV_MOD_KX Expand DIV_KX to use BPF_MOD operation in the DIV_KX bpf 'classic' test. CC: Alexei Starovoitov Signed-off-by: Denis Kirjanov Signed-off-by: David S. 
Miller --- lib/test_bpf.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 3f167d2eeb94..80d78c51f65f 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -124,7 +124,7 @@ static struct bpf_test tests[] = { { { 0, 0xfffffffd } } }, { - "DIV_KX", + "DIV_MOD_KX", .u.insns = { BPF_STMT(BPF_LD | BPF_IMM, 8), BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2), @@ -134,12 +134,18 @@ static struct bpf_test tests[] = { BPF_STMT(BPF_MISC | BPF_TAX, 0), BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), + BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), + BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x70000000), BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), BPF_STMT(BPF_RET | BPF_A, 0) }, CLASSIC | FLAG_NO_DATA, { }, - { { 0, 0x40000001 } } + { { 0, 0x20000000 } } }, { "AND_OR_LSH_K", -- cgit v1.2.3 From 218321e7a0838c2be974539f0a5341b398d4432b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 24 Nov 2014 19:45:05 -0500 Subject: bury memcpy_toiovec() no users left Signed-off-by: Al Viro --- include/linux/uio.h | 1 - lib/iovec.c | 25 ------------------------- 2 files changed, 26 deletions(-) (limited to 'lib') diff --git a/include/linux/uio.h b/include/linux/uio.h index bd8569a14c4a..a41e252396c0 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -131,7 +131,6 @@ size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); -int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, int offset, int len); int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, diff --git a/lib/iovec.c b/lib/iovec.c index df3abd1eaa4a..2d99cb4a5006 100644 --- a/lib/iovec.c +++ b/lib/iovec.c @@ -27,31 +27,6 @@ int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len) } EXPORT_SYMBOL(memcpy_fromiovec); -/* - * Copy kernel to iovec. Returns -EFAULT on error. - * - * Note: this modifies the original iovec. - */ - -int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len) -{ - while (len > 0) { - if (iov->iov_len) { - int copy = min_t(unsigned int, iov->iov_len, len); - if (copy_to_user(iov->iov_base, kdata, copy)) - return -EFAULT; - kdata += copy; - len -= copy; - iov->iov_len -= copy; - iov->iov_base += copy; - } - iov++; - } - - return 0; -} -EXPORT_SYMBOL(memcpy_toiovec); - /* * Copy kernel to iovec. Returns -EFAULT on error. */ -- cgit v1.2.3 From 87545899b52f9c8b1621be4347f443890c0cb196 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 10 Dec 2014 16:33:11 +0100 Subject: net: replace remaining users of arch_fast_hash with jhash This patch effectively reverts commit 500f80872645 ("net: ovs: use CRC32 accelerated flow hash if available"), and other remaining arch_fast_hash() users such as from nfsd via commit 6282cd565553 ("NFSD: Don't hand out delegations for 30 seconds after recalling them.") where it has been used as a hash function for bloom filtering. While we think that these users are actually not much of concern, it has been requested to remove the arch_fast_hash() library bits that arose from [1] entirely as per recent discussion [2]. 
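The linearity argument elaborated below can be made concrete: the CRC32C step distributes over XOR, so flipping an input bit always flips the same fixed output mask regardless of the other bits, with none of the avalanche behaviour jhash provides. A user-space demonstration, with a software CRC32C standing in for the crc32l instruction:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Software model of one crc32l step (reflected CRC32C polynomial). */
	static uint32_t crc32c_u32(uint32_t crc, uint32_t val)
	{
		int i;

		crc ^= val;
		for (i = 0; i < 32; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
		return crc;
	}

	int main(void)
	{
		uint32_t a = 0x12345678, b = 0xcafebabe;

		/*
		 * crc(a ^ b) == crc(a) ^ crc(b) for any a and b (seed 0):
		 * the output difference caused by flipping input bits is
		 * fixed, which a jhash-style mixer deliberately avoids.
		 */
		assert(crc32c_u32(0, a ^ b) ==
		       (crc32c_u32(0, a) ^ crc32c_u32(0, b)));
		printf("CRC32C is linear over XOR\n");
		return 0;
	}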
The main argument is that using it as a hash may introduce bias due to its linearity (see avalanche criterion) and thus makes it less clear (though we tried to document that) when this security/performance trade-off is actually acceptable for a general purpose library function. Lets therefore avoid any further confusion on this matter and remove it to prevent any future accidental misuse of it. For the time being, this is going to make hashing of flow keys a bit more expensive in the ovs case, but future work could reevaluate a different hashing discipline. [1] https://patchwork.ozlabs.org/patch/299369/ [2] https://patchwork.ozlabs.org/patch/418756/ Cc: Neil Brown Cc: Francesco Fusco Cc: Jesse Gross Cc: Thomas Graf Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- fs/nfsd/nfs4state.c | 6 +++--- lib/rhashtable.c | 8 ++++---- net/openvswitch/flow_table.c | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index e9c3afe4b5d3..4e1d7268b004 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -41,7 +41,7 @@ #include #include #include -#include +#include #include "xdr4.h" #include "xdr4cb.h" #include "vfs.h" @@ -594,7 +594,7 @@ static int delegation_blocked(struct knfsd_fh *fh) } spin_unlock(&blocked_delegations_lock); } - hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0); + hash = jhash(&fh->fh_base, fh->fh_size, 0); if (test_bit(hash&255, bd->set[0]) && test_bit((hash>>8)&255, bd->set[0]) && test_bit((hash>>16)&255, bd->set[0])) @@ -613,7 +613,7 @@ static void block_delegations(struct knfsd_fh *fh) u32 hash; struct bloom_pair *bd = &blocked_delegations; - hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0); + hash = jhash(&fh->fh_base, fh->fh_size, 0); spin_lock(&blocked_delegations_lock); __set_bit(hash&255, bd->set[bd->new]); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index c7e987ab3361..6c3c723e902b 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include @@ -524,7 +524,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .head_offset = offsetof(struct test_obj, node), * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), - * .hashfn = arch_fast_hash, + * .hashfn = jhash, * #ifdef CONFIG_PROVE_LOCKING * .mutex_is_held = &my_mutex_is_held, * #endif @@ -545,7 +545,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * * struct rhashtable_params params = { * .head_offset = offsetof(struct test_obj, node), - * .hashfn = arch_fast_hash, + * .hashfn = jhash, * .obj_hashfn = my_hash_fn, * #ifdef CONFIG_PROVE_LOCKING * .mutex_is_held = &my_mutex_is_held, @@ -778,7 +778,7 @@ static int __init test_rht_init(void) .head_offset = offsetof(struct test_obj, node), .key_offset = offsetof(struct test_obj, value), .key_len = sizeof(int), - .hashfn = arch_fast_hash, + .hashfn = jhash, #ifdef CONFIG_PROVE_LOCKING .mutex_is_held = &test_mutex_is_held, #endif diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index e0a7fefc1edf..5899bf161c61 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include @@ -366,7 +366,7 @@ static u32 flow_hash(const struct sw_flow_key *key, int key_start, /* Make sure number of hash bytes are multiple of u32. 
*/ BUILD_BUG_ON(sizeof(long) % sizeof(u32)); - return arch_fast_hash2(hash_key, hash_u32s, 0); + return jhash2(hash_key, hash_u32s, 0); } static int flow_key_start(const struct sw_flow_key *key) -- cgit v1.2.3 From 0cb6c969ed9de43687abdfc63714b6fe4385d2fc Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 10 Dec 2014 16:33:12 +0100 Subject: net, lib: kill arch_fast_hash library bits As there are now no remaining users of arch_fast_hash(), lets kill it entirely. This basically reverts commit 71ae8aac3e19 ("lib: introduce arch optimized hash library") and follow-up work, that is f.e., commit 237217546d44 ("lib: hash: follow-up fixups for arch hash"), commit e3fec2f74f7f ("lib: Add missing arch generic-y entries for asm-generic/hash.h") and last but not least commit 6a02652df511 ("perf tools: Fix include for non x86 architectures"). Cc: Francesco Fusco Cc: Thomas Graf Cc: Arnaldo Carvalho de Melo Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- arch/alpha/include/asm/Kbuild | 1 - arch/arc/include/asm/Kbuild | 1 - arch/arm/include/asm/Kbuild | 1 - arch/arm64/include/asm/Kbuild | 1 - arch/avr32/include/asm/Kbuild | 1 - arch/blackfin/include/asm/Kbuild | 1 - arch/c6x/include/asm/Kbuild | 1 - arch/cris/include/asm/Kbuild | 1 - arch/frv/include/asm/Kbuild | 1 - arch/hexagon/include/asm/Kbuild | 1 - arch/ia64/include/asm/Kbuild | 1 - arch/m32r/include/asm/Kbuild | 1 - arch/m68k/include/asm/Kbuild | 1 - arch/metag/include/asm/Kbuild | 1 - arch/microblaze/include/asm/Kbuild | 1 - arch/mips/include/asm/Kbuild | 1 - arch/mn10300/include/asm/Kbuild | 1 - arch/openrisc/include/asm/Kbuild | 1 - arch/parisc/include/asm/Kbuild | 1 - arch/powerpc/include/asm/Kbuild | 1 - arch/s390/include/asm/Kbuild | 1 - arch/score/include/asm/Kbuild | 1 - arch/sh/include/asm/Kbuild | 1 - arch/sparc/include/asm/Kbuild | 1 - arch/tile/include/asm/Kbuild | 1 - arch/um/include/asm/Kbuild | 1 - arch/unicore32/include/asm/Kbuild | 1 - arch/x86/include/asm/hash.h | 7 --- arch/x86/lib/Makefile | 2 +- arch/x86/lib/hash.c | 92 -------------------------------------- arch/xtensa/include/asm/Kbuild | 1 - include/asm-generic/hash.h | 9 ---- include/linux/hash.h | 35 --------------- lib/Makefile | 2 +- lib/hash.c | 39 ---------------- tools/perf/util/include/asm/hash.h | 6 --- 36 files changed, 2 insertions(+), 218 deletions(-) delete mode 100644 arch/x86/include/asm/hash.h delete mode 100644 arch/x86/lib/hash.c delete mode 100644 include/asm-generic/hash.h delete mode 100644 lib/hash.c delete mode 100644 tools/perf/util/include/asm/hash.h (limited to 'lib') diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index 25b49725df07..76aeb8fa551a 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild @@ -3,7 +3,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index b8fffc1a2ac2..be0c39e76f7c 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -12,7 +12,6 @@ generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 70cd84eb7fda..fe74c0d1e485 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -7,7 +7,6 @@ generic-y += current.h generic-y += 
emergency-restart.h generic-y += errno.h generic-y += exec.h -generic-y += hash.h generic-y += ioctl.h generic-y += ipcbuf.h generic-y += irq_regs.h diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index dc770bd4f5a5..6b61091c7f4c 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -14,7 +14,6 @@ generic-y += early_ioremap.h generic-y += emergency-restart.h generic-y += errno.h generic-y += ftrace.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index 2a71b1cb9848..528d70d47a54 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -7,7 +7,6 @@ generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h generic-y += futex.h -generic-y += hash.h generic-y += irq_regs.h generic-y += irq_work.h generic-y += local.h diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 46ed6bb9c679..4bd3c3cfc9ab 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -10,7 +10,6 @@ generic-y += emergency-restart.h generic-y += errno.h generic-y += fb.h generic-y += futex.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ipcbuf.h diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index e77e0c1dbe75..2de73391b81e 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -15,7 +15,6 @@ generic-y += exec.h generic-y += fb.h generic-y += fcntl.h generic-y += futex.h -generic-y += hash.h generic-y += hw_irq.h generic-y += io.h generic-y += ioctl.h diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index 2ca489eaadd3..d5f124832fd1 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -7,7 +7,6 @@ generic-y += barrier.h generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += kvm_para.h generic-y += linkage.h diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index 3caf05cabfc5..e3f81b53578e 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 5f234a5a2320..c7a99f860b40 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 747320be9d0e..9b41b4bcc073 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += clkdev.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += kvm_para.h generic-y += mcs_spinlock.h diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index 3796801d6e0c..2edc793372fc 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += module.h diff --git a/arch/m68k/include/asm/Kbuild 
b/arch/m68k/include/asm/Kbuild index dbaf9f3065e8..9b6c691874bd 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -6,7 +6,6 @@ generic-y += device.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ipcbuf.h diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index 7b8111c8f937..0bf5d525b945 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -13,7 +13,6 @@ generic-y += fb.h generic-y += fcntl.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 448143b8cabd..ab564a6db5c3 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -4,7 +4,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += device.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 72e1cf1cab00..200efeac4181 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -3,7 +3,6 @@ generic-y += cputime.h generic-y += current.h generic-y += dma-contiguous.h generic-y += emergency-restart.h -generic-y += hash.h generic-y += irq_work.h generic-y += local64.h generic-y += mcs_spinlock.h diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 54a062cb9f2c..f892d9de47d9 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -3,7 +3,6 @@ generic-y += barrier.h generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index 89b61d7dc790..91f1f360a7c4 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -25,7 +25,6 @@ generic-y += fcntl.h generic-y += ftrace.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index ffb024b8423f..8686237a3c3c 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -7,7 +7,6 @@ generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h -generic-y += hash.h generic-y += hw_irq.h generic-y += irq_regs.h generic-y += irq_work.h diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 31e8f59aff38..382b28e364dc 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -1,6 +1,5 @@ generic-y += clkdev.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 773f86676588..c631f98fd524 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += clkdev.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 46461c19f284..83ed116d414c 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -5,7 +5,6 @@ 
header-y += generic-y += barrier.h generic-y += clkdev.h generic-y += cputime.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 5a6c9acff0d2..654ebb6bd5d8 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -8,7 +8,6 @@ generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h generic-y += fcntl.h -generic-y += hash.h generic-y += ioctl.h generic-y += ipcbuf.h generic-y += irq_regs.h diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index f5f94ce1692c..94f36e7086a7 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -6,7 +6,6 @@ generic-y += cputime.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h -generic-y += hash.h generic-y += irq_regs.h generic-y += irq_work.h generic-y += linkage.h diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index e6462b8a6284..b4c488b65745 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -11,7 +11,6 @@ generic-y += errno.h generic-y += exec.h generic-y += fb.h generic-y += fcntl.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 244b12c8cb39..9176fa11d49b 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -10,7 +10,6 @@ generic-y += exec.h generic-y += ftrace.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += io.h generic-y += irq_regs.h diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 5a2bb53faa42..3e0c19d0f4c5 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += fcntl.h generic-y += ftrace.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h deleted file mode 100644 index e8c58f88b1d4..000000000000 --- a/arch/x86/include/asm/hash.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _ASM_X86_HASH_H -#define _ASM_X86_HASH_H - -struct fast_hash_ops; -extern void setup_arch_fast_hash(struct fast_hash_ops *ops); - -#endif /* _ASM_X86_HASH_H */ diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index db92793b7e23..1530afb07c85 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -23,7 +23,7 @@ lib-y += memcpy_$(BITS).o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o -obj-y += msr.o msr-reg.o msr-reg-export.o hash.o +obj-y += msr.o msr-reg.o msr-reg-export.o ifeq ($(CONFIG_X86_32),y) obj-y += atomic64_32.o diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c deleted file mode 100644 index ff4fa51a5b1f..000000000000 --- a/arch/x86/lib/hash.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Some portions derived from code covered by the following notice: - * - * Copyright (c) 2010-2013 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/hash.h>
-#include <linux/init.h>
-
-#include <asm/processor.h>
-#include <asm/cpufeature.h>
-#include <asm/hash.h>
-
-static inline u32 crc32_u32(u32 crc, u32 val)
-{
-#ifdef CONFIG_AS_CRC32
-	asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val));
-#else
-	asm (".byte 0xf2, 0x0f, 0x38, 0xf1, 0xc1" : "+a" (crc) : "c" (val));
-#endif
-	return crc;
-}
-
-static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
-{
-	const u32 *p32 = (const u32 *) data;
-	u32 i, tmp = 0;
-
-	for (i = 0; i < len / 4; i++)
-		seed = crc32_u32(seed, *p32++);
-
-	switch (len & 3) {
-	case 3:
-		tmp |= *((const u8 *) p32 + 2) << 16;
-		/* fallthrough */
-	case 2:
-		tmp |= *((const u8 *) p32 + 1) << 8;
-		/* fallthrough */
-	case 1:
-		tmp |= *((const u8 *) p32);
-		seed = crc32_u32(seed, tmp);
-		break;
-	}
-
-	return seed;
-}
-
-static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
-{
-	const u32 *p32 = (const u32 *) data;
-	u32 i;
-
-	for (i = 0; i < len; i++)
-		seed = crc32_u32(seed, *p32++);
-
-	return seed;
-}
-
-void __init setup_arch_fast_hash(struct fast_hash_ops *ops)
-{
-	if (cpu_has_xmm4_2) {
-		ops->hash = intel_crc4_2_hash;
-		ops->hash2 = intel_crc4_2_hash2;
-	}
-}
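The deleted intel_crc4_2_hash() is easy to study outside the kernel. Below is a rough userspace sketch of the same word-at-a-time CRC32 loop and tail-byte folding, using the SSE4.2 intrinsic _mm_crc32_u32() in place of the kernel's inline asm; the function name, main(), and compile flags are illustrative only, not part of the patch.

/*
 * Userspace sketch of the removed intel_crc4_2_hash(), assuming a
 * CPU with SSE4.2. Build with: gcc -msse4.2 -o crc_sketch crc_sketch.c
 */
#include <stdint.h>
#include <stdio.h>
#include <nmmintrin.h>	/* _mm_crc32_u32 */

static uint32_t crc_hash(const void *data, uint32_t len, uint32_t seed)
{
	const uint32_t *p32 = data;
	uint32_t i, tmp = 0;

	/* Hash the bulk of the buffer one 32-bit word at a time. */
	for (i = 0; i < len / 4; i++)
		seed = _mm_crc32_u32(seed, *p32++);

	/* Fold the 1-3 trailing bytes into a single word so buffers
	 * whose length is not a multiple of 4 are hashed completely. */
	switch (len & 3) {
	case 3:
		tmp |= *((const uint8_t *)p32 + 2) << 16;
		/* fallthrough */
	case 2:
		tmp |= *((const uint8_t *)p32 + 1) << 8;
		/* fallthrough */
	case 1:
		tmp |= *((const uint8_t *)p32);
		seed = _mm_crc32_u32(seed, tmp);
		break;
	}

	return seed;
}

int main(void)
{
	printf("%08x\n", crc_hash("nmap", 4, 0));
	return 0;
}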
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 105d38922c44..86a9ab2e2ca9 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -9,7 +9,6 @@ generic-y += errno.h
 generic-y += exec.h
 generic-y += fcntl.h
 generic-y += hardirq.h
-generic-y += hash.h
 generic-y += ioctl.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h
deleted file mode 100644
index b6312843dbd9..000000000000
--- a/include/asm-generic/hash.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_GENERIC_HASH_H
-#define __ASM_GENERIC_HASH_H
-
-struct fast_hash_ops;
-static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
-{
-}
-
-#endif /* __ASM_GENERIC_HASH_H */
diff --git a/include/linux/hash.h b/include/linux/hash.h
index d0494c399392..1afde47e1528 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -15,7 +15,6 @@
  */
 
 #include <asm/types.h>
-#include <asm/hash.h>
 #include <linux/compiler.h>
 
 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
@@ -84,38 +83,4 @@ static inline u32 hash32_ptr(const void *ptr)
 	return (u32)val;
 }
 
-struct fast_hash_ops {
-	u32 (*hash)(const void *data, u32 len, u32 seed);
-	u32 (*hash2)(const u32 *data, u32 len, u32 seed);
-};
-
-/**
- * arch_fast_hash - Caclulates a hash over a given buffer that can have
- *		    arbitrary size. This function will eventually use an
- *		    architecture-optimized hashing implementation if
- *		    available, and trades off distribution for speed.
- *
- * @data: buffer to hash
- * @len: length of buffer in bytes
- * @seed: start seed
- *
- * Returns 32bit hash.
- */
-extern u32 arch_fast_hash(const void *data, u32 len, u32 seed);
-
-/**
- * arch_fast_hash2 - Caclulates a hash over a given buffer that has a
- *		     size that is of a multiple of 32bit words. This
- *		     function will eventually use an architecture-
- *		     optimized hashing implementation if available,
- *		     and trades off distribution for speed.
- *
- * @data: buffer to hash (must be 32bit padded)
- * @len: number of 32bit words
- * @seed: start seed
- *
- * Returns 32bit hash.
- */
-extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed);
-
 #endif /* _LINUX_HASH_H */
diff --git a/lib/Makefile b/lib/Makefile
index 0211d2bd5e17..4b9baa45a4d9 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o hash.o rhashtable.o reciprocal_div.o
+	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
diff --git a/lib/hash.c b/lib/hash.c
deleted file mode 100644
index fea973f4bd57..000000000000
--- a/lib/hash.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* General purpose hashing library
- *
- * That's a start of a kernel hashing library, which can be extended
- * with further algorithms in future. arch_fast_hash{2,}() will
- * eventually resolve to an architecture optimized implementation.
- *
- * Copyright 2013 Francesco Fusco
- * Copyright 2013 Daniel Borkmann
- * Copyright 2013 Thomas Graf
- * Licensed under the GNU General Public License, version 2.0 (GPLv2)
- */
-
-#include <linux/jhash.h>
-#include <linux/hash.h>
-#include <linux/cache.h>
-
-static struct fast_hash_ops arch_hash_ops __read_mostly = {
-	.hash = jhash,
-	.hash2 = jhash2,
-};
-
-u32 arch_fast_hash(const void *data, u32 len, u32 seed)
-{
-	return arch_hash_ops.hash(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash);
-
-u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
-{
-	return arch_hash_ops.hash2(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash2);
-
-static int __init hashlib_init(void)
-{
-	setup_arch_fast_hash(&arch_hash_ops);
-	return 0;
-}
-early_initcall(hashlib_init);
diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h
deleted file mode 100644
index d82b170bb216..000000000000
--- a/tools/perf/util/include/asm/hash.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_GENERIC_HASH_H
-#define __ASM_GENERIC_HASH_H
-
-/* Stub */
-
-#endif /* __ASM_GENERIC_HASH_H */
--
cgit v1.2.3
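For readers tracking why this infrastructure could go: the deleted lib/hash.c routed every arch_fast_hash() call through a function-pointer table seeded with jhash and optionally repointed at boot, which costs an indirect branch per call. A minimal userspace illustration of that dispatch pattern follows; the mixing function is a self-contained stand-in, not the real jhash, and the names mirror the deleted code only for readability.

#include <stdint.h>
#include <stdio.h>

struct fast_hash_ops {
	uint32_t (*hash)(const void *data, uint32_t len, uint32_t seed);
};

/* Stand-in mixer so the example compiles on its own; not the real jhash. */
static uint32_t pseudo_jhash(const void *data, uint32_t len, uint32_t seed)
{
	const uint8_t *p = data;

	while (len--)
		seed = seed * 31 + *p++;
	return seed;
}

/* Default ops table; boot code could repoint .hash at a CRC32 version. */
static struct fast_hash_ops arch_hash_ops = { .hash = pseudo_jhash };

static uint32_t arch_fast_hash(const void *data, uint32_t len, uint32_t seed)
{
	/* Indirect call on every invocation -- the overhead being removed. */
	return arch_hash_ops.hash(data, len, seed);
}

int main(void)
{
	printf("%08x\n", arch_fast_hash("abc", 3, 0));
	return 0;
}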