author	David Howells <dhowells@redhat.com>	2021-10-20 17:45:28 +0300
committer	David Howells <dhowells@redhat.com>	2022-01-07 12:22:19 +0300
commit	e8a07c9d22afdace966353231d0273d29efe0890 (patch)
tree	05ac1f37dc2a4d0c699e34cf00e44796169f2c16 /fs/fscache
parent	1e1236b841166f1d2daf36fdf6bb3e656bc5f5ca (diff)
fscache: Implement a hash function
Implement a function to generate hashes.  It needs to be stable over time
and endianness-independent as the hashes will appear on disk in future
patches.  It can assume that its input is a multiple of four bytes in size
and alignment.

This is borrowed from the VFS and simplified.  le32_to_cpu() is added to
make it endianness-independent.

Changes
=======
ver #3:
 - Read the data being hashed in an endianness-independent way[1].
 - Change the size parameter to be in bytes rather than words.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
Link: https://lore.kernel.org/r/CAHk-=whtkzB446+hX0zdLsdcUJsJ=8_-0S1mE_R+YurThfUbLA@mail.gmail.com [1]
Link: https://lore.kernel.org/r/163819586113.215744.1699465806130102367.stgit@warthog.procyon.org.uk/ # v1
Link: https://lore.kernel.org/r/163906888735.143852.10944614318596881429.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/163967082342.1823006.8915671045444488742.stgit@warthog.procyon.org.uk/ # v3
Link: https://lore.kernel.org/r/164021493624.640689.9990442668811178628.stgit@warthog.procyon.org.uk/ # v4
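By way of illustration, a later caller inside fs/fscache might use the new
helper roughly as below.  This is only a sketch: example_hash_key(), the
buffer size and the zero salt are hypothetical, chosen to show the
size/alignment contract stated above, and are not taken from this or any
later patch.

	#include <linux/types.h>
	#include <linux/string.h>
	#include <linux/math.h>
	#include "internal.h"		/* for fscache_hash() */

	/*
	 * Hypothetical caller (not part of this patch): pad a key out to a
	 * multiple of four bytes, as fscache_hash() requires, and hash it.
	 * The __le32 buffer provides the required alignment and zero-fills
	 * the padding so the result is deterministic.
	 */
	static unsigned int example_hash_key(const char *name)
	{
		__le32 buf[16] = {};
		size_t len = strlen(name);
		size_t padded = round_up(len, sizeof(__le32));

		if (padded > sizeof(buf))
			return 0;		/* keep the sketch simple */
		memcpy(buf, name, len);

		/* Salt of 0 here; a real caller might mix in a parent hash. */
		return fscache_hash(0, buf, padded);
	}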
Diffstat (limited to 'fs/fscache')
-rw-r--r--	fs/fscache/internal.h	2
-rw-r--r--	fs/fscache/main.c	40
2 files changed, 42 insertions, 0 deletions
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index ea52f8594a77..f345bdb018ba 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -22,6 +22,8 @@
*/
extern unsigned fscache_debug;
+extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
+
/*
* proc.c
*/
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 819de2ee1276..687b34903d5b 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -25,6 +25,46 @@ struct workqueue_struct *fscache_wq;
EXPORT_SYMBOL(fscache_wq);
/*
+ * Mixing scores (in bits) for (7,20):
+ * Input delta: 1-bit 2-bit
+ * 1 round: 330.3 9201.6
+ * 2 rounds: 1246.4 25475.4
+ * 3 rounds: 1907.1 31295.1
+ * 4 rounds: 2042.3 31718.6
+ * Perfect: 2048 31744
+ * (32*64) (32*31/2 * 64)
+ */
+#define HASH_MIX(x, y, a) \
+ ( x ^= (a), \
+ y ^= x, x = rol32(x, 7),\
+ x += y, y = rol32(y,20),\
+ y *= 9 )
+
+static inline unsigned int fold_hash(unsigned long x, unsigned long y)
+{
+ /* Use arch-optimized multiply if one exists */
+ return __hash_32(y ^ __hash_32(x));
+}
+
+/*
+ * Generate a hash. This is derived from full_name_hash(), but we want to be
+ * sure it is arch independent and that it doesn't change as bits of the
+ * computed hash value might appear on disk. The caller must guarantee that
+ * the source data is a multiple of four bytes in size.
+ */
+unsigned int fscache_hash(unsigned int salt, const void *data, size_t len)
+{
+ const __le32 *p = data;
+ unsigned int a, x = 0, y = salt, n = len / sizeof(__le32);
+
+ for (; n; n--) {
+ a = le32_to_cpu(*p++);
+ HASH_MIX(x, y, a);
+ }
+ return fold_hash(x, y);
+}
+
+/*
* initialise the fs caching module
*/
static int __init fscache_init(void)
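Since the hash must not vary with host byte order, the algorithm can be
modelled in userspace to sanity-check that property.  The sketch below is
an assumption-laden re-implementation, not kernel code: it open-codes
le32_to_cpu() as a byte-wise little-endian read and assumes the generic
__hash_32(), a multiply by GOLDEN_RATIO_32 (0x61C88647), so the printed
value should come out the same on little- and big-endian hosts.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define GOLDEN_RATIO_32 0x61C88647u	/* generic __hash_32() multiplier */

	static uint32_t rol32(uint32_t x, unsigned int r)
	{
		return (x << r) | (x >> (32 - r));
	}

	/* Byte-wise little-endian read: stands in for le32_to_cpu(*p++). */
	static uint32_t read_le32(const unsigned char *p)
	{
		return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
		       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	}

	/* Userspace model of fscache_hash() above. */
	static uint32_t model_hash(uint32_t salt, const void *data, size_t len)
	{
		const unsigned char *p = data;
		uint32_t a, x = 0, y = salt;
		size_t n = len / 4;

		for (; n; n--, p += 4) {
			a = read_le32(p);
			x ^= a;				/* HASH_MIX(x, y, a) */
			y ^= x; x = rol32(x, 7);
			x += y; y = rol32(y, 20);
			y *= 9;
		}
		/* fold_hash(): __hash_32(y ^ __hash_32(x)) */
		return (y ^ (x * GOLDEN_RATIO_32)) * GOLDEN_RATIO_32;
	}

	int main(void)
	{
		/* Hypothetical 8-byte key, zero-padded to a 4-byte multiple. */
		unsigned char key[8] = "volkey";

		printf("hash=%08x\n", (unsigned int)model_hash(0, key, sizeof(key)));
		return 0;
	}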