path: root/include/linux/mbcache.h
author	Jan Kara <jack@suse.cz>	2022-07-12 13:54:29 +0300
committer	Theodore Ts'o <tytso@mit.edu>	2022-08-03 06:56:25 +0300
commit	307af6c879377c1c63e71cbdd978201f9c7ee8df (patch)
tree	254b1af58915e13b0d5924bb4120089c94ec4645 /include/linux/mbcache.h
parent	75896339e43176af078509c1fce94ee6df9ca1a7 (diff)
download	linux-307af6c879377c1c63e71cbdd978201f9c7ee8df.tar.xz
mbcache: automatically delete entries from cache on freeing
Use the fact that entries with elevated refcount are not removed from
the hash and just move removal of the entry from the hash to the entry
freeing time. When doing this we also change the generic code to hold
one reference to the cache entry, not two of them, which makes code
somewhat more obvious.

Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20220712105436.32204-10-jack@suse.cz
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
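To make the new single-reference convention concrete, here is a minimal
sketch of a lookup-side caller after this change. The function
example_lookup is hypothetical; mb_cache_entry_find_first(),
mb_cache_entry_put() and the e_value field are the existing mbcache API.
A successful lookup hands back exactly one reference, and dropping the
last reference is now what unhashes and frees the entry:

	#include <linux/mbcache.h>

	/* Illustrative caller; not part of the patch. */
	static void example_lookup(struct mb_cache *cache, u32 key)
	{
		struct mb_cache_entry *entry;

		/* Returns the entry with one reference held, or NULL. */
		entry = mb_cache_entry_find_first(cache, key);
		if (!entry)
			return;

		/* ... use entry->e_value while our reference pins it ... */

		/*
		 * Drop the reference. If it was the last one, the entry is
		 * unhashed and freed here; there is no separate removal
		 * step anymore.
		 */
		mb_cache_entry_put(cache, entry);
	}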
Diffstat (limited to 'include/linux/mbcache.h')
-rw-r--r--	include/linux/mbcache.h | 24 ++++++++++++++++--------
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 452b579856d4..2da63fd7b98f 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -13,8 +13,16 @@ struct mb_cache;
 struct mb_cache_entry {
 	/* List of entries in cache - protected by cache->c_list_lock */
 	struct list_head	e_list;
-	/* Hash table list - protected by hash chain bitlock */
+	/*
+	 * Hash table list - protected by hash chain bitlock. The entry is
+	 * guaranteed to be hashed while e_refcnt > 0.
+	 */
 	struct hlist_bl_node	e_hash_list;
+	/*
+	 * Entry refcount. Once it reaches zero, entry is unhashed and freed.
+	 * While refcount > 0, the entry is guaranteed to stay in the hash and
+	 * e.g. mb_cache_entry_try_delete() will fail.
+	 */
 	atomic_t		e_refcnt;
 	/* Key in hash - stable during lifetime of the entry */
 	u32			e_key;
@@ -29,20 +37,20 @@ void mb_cache_destroy(struct mb_cache *cache);
 
 int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 			  u64 value, bool reusable);
-void __mb_cache_entry_free(struct mb_cache_entry *entry);
+void __mb_cache_entry_free(struct mb_cache *cache,
+			   struct mb_cache_entry *entry);
 void mb_cache_entry_wait_unused(struct mb_cache_entry *entry);
-static inline int mb_cache_entry_put(struct mb_cache *cache,
-				     struct mb_cache_entry *entry)
+static inline void mb_cache_entry_put(struct mb_cache *cache,
+				      struct mb_cache_entry *entry)
 {
 	unsigned int cnt = atomic_dec_return(&entry->e_refcnt);
 
 	if (cnt > 0) {
-		if (cnt <= 3)
+		if (cnt <= 2)
 			wake_up_var(&entry->e_refcnt);
-		return 0;
+		return;
 	}
-	__mb_cache_entry_free(entry);
-	return 1;
+	__mb_cache_entry_free(cache, entry);
 }
 
 struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
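The wake_up_var() threshold drops from 3 to 2 to match the new counting:
with the generic code holding one reference per entry instead of two, an
entry is "unused" once only the cache's reference and a prospective
deleter's own reference remain, which is the condition
mb_cache_entry_wait_unused() waits for. A sketch of the intended
delete-side pattern, loosely modeled on how ext4's xattr code uses this
API (example_forget is a hypothetical name; the mbcache functions appear
in the diff above):

	#include <linux/mbcache.h>

	/* Illustrative caller; not part of the patch. */
	static void example_forget(struct mb_cache *cache, u32 key, u64 value)
	{
		struct mb_cache_entry *entry;

		/* NULL means the entry was unused and has been deleted. */
		while ((entry = mb_cache_entry_delete_or_get(cache, key, value))) {
			/*
			 * Someone still holds a reference. Wait until only
			 * the cache and we reference the entry, then drop
			 * our reference and retry the delete.
			 */
			mb_cache_entry_wait_unused(entry);
			mb_cache_entry_put(cache, entry);
		}
	}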