-rw-r--r--	mm/slab.c	16
-rw-r--r--	mm/slab.h	17
2 files changed, 22 insertions, 11 deletions
diff --git a/mm/slab.c b/mm/slab.c
index f7117ad9b3a3..db01e9aae31b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -371,12 +371,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-static inline struct kmem_cache *virt_to_cache(const void *obj)
-{
-	struct page *page = virt_to_head_page(obj);
-	return page->slab_cache;
-}
-
 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 				 unsigned int idx)
 {
@@ -3715,6 +3709,8 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 			s = virt_to_cache(objp);
 		else
 			s = cache_from_obj(orig_s, objp);
+		if (!s)
+			continue;
 
 		debug_check_no_locks_freed(objp, s->object_size);
 		if (!(s->flags & SLAB_DEBUG_OBJECTS))
@@ -3749,6 +3745,10 @@ void kfree(const void *objp)
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
+	if (!c) {
+		local_irq_restore(flags);
+		return;
+	}
 	debug_check_no_locks_freed(objp, c->object_size);
 
 	debug_check_no_obj_freed(objp, c->object_size);
@@ -4219,13 +4219,15 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
  */
 size_t ksize(const void *objp)
 {
+	struct kmem_cache *c;
 	size_t size;
 
 	BUG_ON(!objp);
 	if (unlikely(objp == ZERO_SIZE_PTR))
 		return 0;
 
-	size = virt_to_cache(objp)->object_size;
+	c = virt_to_cache(objp);
+	size = c ? c->object_size : 0;
 	/* We assume that ksize callers could use the whole allocated area,
 	 * so we need to unpoison this area.
 	 */
diff --git a/mm/slab.h b/mm/slab.h
index 4dafae2c8620..739099af6cbb 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -350,10 +350,20 @@ static inline void memcg_link_cache(struct kmem_cache *s)
 
 #endif /* CONFIG_MEMCG_KMEM */
 
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+	struct page *page;
+
+	page = virt_to_head_page(obj);
+	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
+					__func__))
+		return NULL;
+	return page->slab_cache;
+}
+
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
 	struct kmem_cache *cachep;
-	struct page *page;
 
 	/*
 	 * When kmemcg is not being used, both assignments should return the
@@ -367,9 +377,8 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
 		return s;
 
-	page = virt_to_head_page(x);
-	cachep = page->slab_cache;
-	WARN_ONCE(!slab_equal_or_root(cachep, s),
+	cachep = virt_to_cache(x);
+	WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
 		  "%s: Wrong slab cache. %s but object is from %s\n",
 		  __func__, s->name, cachep->name);
 	return cachep;
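
The net effect of the patch is that virt_to_cache() now returns NULL (after a one-time warning) when the pointer does not resolve to a slab page, so every caller must check the result before dereferencing it. A minimal caller sketch of that pattern follows; example_free() is a hypothetical illustration written against the in-kernel mm/slab.h context, not code from this patch:

/*
 * Hypothetical caller sketch (not part of the patch): with the new
 * virt_to_cache(), a bogus free of a non-slab address yields NULL
 * instead of a garbage cache pointer, so the free path bails out
 * rather than dereferencing it.
 */
static void example_free(const void *objp)
{
	struct kmem_cache *c;

	c = virt_to_cache(objp);	/* may now return NULL */
	if (!c)
		return;			/* warning already printed; skip the free */

	/* safe to use c->object_size, c->name, etc. from here on */
}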