author	Daniel Vetter <daniel.vetter@ffwll.ch>	2022-06-05 17:25:38 +0200
committer	akpm <akpm@linux-foundation.org>	2022-06-16 19:48:29 -0700
commit	a3967244430eb91698ac8dca7db8bd0871251305 (patch)
tree	2c1e68fcdc6f96c6f07a97f0f60e607f4fa11c20	/mm/slab.c
parent	446ec83805ddaab5b8734d30ba4ae8c56739a9b4 (diff)
mm/slab: delete cache_alloc_debugcheck_before()
It only does a might_sleep_if(GFP_RECLAIM) check, which is already covered
by the might_alloc() in slab_pre_alloc_hook(). And all callers of
cache_alloc_debugcheck_before() call that beforehand already.

Link: https://lkml.kernel.org/r/20220605152539.3196045-2-daniel.vetter@ffwll.ch
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
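As context for why the helper is redundant, here is a minimal sketch, assuming the
approximate shape of might_alloc() from include/linux/sched/mm.h around this kernel
version; the role of slab_pre_alloc_hook() is only paraphrased in the comment, and
its real signature in mm/slab.h takes additional arguments:

/*
 * Sketch, not the verbatim kernel source: might_alloc() already performs
 * the same might_sleep_if() check that the deleted
 * cache_alloc_debugcheck_before() did, and slab_pre_alloc_hook() calls
 * might_alloc(flags) before any of the allocation paths touched by this
 * patch run.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}

With that hook running on every allocation, the open-coded check in mm/slab.c adds
no coverage, which is what the diff below removes.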
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	10
1 file changed, 0 insertions, 10 deletions
diff --git a/mm/slab.c b/mm/slab.c
index f8cd00f4ba13..47151fb2b2d2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2958,12 +2958,6 @@ direct_grow:
 	return ac->entry[--ac->avail];
 }
 
-static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
-						gfp_t flags)
-{
-	might_sleep_if(gfpflags_allow_blocking(flags));
-}
-
 #if DEBUG
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 				gfp_t flags, void *objp, unsigned long caller)
@@ -3205,7 +3199,6 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 	if (unlikely(ptr))
 		goto out_hooks;
 
-	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
 	if (nodeid == NUMA_NO_NODE)
@@ -3290,7 +3283,6 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 	if (unlikely(objp))
 		goto out;
 
-	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 	objp = __do_cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
@@ -3527,8 +3519,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	if (!s)
 		return 0;
 
-	cache_alloc_debugcheck_before(s, flags);
-
 	local_irq_disable();
 	for (i = 0; i < size; i++) {
 		void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);