author		Dave Chinner <dchinner@redhat.com>	2018-03-06 17:03:28 -0800
committer	Darrick J. Wong <darrick.wong@oracle.com>	2018-03-11 20:27:55 -0700
commit		cb0a8d23024e7bd234dea4d0fc5c4902a8dda766
tree		350326308ff764800517d26319de97f6814a6e7c /fs/xfs/kmem.h
parent		0c8efd610b58cb23cefdfa12015799079aef94ae
xfs: fall back to vmalloc when allocating log vector buffers
When using large directory blocks, we regularly see memory allocations of >64k being made for the shadow log vector buffer. When we are under memory pressure, kmalloc() may not be able to find contiguous memory chunks large enough to satisfy these allocations easily, and if memory is fragmented we can potentially stall here.

To avoid this problem, switch the log vector buffer allocation to use kmem_alloc_large(). This allows failed allocations to fall back to vmalloc and so removes the dependency on large contiguous regions of memory being available. This should prevent slowdowns and potential stalls when memory is low and/or fragmented.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
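For reference, kmem_alloc_large() implements a try-kmalloc-first, fall-back-to-vmalloc pattern. The sketch below is a simplified illustration of that pattern, not the exact fs/xfs/kmem.c implementation (the real function also wraps the __vmalloc() call in PF_MEMALLOC_NOFS handling when KM_NOFS is set):

void *
kmem_alloc_large(size_t size, xfs_km_flags_t flags)
{
	void	*ptr;

	/* Try a physically contiguous allocation first, allowing it to fail. */
	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr)
		return ptr;

	/* Fall back to vmalloc, which only needs virtually contiguous pages. */
	return __vmalloc(size, kmem_flags_convert(flags), PAGE_KERNEL);
}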
Diffstat (limited to 'fs/xfs/kmem.h')
-rw-r--r--	fs/xfs/kmem.h	8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 4b87472f35bc..6023b594ead7 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -71,7 +71,7 @@ kmem_flags_convert(xfs_km_flags_t flags)
}
extern void *kmem_alloc(size_t, xfs_km_flags_t);
-extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
+extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
{
@@ -85,6 +85,12 @@ kmem_zalloc(size_t size, xfs_km_flags_t flags)
return kmem_alloc(size, flags | KM_ZERO);
}
+static inline void *
+kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
+{
+ return kmem_alloc_large(size, flags | KM_ZERO);
+}
+
/*
* Zone interfaces
*/
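With kmem_zalloc_large() now defined as a thin wrapper over kmem_alloc_large(), callers that need a large zeroed buffer get the same vmalloc fallback. A hypothetical call site is shown below; the buffer size variable and flag choice are illustrative only and are not taken from this patch:

	/* Illustrative only: allocate a large, zeroed buffer that may be vmalloc-backed. */
	buf = kmem_zalloc_large(buf_size, KM_NOFS);
	if (!buf)
		return -ENOMEM;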