author		Chris Wilson <chris@chris-wilson.co.uk>		2010-07-28 22:59:02 +0100
committer	Catalin Marinas <catalin.marinas@arm.com>	2010-07-28 22:59:02 +0100
commit		b94de9bb7519f597a3aed521d5eaeb5b02a7cbc0
tree		1c2771372fc23c55dd678080aa6529c98f8de371 /lib
parent		a2b6bf63cb7a3e34bd2e753a6f2c2776b5c8496f
lib/scatterlist: Hook sg_kmalloc into kmemleak (v2)
kmemleak ignores the page allocator, and so it believes the final sub-page
allocation, made with a plain kmalloc(), is decoupled and lost. This leads
to many false positives in code that uses scatterlists.
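To see why these false positives arise, here is a minimal sketch, not part of
the patch (leak_confuser() is a hypothetical name), of the pattern that
confuses kmemleak: the only reference to a kmalloc'd object is stored inside
a raw page allocation, which kmemleak neither tracks nor scans, so the
kmalloc'd object appears unreferenced.

	#include <linux/gfp.h>
	#include <linux/slab.h>

	/*
	 * Hypothetical illustration: the sole pointer to a kmalloc'd
	 * object lives inside a raw page allocation. kmemleak does not
	 * scan the page, never finds the pointer, and reports the
	 * kmalloc'd object as leaked even though it is still reachable.
	 */
	static void *leak_confuser(void)
	{
		/* Not tracked (or scanned) by kmemleak. */
		void **page = (void **) __get_free_page(GFP_KERNEL);

		if (!page)
			return NULL;

		/* Tracked by kmemleak, but its only reference is stored
		 * in the unscanned page above. */
		page[0] = kmalloc(128, GFP_KERNEL);
		return page;
	}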
The options seem to be either to tell kmemleak that the kmalloc is not
leaked, or to notify kmemleak of the page allocations. The danger of the
first approach is that it may hide a real leak, so we choose the latter
(whose downsides, if any, are not yet clear).
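For contrast, the rejected first option would have looked roughly like the
sketch below. sg_kmalloc_hide() is a hypothetical name, but
kmemleak_not_leak() is the real kmemleak annotation: it marks the object so
it is never reported, which is exactly why it would also mask a genuine leak
of that chunk.

	#include <linux/kmemleak.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	/* Sketch of the rejected approach: silence the report on the
	 * kmalloc side instead of teaching kmemleak about the page
	 * allocations that hold the reference. */
	static struct scatterlist *sg_kmalloc_hide(unsigned int nents,
						   gfp_t gfp_mask)
	{
		struct scatterlist *sg;

		sg = kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
		/* Never report this object as leaked; this would also
		 * hide a real leak of the same allocation. */
		kmemleak_not_leak(sg);
		return sg;
	}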
v2: Added comments, as suggested by Catalin.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <jaxboe@fusionio.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'lib')
-rw-r--r--	lib/scatterlist.c	23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 9afa25b52a83..a5ec42868f99 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <linux/highmem.h>
+#include <linux/kmemleak.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -115,17 +116,29 @@ EXPORT_SYMBOL(sg_init_one);
  */
 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
 {
-	if (nents == SG_MAX_SINGLE_ALLOC)
-		return (struct scatterlist *) __get_free_page(gfp_mask);
-	else
+	if (nents == SG_MAX_SINGLE_ALLOC) {
+		/*
+		 * Kmemleak doesn't track page allocations as they are not
+		 * commonly used (in a raw form) for kernel data structures.
+		 * As we chain together a list of pages and then a normal
+		 * kmalloc (tracked by kmemleak), in order for that last
+		 * allocation not to become decoupled (and thus a
+		 * false-positive) we need to inform kmemleak of all the
+		 * intermediate allocations.
+		 */
+		void *ptr = (void *) __get_free_page(gfp_mask);
+		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
+		return ptr;
+	} else
 		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
 }
 
 static void sg_kfree(struct scatterlist *sg, unsigned int nents)
 {
-	if (nents == SG_MAX_SINGLE_ALLOC)
+	if (nents == SG_MAX_SINGLE_ALLOC) {
+		kmemleak_free(sg);
 		free_page((unsigned long) sg);
-	else
+	} else
 		kfree(sg);
 }
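For context, the page-allocation path patched above is only taken when a
scatterlist table needs chaining, i.e. when more entries are requested than
fit in a single page. A hedged sketch of how both allocation paths get
exercised (alloc_big_table() is a hypothetical caller, not part of the
patch):

	#include <linux/scatterlist.h>

	/*
	 * Hypothetical caller: requesting more than SG_MAX_SINGLE_ALLOC
	 * (PAGE_SIZE / sizeof(struct scatterlist)) entries makes
	 * __sg_alloc_table() chain full-page blocks through sg_kmalloc(),
	 * hitting the new kmemleak_alloc() hook, with a final sub-page
	 * kmalloc() for the remainder. Needs a configuration with sg
	 * chaining support (ARCH_HAS_SG_CHAIN at the time of this patch).
	 */
	static int alloc_big_table(struct sg_table *table)
	{
		return sg_alloc_table(table, 2 * SG_MAX_SINGLE_ALLOC,
				      GFP_KERNEL);
	}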