author | Kalesh Singh <kaleshsingh@google.com> | 2024-01-09 17:22:33 -0800
---|---|---
committer | Andrew Morton <akpm@linux-foundation.org> | 2024-02-21 16:00:01 -0800
commit | 51ae3f4ac5e94334ac6078145a02bc8f30e8528b |
tree | 6c7210cc29de716f6bd18330867a291583382648 /mm/cma.c |
parent | 2597c9947b0174fcc71bdd7ab6cb49c2b4291e95 |
mm/cma: fix placement of trace_cma_alloc_start/finish
The current placement of trace_cma_alloc_start/finish misses the failure
cases: !cma || !cma->count || !cma->bitmap.
trace_cma_alloc_finish is also not emitted for the failure case
where bitmap_count > bitmap_maxno.
Fix these missed cases by moving the start event before the failure
checks and moving the finish event to the out label.
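The shape of the fix generalizes: capture whatever the tracepoint needs before the first early-exit check, emit the start event immediately, and route every exit path (success or failure) through the single out label where the finish event fires, so the two events always pair up. Below is a minimal user-space sketch of that pattern, not the kernel code itself; the demo_alloc()/trace_demo_* names and the struct demo_area type are hypothetical stand-ins for illustration.

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the real tracepoints -- illustration only. */
static void trace_demo_alloc_start(const char *name, unsigned long count)
{
	printf("start:  name=%s count=%lu\n", name ? name : "(null)", count);
}

static void trace_demo_alloc_finish(const char *name, void *page, int ret)
{
	printf("finish: name=%s page=%p ret=%d\n", name ? name : "(null)", page, ret);
}

struct demo_area {
	const char *name;
	unsigned long count;
};

/*
 * Hypothetical allocator with the same control flow: the start event is
 * emitted before any failure check, and the finish event sits at the one
 * out label, so every path (including early failures) emits both events.
 */
static void *demo_alloc(struct demo_area *area, unsigned long count)
{
	void *page = NULL;
	int ret = -1;
	const char *name = area ? area->name : NULL; /* safe even if area is NULL */

	trace_demo_alloc_start(name, count);

	if (!area || !area->count || !count)
		goto out;	/* early failure still reaches the finish event */

	page = calloc(count, 4096);
	ret = page ? 0 : -1;
out:
	trace_demo_alloc_finish(name, page, ret);
	return page;
}

int main(void)
{
	struct demo_area area = { .name = "demo", .count = 8 };
	void *p = demo_alloc(&area, 4);	/* success path: start + finish */

	demo_alloc(NULL, 4);		/* failure path: both events still fire */
	free(p);
	return 0;
}
```

Capturing the name up front matters because cma may be NULL on the failure paths; that is why the patch adds const char *name = cma ? cma->name : NULL before the checks rather than dereferencing cma->name at the start event.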
Link: https://lkml.kernel.org/r/20240110012234.3793639-1-kaleshsingh@google.com
Fixes: 7bc1aec5e287 ("mm: cma: add trace events for CMA alloc perf testing")
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/cma.c')
-rw-r--r-- | mm/cma.c | 8
1 file changed, 4 insertions, 4 deletions
@@ -436,6 +436,9 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 	unsigned long i;
 	struct page *page = NULL;
 	int ret = -ENOMEM;
+	const char *name = cma ? cma->name : NULL;
+
+	trace_cma_alloc_start(name, count, align);
 
 	if (!cma || !cma->count || !cma->bitmap)
 		goto out;
@@ -446,8 +449,6 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 	if (!count)
 		goto out;
 
-	trace_cma_alloc_start(cma->name, count, align);
-
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
 	bitmap_maxno = cma_bitmap_maxno(cma);
@@ -496,8 +497,6 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 		start = bitmap_no + mask + 1;
 	}
 
-	trace_cma_alloc_finish(cma->name, pfn, page, count, align, ret);
-
 	/*
 	 * CMA can allocate multiple page blocks, which results in different
 	 * blocks being marked with different tags.  Reset the tags to ignore
@@ -516,6 +515,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 
 	pr_debug("%s(): returned %p\n", __func__, page);
 out:
+	trace_cma_alloc_finish(name, pfn, page, count, align, ret);
 	if (page) {
 		count_vm_event(CMA_ALLOC_SUCCESS);
 		cma_sysfs_account_success_pages(cma, count);