| author    | Rasmus Villemoes <linux@rasmusvillemoes.dk>              | 2016-05-19 17:10:55 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org>           | 2016-05-19 19:12:14 -0700 |
| commit    | 48a270554a3251681ae11173f2fd6389d943e183 (patch)         |                           |
| tree      | b650279a0e0c255f28ad819593a3c86328821571 /include/linux/slab.h |                     |
| parent    | d64e85d3e1c59c3664b9ec1183052ec4641ea1e2 (diff)          |                           |
include/linux: apply __malloc attribute
Attach the malloc attribute to a few allocation functions. This helps
gcc generate better code by telling it that the return value doesn't
alias any existing pointers (which is even more valuable given the
pessimizations implied by -fno-strict-aliasing).
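For reference, __malloc here is shorthand for gcc's malloc function attribute;
a minimal sketch of the assumed definition and use (the actual macro is defined
outside this patch, in the compiler headers):

	/* assumed definition, along the lines of compiler-gcc.h */
	#define __malloc __attribute__((__malloc__))

	/* tells gcc the returned pointer aliases nothing live at the call site */
	void *my_alloc(size_t size) __malloc;	/* hypothetical example allocator */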
A simple example of what this allows gcc to do can be seen by looking at
the last part of drm_atomic_helper_plane_reset:
	plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
	if (plane->state) {
		plane->state->plane = plane;
		plane->state->rotation = BIT(DRM_ROTATE_0);
	}
which compiles to
e8 99 bf d6 ff callq ffffffff8116d540 <kmem_cache_alloc_trace>
48 85 c0 test %rax,%rax
48 89 83 40 02 00 00 mov %rax,0x240(%rbx)
74 11 je ffffffff814015c4 <drm_atomic_helper_plane_reset+0x64>
48 89 18 mov %rbx,(%rax)
48 8b 83 40 02 00 00 mov 0x240(%rbx),%rax [*]
c7 40 40 01 00 00 00 movl $0x1,0x40(%rax)
With this patch applied, the instruction at [*] is elided, since the
store to plane->state->plane is known to not alter the value of
plane->state.
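The effect can be reproduced outside the kernel with a minimal userspace sketch
(hypothetical names standing in for the drm structures above):

	struct obj;
	struct state { struct obj *owner; int rotation; };
	struct obj   { struct state *state; };

	/* Out-of-line allocator, like kmem_cache_alloc_trace() above: the malloc
	 * attribute is the only aliasing information gcc has about its result. */
	struct state *new_state(void) __attribute__((__malloc__));

	void reset(struct obj *obj)
	{
		obj->state = new_state();
		if (obj->state) {
			obj->state->owner    = obj;	/* store through the fresh pointer */
			obj->state->rotation = 1;	/* no reload of obj->state needed */
		}
	}

Compiling this with and without the attribute (e.g. gcc -O2 -fno-strict-aliasing -S)
should show the reload of obj->state after the first store disappearing, just like
the instruction at [*]; new_state() is deliberately left as an external declaration
so gcc cannot learn anything extra by inlining it.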
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r-- | include/linux/slab.h | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 508bd827e6dc..aeb3e6d00a66 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -315,8 +315,8 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
 void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
@@ -339,8 +339,8 @@ static __always_inline void kfree_bulk(size_t size, void **p)
 }
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
@@ -354,12 +354,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
 #endif
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
-					   int node, size_t size) __assume_slab_alignment;
+					   int node, size_t size) __assume_slab_alignment __malloc;
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -392,10 +392,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
 
 #ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
 #else
 static __always_inline void *
 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)