summaryrefslogtreecommitdiff
path: root/arch/c6x/mm
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2018-12-14 09:00:40 +0100
committerChristoph Hellwig <hch@lst.de>2018-12-20 08:13:52 +0100
commit518a2f1925c3165befbf06b75e07636549d92c1c (patch)
tree629130481205f514709c3cc84a2f5debed28f8cd /arch/c6x/mm
parent6c503d0d88db9d57c1dc4c87175c94766b6a6c61 (diff)
downloadlwn-518a2f1925c3165befbf06b75e07636549d92c1c.tar.gz
lwn-518a2f1925c3165befbf06b75e07636549d92c1c.zip
dma-mapping: zero memory returned from dma_alloc_*
If we want to map memory from the DMA allocator to userspace, it must be zeroed at allocation time to prevent stale data leaks. We already do this on most common architectures, but some architectures don't do this yet; fix them up, either by passing GFP_ZERO when we use the normal page allocator or by doing a manual memset otherwise. Signed-off-by: Christoph Hellwig <hch@lst.de> Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k] Acked-by: Sam Ravnborg <sam@ravnborg.org> [sparc]
Diffstat (limited to 'arch/c6x/mm')
-rw-r--r--arch/c6x/mm/dma-coherent.c5
1 file changed, 4 insertions, 1 deletion
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index 01305c787201..75b79571732c 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -78,6 +78,7 @@ static void __free_dma_pages(u32 addr, int order)
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, unsigned long attrs)
{
+ void *ret;
u32 paddr;
int order;
@@ -94,7 +95,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (!paddr)
return NULL;
- return phys_to_virt(paddr);
+ ret = phys_to_virt(paddr);
+ memset(ret, 0, 1 << order);
+ return ret;
}
/*