author    | Takashi Iwai <tiwai@suse.de> | 2021-10-17 09:48:59 +0200
committer | Takashi Iwai <tiwai@suse.de> | 2021-10-18 13:32:13 +0200
commit    | 2d9ea39917a4e4293bc2caea902c7059a330b611 (patch)
tree      | 07b9ee3a8103a1f1f0872c63caac39321cbe5f13 /sound/core/memalloc.c
parent    | 73325f60e2ed28f04032d43c2828b73776cfefd0 (diff)
download  | lwn-2d9ea39917a4e4293bc2caea902c7059a330b611.tar.gz, lwn-2d9ea39917a4e4293bc2caea902c7059a330b611.zip
ALSA: memalloc: Convert x86 SG-buffer handling with non-contiguous type
We've had x86-specific SG-buffer handling code, but now it can be
merged gracefully with the standard non-contiguous DMA pages.
After the migration, SNDRV_DMA_TYPE_DEV_SG becomes identical to
SNDRV_DMA_TYPE_NONCONTIG on x86, while other architectures still fall
back to SNDRV_DMA_TYPE_DEV.
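
For illustration only (not part of this patch), a driver-side allocation under
the new type mapping might look like the sketch below; alloc_sg_buffer() is a
hypothetical helper, while snd_dma_alloc_pages() and the SNDRV_DMA_TYPE_*
constants are the existing ALSA memalloc API.

#include <linux/device.h>
#include <sound/memalloc.h>

/* Hypothetical helper, shown only to illustrate the type remapping. */
static int alloc_sg_buffer(struct device *dev, size_t size,
			   struct snd_dma_buffer *dmab)
{
	/*
	 * With CONFIG_SND_DMA_SGBUF (x86), SNDRV_DMA_TYPE_DEV_SG is now the
	 * same as SNDRV_DMA_TYPE_NONCONTIG, so this request goes through the
	 * non-contiguous DMA page allocator; on other architectures it falls
	 * back to SNDRV_DMA_TYPE_DEV (contiguous pages).
	 */
	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, dev, size, dmab);
}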
The remaining problem is the SG-buffer with WC pages: the DMA core
code on x86 doesn't handle write-combined pages well, so we still need
some special handling to manipulate the page attributes manually. The
mmap handler for SNDRV_DMA_TYPE_DEV_WC_SG intentionally returns -ENOENT
so that the caller falls back to the default mmap handler.
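
A minimal sketch of that fallback contract, assuming the exported
snd_dma_buffer_mmap() dispatcher; example_mmap_dma_buffer() is a hypothetical
wrapper, and the equivalent fallback in the tree lives in the PCM mmap path
(e.g. snd_pcm_lib_default_mmap()).

#include <linux/errno.h>
#include <linux/mm.h>
#include <sound/memalloc.h>

/* Hypothetical wrapper, illustrating the -ENOENT fallback contract. */
static int example_mmap_dma_buffer(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	int err = snd_dma_buffer_mmap(dmab, area);

	if (err != -ENOENT)
		return err;	/* mapped by the buffer ops, or a hard error */

	/*
	 * -ENOENT means the ops->mmap callback declined: for the WC SG type
	 * it only set the write-combine vm_page_prot, and the caller's
	 * default mmap handler is expected to do the actual mapping.
	 */
	return 0;	/* placeholder for the caller's default mmap path */
}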
Link: https://lore.kernel.org/r/20211017074859.24112-4-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Diffstat (limited to 'sound/core/memalloc.c')
-rw-r--r-- | sound/core/memalloc.c | 51
1 file changed, 47 insertions(+), 4 deletions(-)
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 99681e651223..acdebecf1a2e 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -560,6 +560,50 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
 	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
 };
 
+/* x86-specific SG-buffer with WC pages */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define vmalloc_to_virt(v) (unsigned long)page_to_virt(vmalloc_to_page(v))
+
+static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	void *p = snd_dma_noncontig_alloc(dmab, size);
+	size_t ofs;
+
+	if (!p)
+		return NULL;
+	for (ofs = 0; ofs < size; ofs += PAGE_SIZE)
+		set_memory_uc(vmalloc_to_virt(p + ofs), 1);
+	return p;
+}
+
+static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
+{
+	size_t ofs;
+
+	for (ofs = 0; ofs < dmab->bytes; ofs += PAGE_SIZE)
+		set_memory_wb(vmalloc_to_virt(dmab->area + ofs), 1);
+	snd_dma_noncontig_free(dmab);
+}
+
+static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
+			      struct vm_area_struct *area)
+{
+	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+	/* FIXME: dma_mmap_noncontiguous() works? */
+	return -ENOENT; /* continue with the default mmap handler */
+}
+
+const struct snd_malloc_ops snd_dma_sg_wc_ops = {
+	.alloc = snd_dma_sg_wc_alloc,
+	.free = snd_dma_sg_wc_free,
+	.mmap = snd_dma_sg_wc_mmap,
+	.sync = snd_dma_noncontig_sync,
+	.get_addr = snd_dma_vmalloc_get_addr,
+	.get_page = snd_dma_vmalloc_get_page,
+	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+#endif /* CONFIG_SND_DMA_SGBUF */
+
 /*
  * Non-coherent pages allocator
  */
@@ -619,14 +663,13 @@ static const struct snd_malloc_ops *dma_ops[] = {
 	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
 	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
 	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
+#ifdef CONFIG_SND_DMA_SGBUF
+	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
+#endif
 #ifdef CONFIG_GENERIC_ALLOCATOR
 	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
 #endif /* CONFIG_GENERIC_ALLOCATOR */
 #endif /* CONFIG_HAS_DMA */
-#ifdef CONFIG_SND_DMA_SGBUF
-	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
-	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
-#endif
 };
 
 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)