author | Krzysztof Kozlowski <k.kozlowski@samsung.com> | 2016-08-03 13:46:00 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-04 08:50:07 -0400
commit | 00085f1efa387a8ce100e3734920f7639c80caa3 (patch)
tree | 41ff3d6e6884918b4fc4f1ae96a284098167c5b0 /drivers
parent | 1605d2715ad2e67ddd0485a26e05ed670a4285ca (diff)
dma-mapping: use unsigned long for dma_attrs
The dma-mapping core and the implementations do not change the DMA
attributes passed by pointer, so the pointer could at least point to
const data. But the attributes do not need to be wrapped in a bitfield
structure at all; a plain unsigned long bitmask does fine:
1. It is simpler, both for reading the code and for setting attributes.
Instead of initializing local attributes on the stack and passing a
pointer to them through dma_set_attr(), callers just set the bits, as
in the sketch below.
2. It is safer and gives const correctness for free, because the
attributes are passed by value and a callee cannot modify the caller's
copy.
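For a driver that allocates, say, a write-combined buffer with no kernel
mapping, the conversion looks roughly like the sketch below. The function
names old_style_alloc()/new_style_alloc() are made up for illustration;
the "before" half uses the pre-patch <linux/dma-attrs.h> API and the
"after" half the post-patch API, so the two halves would never coexist in
one tree.

	#include <linux/dma-mapping.h>

	/* Before this patch: attributes were a struct passed by pointer. */
	static void *old_style_alloc(struct device *dev, size_t size,
				     dma_addr_t *handle)
	{
		DEFINE_DMA_ATTRS(attrs);	/* struct dma_attrs on the stack */

		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

		return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
	}

	/* After this patch: attributes are a plain bitmask passed by value. */
	static void *new_style_alloc(struct device *dev, size_t size,
				     dma_addr_t *handle)
	{
		unsigned long attrs = DMA_ATTR_WRITE_COMBINE |
				      DMA_ATTR_NO_KERNEL_MAPPING;

		return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, attrs);
	}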
Semantic patches for this change (at least most of them):
virtual patch
virtual context
@r@
identifier f, attrs;
@@
f(...,
- struct dma_attrs *attrs
+ unsigned long attrs
, ...)
{
...
}
@@
identifier r.f;
@@
f(...,
- NULL
+ 0
)
and
// Options: --all-includes
virtual patch
virtual context
@r@
identifier f, attrs;
type t;
@@
t f(..., struct dma_attrs *attrs);
@@
identifier r.f;
@@
f(...,
- NULL
+ 0
)
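Applied to a hypothetical driver helper (foo_unmap() and foo_teardown()
below are not functions touched by this patch), the first pair of rules
rewrites the parameter type in the function definition and turns NULL
into 0 at every call site of the matched function:

	/* Before the conversion: */
	static void foo_unmap(struct device *dev, dma_addr_t addr, size_t size,
			      enum dma_data_direction dir,
			      struct dma_attrs *attrs)
	{
		/* body left untouched by the semantic patch */
	}

	static void foo_teardown(struct device *dev, dma_addr_t addr, size_t size)
	{
		foo_unmap(dev, addr, size, DMA_TO_DEVICE, NULL);
	}

	/* After the conversion: the pointer parameter becomes a plain value
	 * and NULL at the call sites becomes 0. */
	static void foo_unmap(struct device *dev, dma_addr_t addr, size_t size,
			      enum dma_data_direction dir,
			      unsigned long attrs)
	{
		/* body left untouched by the semantic patch */
	}

	static void foo_teardown(struct device *dev, dma_addr_t addr, size_t size)
	{
		foo_unmap(dev, addr, size, DMA_TO_DEVICE, 0);
	}

The second pair of rules does the same keyed on function declarations
rather than definitions, so call sites are converted even when only a
prototype is in scope, which is presumably why it is run with
--all-includes.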
Link: http://lkml.kernel.org/r/1468399300-5399-2-git-send-email-k.kozlowski@samsung.com
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>
Acked-by: Mark Salter <msalter@redhat.com> [c6x]
Acked-by: Jesper Nilsson <jesper.nilsson@axis.com> [cris]
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch> [drm]
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Fabien Dessenne <fabien.dessenne@st.com> [bdisp]
Reviewed-by: Marek Szyprowski <m.szyprowski@samsung.com> [vb2-core]
Acked-by: David Vrabel <david.vrabel@citrix.com> [xen]
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> [xen swiotlb]
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Richard Kuo <rkuo@codeaurora.org> [hexagon]
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
Acked-by: Gerald Schaefer <gerald.schaefer@de.ibm.com> [s390]
Acked-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no> [avr32]
Acked-by: Vineet Gupta <vgupta@synopsys.com> [arc]
Acked-by: Robin Murphy <robin.murphy@arm.com> [arm64 and dma-iommu]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers')
25 files changed, 131 insertions, 169 deletions
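The driver changes below mostly follow a handful of recurring patterns.
As a reading aid, here is a sketch, with made-up foo_* names loosely
modelled on the videobuf2-dma-contig changes, of how an attrs field
stored in a driver structure is declared, filled, and tested after the
conversion:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	struct foo_buf {
		void		*cookie;
		void		*vaddr;
		dma_addr_t	dma_addr;
		unsigned long	attrs;	/* was: struct dma_attrs attrs */
	};

	static int foo_alloc(struct device *dev, struct foo_buf *buf,
			     size_t size, unsigned long attrs)
	{
		buf->attrs = attrs;	/* was: buf->attrs = *attrs */
		buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
					      GFP_KERNEL, buf->attrs);
		if (!buf->cookie)
			return -ENOMEM;

		/* was: if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs)) */
		if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
			buf->vaddr = buf->cookie;

		return 0;
	}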
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index fb49443bfd32..4cfb39d543b4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -52,7 +52,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info, ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie, exynos_gem->dma_addr, exynos_gem->size, - &exynos_gem->dma_attrs); + exynos_gem->dma_attrs); if (ret < 0) { DRM_ERROR("failed to mmap.\n"); return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 8564c3da0d22..4bf00f57ffe8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -17,7 +17,6 @@ #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/dma-mapping.h> -#include <linux/dma-attrs.h> #include <linux/of.h> #include <drm/drmP.h> @@ -235,7 +234,7 @@ struct g2d_data { struct mutex cmdlist_mutex; dma_addr_t cmdlist_pool; void *cmdlist_pool_virt; - struct dma_attrs cmdlist_dma_attrs; + unsigned long cmdlist_dma_attrs; /* runqueue*/ struct g2d_runqueue_node *runqueue_node; @@ -256,13 +255,12 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) int ret; struct g2d_buf_info *buf_info; - init_dma_attrs(&g2d->cmdlist_dma_attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); + g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE; g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE, &g2d->cmdlist_pool, GFP_KERNEL, - &g2d->cmdlist_dma_attrs); + g2d->cmdlist_dma_attrs); if (!g2d->cmdlist_pool_virt) { dev_err(dev, "failed to allocate dma memory\n"); return -ENOMEM; @@ -295,7 +293,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) err: dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, - g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); + g2d->cmdlist_pool, g2d->cmdlist_dma_attrs); return ret; } @@ -309,7 +307,7 @@ static void g2d_fini_cmdlist(struct g2d_data *g2d) dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, - g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); + g2d->cmdlist_pool, g2d->cmdlist_dma_attrs); } } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index cdf9f1af4347..f2ae72ba7d5a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -24,7 +24,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) { struct drm_device *dev = exynos_gem->base.dev; - enum dma_attr attr; + unsigned long attr; unsigned int nr_pages; struct sg_table sgt; int ret = -ENOMEM; @@ -34,7 +34,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) return 0; } - init_dma_attrs(&exynos_gem->dma_attrs); + exynos_gem->dma_attrs = 0; /* * if EXYNOS_BO_CONTIG, fully physically contiguous memory @@ -42,7 +42,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) * as possible. 
*/ if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG)) - dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs); + exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS; /* * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping @@ -54,8 +54,8 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) else attr = DMA_ATTR_NON_CONSISTENT; - dma_set_attr(attr, &exynos_gem->dma_attrs); - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs); + exynos_gem->dma_attrs |= attr; + exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; nr_pages = exynos_gem->size >> PAGE_SHIFT; @@ -67,7 +67,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size, &exynos_gem->dma_addr, GFP_KERNEL, - &exynos_gem->dma_attrs); + exynos_gem->dma_attrs); if (!exynos_gem->cookie) { DRM_ERROR("failed to allocate buffer.\n"); goto err_free; @@ -75,7 +75,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie, exynos_gem->dma_addr, exynos_gem->size, - &exynos_gem->dma_attrs); + exynos_gem->dma_attrs); if (ret < 0) { DRM_ERROR("failed to get sgtable.\n"); goto err_dma_free; @@ -99,7 +99,7 @@ err_sgt_free: sg_free_table(&sgt); err_dma_free: dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, - exynos_gem->dma_addr, &exynos_gem->dma_attrs); + exynos_gem->dma_addr, exynos_gem->dma_attrs); err_free: drm_free_large(exynos_gem->pages); @@ -120,7 +120,7 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem) dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, (dma_addr_t)exynos_gem->dma_addr, - &exynos_gem->dma_attrs); + exynos_gem->dma_attrs); drm_free_large(exynos_gem->pages); } @@ -346,7 +346,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem, ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie, exynos_gem->dma_addr, exynos_gem->size, - &exynos_gem->dma_attrs); + exynos_gem->dma_attrs); if (ret < 0) { DRM_ERROR("failed to mmap.\n"); return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 78100742281d..df7c543d6558 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -50,7 +50,7 @@ struct exynos_drm_gem { void *cookie; void __iomem *kvaddr; dma_addr_t dma_addr; - struct dma_attrs dma_attrs; + unsigned long dma_attrs; struct page **pages; struct sg_table *sgt; }; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index fa2ec0cd00e8..7abc550ebc00 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -54,15 +54,14 @@ struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, obj = &mtk_gem->base; - init_dma_attrs(&mtk_gem->dma_attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs); + mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE; if (!alloc_kmap) - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs); + mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size, &mtk_gem->dma_addr, GFP_KERNEL, - &mtk_gem->dma_attrs); + mtk_gem->dma_attrs); if (!mtk_gem->cookie) { DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size); ret = -ENOMEM; @@ -93,7 +92,7 @@ void mtk_drm_gem_free_object(struct drm_gem_object *obj) drm_prime_gem_destroy(obj, mtk_gem->sg); else 
dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie, - mtk_gem->dma_addr, &mtk_gem->dma_attrs); + mtk_gem->dma_addr, mtk_gem->dma_attrs); /* release file pointer to gem object. */ drm_gem_object_release(obj); @@ -173,7 +172,7 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, vma->vm_pgoff = 0; ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie, - mtk_gem->dma_addr, obj->size, &mtk_gem->dma_attrs); + mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs); if (ret) drm_gem_vm_close(vma); @@ -224,7 +223,7 @@ struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj) ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie, mtk_gem->dma_addr, obj->size, - &mtk_gem->dma_attrs); + mtk_gem->dma_attrs); if (ret) { DRM_ERROR("failed to allocate sgt, %d\n", ret); kfree(sgt); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h index 3a2a5624a1cb..2752718fa5b2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h @@ -35,7 +35,7 @@ struct mtk_drm_gem_obj { void *cookie; void *kvaddr; dma_addr_t dma_addr; - struct dma_attrs dma_attrs; + unsigned long dma_attrs; struct sg_table *sg; }; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 26f859ec24b3..8a0237008f74 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -238,11 +238,10 @@ static int msm_drm_uninit(struct device *dev) } if (priv->vram.paddr) { - DEFINE_DMA_ATTRS(attrs); - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); + unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING; drm_mm_takedown(&priv->vram.mm); dma_free_attrs(dev, priv->vram.size, NULL, - priv->vram.paddr, &attrs); + priv->vram.paddr, attrs); } component_unbind_all(dev, ddev); @@ -310,21 +309,21 @@ static int msm_init_vram(struct drm_device *dev) } if (size) { - DEFINE_DMA_ATTRS(attrs); + unsigned long attrs = 0; void *p; priv->vram.size = size; drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + attrs |= DMA_ATTR_NO_KERNEL_MAPPING; + attrs |= DMA_ATTR_WRITE_COMBINE; /* note that for no-kernel-mapping, the vaddr returned * is bogus, but non-null if allocation succeeded: */ p = dma_alloc_attrs(dev->dev, size, - &priv->vram.paddr, GFP_KERNEL, &attrs); + &priv->vram.paddr, GFP_KERNEL, attrs); if (!p) { dev_err(dev->dev, "failed to allocate VRAM\n"); priv->vram.paddr = 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c index 6b8f2a19b2d9..a6a7fa0d7679 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c @@ -109,7 +109,7 @@ struct gk20a_instmem { u16 iommu_bit; /* Only used by DMA API */ - struct dma_attrs attrs; + unsigned long attrs; }; #define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base) @@ -293,7 +293,7 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory) goto out; dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr, - node->handle, &imem->attrs); + node->handle, imem->attrs); out: return node; @@ -386,7 +386,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, &node->handle, GFP_KERNEL, - &imem->attrs); + imem->attrs); if (!node->base.vaddr) { nvkm_error(subdev, "cannot allocate DMA memory\n"); return -ENOMEM; @@ -597,10 +597,9 @@ 
gk20a_instmem_new(struct nvkm_device *device, int index, nvkm_info(&imem->base.subdev, "using IOMMU\n"); } else { - init_dma_attrs(&imem->attrs); - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs); - dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs); + imem->attrs = DMA_ATTR_NON_CONSISTENT | + DMA_ATTR_WEAK_ORDERING | + DMA_ATTR_WRITE_COMBINE; nvkm_info(&imem->base.subdev, "using DMA API\n"); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 059e902f872d..b70f9423379c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -17,8 +17,6 @@ #include <drm/drm_gem.h> #include <drm/drm_vma_manager.h> -#include <linux/dma-attrs.h> - #include "rockchip_drm_drv.h" #include "rockchip_drm_gem.h" @@ -28,15 +26,14 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, struct drm_gem_object *obj = &rk_obj->base; struct drm_device *drm = obj->dev; - init_dma_attrs(&rk_obj->dma_attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs); + rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE; if (!alloc_kmap) - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs); + rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size, &rk_obj->dma_addr, GFP_KERNEL, - &rk_obj->dma_attrs); + rk_obj->dma_attrs); if (!rk_obj->kvaddr) { DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size); return -ENOMEM; @@ -51,7 +48,7 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) struct drm_device *drm = obj->dev; dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr, - &rk_obj->dma_attrs); + rk_obj->dma_attrs); } static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, @@ -70,7 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, vma->vm_pgoff = 0; ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, - obj->size, &rk_obj->dma_attrs); + obj->size, rk_obj->dma_attrs); if (ret) drm_gem_vm_close(vma); @@ -262,7 +259,7 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr, rk_obj->dma_addr, obj->size, - &rk_obj->dma_attrs); + rk_obj->dma_attrs); if (ret) { DRM_ERROR("failed to allocate sgt, %d\n", ret); kfree(sgt); @@ -276,7 +273,7 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); - if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs)) + if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) return NULL; return rk_obj->kvaddr; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index ad22618473a4..18b3488db4ec 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -23,7 +23,7 @@ struct rockchip_gem_object { void *kvaddr; dma_addr_t dma_addr; - struct dma_attrs dma_attrs; + unsigned long dma_attrs; }; struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj); diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index fe4d2e1a8b58..c68746ce6624 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -37,7 +37,6 @@ #include <linux/sched.h> #include <linux/export.h> #include <linux/hugetlb.h> -#include <linux/dma-attrs.h> #include <linux/slab.h> #include 
<rdma/ib_umem_odp.h> @@ -92,12 +91,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, unsigned long npages; int ret; int i; - DEFINE_DMA_ATTRS(attrs); + unsigned long dma_attrs = 0; struct scatterlist *sg, *sg_list_start; int need_release = 0; if (dmasync) - dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); + dma_attrs |= DMA_ATTR_WRITE_BARRIER; if (!size) return ERR_PTR(-EINVAL); @@ -215,7 +214,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, umem->sg_head.sgl, umem->npages, DMA_BIDIRECTIONAL, - &attrs); + dma_attrs); if (umem->nmap <= 0) { ret = -ENOMEM; diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 33c177ba93be..96de97a46079 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2375,7 +2375,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, static dma_addr_t map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { phys_addr_t paddr = page_to_phys(page) + offset; struct protection_domain *domain; @@ -2398,7 +2398,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, * The exported unmap_single function for dma_ops. */ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { struct protection_domain *domain; struct dma_ops_domain *dma_dom; @@ -2444,7 +2444,7 @@ static int sg_num_pages(struct device *dev, */ static int map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { int mapped_pages = 0, npages = 0, prot = 0, i; struct protection_domain *domain; @@ -2525,7 +2525,7 @@ out_err: */ static void unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct protection_domain *domain; struct dma_ops_domain *dma_dom; @@ -2548,7 +2548,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, */ static void *alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { u64 dma_mask = dev->coherent_dma_mask; struct protection_domain *domain; @@ -2604,7 +2604,7 @@ out_free: */ static void free_coherent(struct device *dev, size_t size, void *virt_addr, dma_addr_t dma_addr, - struct dma_attrs *attrs) + unsigned long attrs) { struct protection_domain *domain; struct dma_ops_domain *dma_dom; diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index ea5a9ebf0f78..08a1e2f3690f 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -286,7 +286,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size, * or NULL on failure. 
*/ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, - struct dma_attrs *attrs, int prot, dma_addr_t *handle, + unsigned long attrs, int prot, dma_addr_t *handle, void (*flush_page)(struct device *, const void *, phys_addr_t)) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); @@ -306,7 +306,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, } else { size = ALIGN(size, min_size); } - if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) + if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) alloc_sizes = min_size; count = PAGE_ALIGN(size) >> PAGE_SHIFT; @@ -400,7 +400,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, } void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); } @@ -560,7 +560,7 @@ out_restore_sg: } void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { /* * The scatterlist segments are mapped into a single diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index afbaa2c69a59..ebb5bf3ddbd9 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3552,7 +3552,7 @@ error: static dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { return __intel_map_single(dev, page_to_phys(page) + offset, size, dir, *dev->dma_mask); @@ -3711,14 +3711,14 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { intel_unmap(dev, dev_addr, size); } static void *intel_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) + unsigned long attrs) { struct page *page = NULL; int order; @@ -3764,7 +3764,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size, } static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { int order; struct page *page = virt_to_page(vaddr); @@ -3779,7 +3779,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK; unsigned long nrpages = 0; @@ -3808,7 +3808,7 @@ static int intel_nontranslate_map_sg(struct device *hddev, } static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { int i; struct dmar_domain *domain; diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c index 3df66d11c795..b7892f3efd98 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-hw.c +++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c @@ -430,14 +430,11 @@ int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp) */ void bdisp_hw_free_nodes(struct 
bdisp_ctx *ctx) { - if (ctx && ctx->node[0]) { - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + if (ctx && ctx->node[0]) dma_free_attrs(ctx->bdisp_dev->dev, sizeof(struct bdisp_node) * MAX_NB_NODE, - ctx->node[0], ctx->node_paddr[0], &attrs); - } + ctx->node[0], ctx->node_paddr[0], + DMA_ATTR_WRITE_COMBINE); } /** @@ -455,12 +452,10 @@ int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx) unsigned int i, node_size = sizeof(struct bdisp_node); void *base; dma_addr_t paddr; - DEFINE_DMA_ATTRS(attrs); /* Allocate all the nodes within a single memory page */ - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr, - GFP_KERNEL | GFP_DMA, &attrs); + GFP_KERNEL | GFP_DMA, DMA_ATTR_WRITE_COMBINE); if (!base) { dev_err(dev, "%s no mem\n", __func__); return -ENOMEM; @@ -493,13 +488,9 @@ void bdisp_hw_free_filters(struct device *dev) { int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER); - if (bdisp_h_filter[0].virt) { - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + if (bdisp_h_filter[0].virt) dma_free_attrs(dev, size, bdisp_h_filter[0].virt, - bdisp_h_filter[0].paddr, &attrs); - } + bdisp_h_filter[0].paddr, DMA_ATTR_WRITE_COMBINE); } /** @@ -516,12 +507,11 @@ int bdisp_hw_alloc_filters(struct device *dev) unsigned int i, size; void *base; dma_addr_t paddr; - DEFINE_DMA_ATTRS(attrs); /* Allocate all the filters within a single memory page */ size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, &attrs); + base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, + DMA_ATTR_WRITE_COMBINE); if (!base) return -ENOMEM; diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 863f658a3fa1..b09b2c9b6b63 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c @@ -27,7 +27,7 @@ struct vb2_dc_buf { unsigned long size; void *cookie; dma_addr_t dma_addr; - struct dma_attrs attrs; + unsigned long attrs; enum dma_data_direction dma_dir; struct sg_table *dma_sgt; struct frame_vector *vec; @@ -130,12 +130,12 @@ static void vb2_dc_put(void *buf_priv) kfree(buf->sgt_base); } dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr, - &buf->attrs); + buf->attrs); put_device(buf->dev); kfree(buf); } -static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs, +static void *vb2_dc_alloc(struct device *dev, unsigned long attrs, unsigned long size, enum dma_data_direction dma_dir, gfp_t gfp_flags) { @@ -146,16 +146,16 @@ static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs, return ERR_PTR(-ENOMEM); if (attrs) - buf->attrs = *attrs; + buf->attrs = attrs; buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr, - GFP_KERNEL | gfp_flags, &buf->attrs); + GFP_KERNEL | gfp_flags, buf->attrs); if (!buf->cookie) { dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size); kfree(buf); return ERR_PTR(-ENOMEM); } - if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs)) + if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) buf->vaddr = buf->cookie; /* Prevent the device from being released while the buffer is used */ @@ -189,7 +189,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) vma->vm_pgoff = 0; ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, - buf->dma_addr, buf->size, &buf->attrs); + 
buf->dma_addr, buf->size, buf->attrs); if (ret) { pr_err("Remapping memory failed, error: %d\n", ret); @@ -372,7 +372,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf) } ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr, - buf->size, &buf->attrs); + buf->size, buf->attrs); if (ret < 0) { dev_err(buf->dev, "failed to get scatterlist from DMA API\n"); kfree(sgt); @@ -421,15 +421,12 @@ static void vb2_dc_put_userptr(void *buf_priv) struct page **pages; if (sgt) { - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); /* * No need to sync to CPU, it's already synced to the CPU * since the finish() memop will have been called before this. */ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir, &attrs); + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); pages = frame_vector_pages(buf->vec); /* sgt should exist only if vector contains pages... */ BUG_ON(IS_ERR(pages)); @@ -484,9 +481,6 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr, struct sg_table *sgt; unsigned long contig_size; unsigned long dma_align = dma_get_cache_alignment(); - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); /* Only cache aligned DMA transfers are reliable */ if (!IS_ALIGNED(vaddr | size, dma_align)) { @@ -548,7 +542,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr, * prepare() memop is called. */ sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir, &attrs); + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); if (sgt->nents <= 0) { pr_err("failed to map scatterlist\n"); ret = -EIO; @@ -572,7 +566,7 @@ out: fail_map_sg: dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir, &attrs); + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); fail_sgt_init: sg_free_table(sgt); diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index a39db8a6db7a..bd82d709ee82 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -95,7 +95,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf, return 0; } -static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_attrs, +static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs, unsigned long size, enum dma_data_direction dma_dir, gfp_t gfp_flags) { @@ -103,9 +103,6 @@ static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_at struct sg_table *sgt; int ret; int num_pages; - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); if (WARN_ON(dev == NULL)) return NULL; @@ -144,7 +141,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_at * prepare() memop is called. 
*/ sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir, &attrs); + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); if (!sgt->nents) goto fail_map; @@ -179,13 +176,10 @@ static void vb2_dma_sg_put(void *buf_priv) int i = buf->num_pages; if (atomic_dec_and_test(&buf->refcount)) { - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, buf->num_pages); dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir, &attrs); + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); if (buf->vaddr) vm_unmap_ram(buf->vaddr, buf->num_pages); sg_free_table(buf->dma_sgt); @@ -228,10 +222,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr, { struct vb2_dma_sg_buf *buf; struct sg_table *sgt; - DEFINE_DMA_ATTRS(attrs); struct frame_vector *vec; - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); buf = kzalloc(sizeof *buf, GFP_KERNEL); if (!buf) return NULL; @@ -262,7 +254,7 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr, * prepare() memop is called. */ sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir, &attrs); + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); if (!sgt->nents) goto userptr_fail_map; @@ -286,14 +278,11 @@ static void vb2_dma_sg_put_userptr(void *buf_priv) struct vb2_dma_sg_buf *buf = buf_priv; struct sg_table *sgt = &buf->sg_table; int i = buf->num_pages; - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); dprintk(1, "%s: Releasing userspace buffer of %d pages\n", __func__, buf->num_pages); dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir, - &attrs); + DMA_ATTR_SKIP_CPU_SYNC); if (buf->vaddr) vm_unmap_ram(buf->vaddr, buf->num_pages); sg_free_table(buf->dma_sgt); diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c index 7e8a07ed8d82..c2820a6e164d 100644 --- a/drivers/media/v4l2-core/videobuf2-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c @@ -33,7 +33,7 @@ struct vb2_vmalloc_buf { static void vb2_vmalloc_put(void *buf_priv); -static void *vb2_vmalloc_alloc(struct device *dev, const struct dma_attrs *attrs, +static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs, unsigned long size, enum dma_data_direction dma_dir, gfp_t gfp_flags) { diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index e047efd83f57..9599d732aff3 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c @@ -38,7 +38,7 @@ static inline struct mic_device *vpdev_to_mdev(struct device *dev) static dma_addr_t _mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { void *va = phys_to_virt(page_to_phys(page)) + offset; struct mic_device *mdev = vpdev_to_mdev(dev); @@ -48,7 +48,7 @@ _mic_dma_map_page(struct device *dev, struct page *page, static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct mic_device *mdev = vpdev_to_mdev(dev); @@ -144,7 +144,7 @@ static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev) static void *__mic_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); 
struct mic_device *mdev = scdev_to_mdev(scdev); @@ -164,7 +164,7 @@ static void *__mic_dma_alloc(struct device *dev, size_t size, } static void __mic_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -176,7 +176,7 @@ static void __mic_dma_free(struct device *dev, size_t size, void *vaddr, static dma_addr_t __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { void *va = phys_to_virt(page_to_phys(page)) + offset; struct scif_hw_dev *scdev = dev_get_drvdata(dev); @@ -188,7 +188,7 @@ __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, static void __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -198,7 +198,7 @@ __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -229,7 +229,7 @@ err: static void __mic_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -327,7 +327,7 @@ static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev) static dma_addr_t mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { void *va = phys_to_virt(page_to_phys(page)) + offset; struct mic_device *mdev = dev_get_drvdata(dev->parent); @@ -338,7 +338,7 @@ mic_dma_map_page(struct device *dev, struct page *page, static void mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct mic_device *mdev = dev_get_drvdata(dev->parent); mic_unmap_single(mdev, dma_addr, size); diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index e24b05996a1b..3ed6238f8f6e 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -790,7 +790,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size, static dma_addr_t ccio_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { return ccio_map_single(dev, page_address(page) + offset, size, direction); @@ -806,7 +806,7 @@ ccio_map_page(struct device *dev, struct page *page, unsigned long offset, */ static void ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; unsigned long flags; @@ -844,7 +844,7 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, */ static void * ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t 
flag, - struct dma_attrs *attrs) + unsigned long attrs) { void *ret; #if 0 @@ -878,9 +878,9 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, */ static void ccio_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { - ccio_unmap_page(dev, dma_handle, size, 0, NULL); + ccio_unmap_page(dev, dma_handle, size, 0, 0); free_pages((unsigned long)cpu_addr, get_order(size)); } @@ -907,7 +907,7 @@ ccio_free(struct device *dev, size_t size, void *cpu_addr, */ static int ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; int coalesced, filled = 0; @@ -984,7 +984,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, */ static void ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; @@ -1004,7 +1004,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; #endif ccio_unmap_page(dev, sg_dma_address(sglist), - sg_dma_len(sglist), direction, NULL); + sg_dma_len(sglist), direction, 0); ++sglist; } diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 42ec4600b7e4..151b86b6d2e2 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -783,7 +783,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, static dma_addr_t sba_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { return sba_map_single(dev, page_address(page) + offset, size, direction); @@ -801,7 +801,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset, */ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; #if DELAYED_RESOURCE_CNT > 0 @@ -876,7 +876,7 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, * See Documentation/DMA-API-HOWTO.txt */ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, struct dma_attrs *attrs) + gfp_t gfp, unsigned long attrs) { void *ret; @@ -908,9 +908,9 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle */ static void sba_free(struct device *hwdev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { - sba_unmap_page(hwdev, dma_handle, size, 0, NULL); + sba_unmap_page(hwdev, dma_handle, size, 0, 0); free_pages((unsigned long) vaddr, get_order(size)); } @@ -943,7 +943,7 @@ int dump_run_sg = 0; */ static int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; int coalesced, filled = 0; @@ -1026,7 +1026,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, */ static void sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, - enum dma_data_direction direction, struct dma_attrs *attrs) + 
enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; #ifdef ASSERT_PDIR_SANITY @@ -1051,7 +1051,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, while (sg_dma_len(sglist) && nents--) { sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), - direction, NULL); + direction, 0); #ifdef SBA_COLLECT_STATS ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; ioc->usingle_calls--; /* kluge since call is unmap_sg() */ diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c index 24791886219a..2a1b2c7d8f2c 100644 --- a/drivers/remoteproc/qcom_q6v5_pil.c +++ b/drivers/remoteproc/qcom_q6v5_pil.c @@ -349,13 +349,12 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc, static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) { - DEFINE_DMA_ATTRS(attrs); + unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; dma_addr_t phys; void *ptr; int ret; - dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs); - ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs); + ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs); if (!ptr) { dev_err(qproc->dev, "failed to allocate mdt buffer\n"); return -ENOMEM; @@ -372,7 +371,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) else if (ret < 0) dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); - dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs); + dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs); return ret < 0 ? ret : 0; } diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c index 2fb90cb6803f..1d7c012f09db 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c @@ -1332,7 +1332,7 @@ static void omapfb_free_fbmem(struct fb_info *fbi) } dma_free_attrs(fbdev->dev, rg->size, rg->token, rg->dma_handle, - &rg->attrs); + rg->attrs); rg->token = NULL; rg->vaddr = NULL; @@ -1370,7 +1370,7 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size, struct omapfb2_device *fbdev = ofbi->fbdev; struct omapfb2_mem_region *rg; void *token; - DEFINE_DMA_ATTRS(attrs); + unsigned long attrs; dma_addr_t dma_handle; int r; @@ -1386,15 +1386,15 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size, size = PAGE_ALIGN(size); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + attrs = DMA_ATTR_WRITE_COMBINE; if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); + attrs |= DMA_ATTR_NO_KERNEL_MAPPING; DBG("allocating %lu bytes for fb %d\n", size, ofbi->id); token = dma_alloc_attrs(fbdev->dev, size, &dma_handle, - GFP_KERNEL, &attrs); + GFP_KERNEL, attrs); if (token == NULL) { dev_err(fbdev->dev, "failed to allocate framebuffer\n"); @@ -1408,7 +1408,7 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size, r = omap_vrfb_request_ctx(&rg->vrfb); if (r) { dma_free_attrs(fbdev->dev, size, token, dma_handle, - &attrs); + attrs); dev_err(fbdev->dev, "vrfb create ctx failed\n"); return r; } diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb.h b/drivers/video/fbdev/omap2/omapfb/omapfb.h index bcb9ff4a607d..555487d6dbea 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb.h +++ b/drivers/video/fbdev/omap2/omapfb/omapfb.h @@ -28,7 +28,6 @@ #endif #include <linux/rwsem.h> -#include <linux/dma-attrs.h> #include <linux/dma-mapping.h> 
#include <video/omapfb_dss.h> @@ -51,7 +50,7 @@ extern bool omapfb_debug; struct omapfb2_mem_region { int id; - struct dma_attrs attrs; + unsigned long attrs; void *token; dma_addr_t dma_handle; u32 paddr; diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 7399782c0998..87e6035c9e81 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -294,7 +294,7 @@ error: void * xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) + unsigned long attrs) { void *ret; int order = get_order(size); @@ -346,7 +346,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent); void xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, - dma_addr_t dev_addr, struct dma_attrs *attrs) + dma_addr_t dev_addr, unsigned long attrs) { int order = get_order(size); phys_addr_t phys; @@ -378,7 +378,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent); dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { phys_addr_t map, phys = page_to_phys(page) + offset; dma_addr_t dev_addr = xen_phys_to_bus(phys); @@ -434,7 +434,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_page); */ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { phys_addr_t paddr = xen_bus_to_phys(dev_addr); @@ -462,7 +462,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { xen_unmap_single(hwdev, dev_addr, size, dir, attrs); } @@ -538,7 +538,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device); int xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; @@ -599,7 +599,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs); void xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; |