/*
 * lib/dma-virt.c
 *
 * DMA operations that map to kernel virtual addresses instead of bus
 * addresses and perform no cache flushing.  Intended for "devices" that
 * access memory through the CPU (e.g. software RDMA drivers) rather than
 * through real DMA hardware.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

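/* Plain page allocation; the kernel virtual address doubles as the DMA handle. */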
static void *dma_virt_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp,
			    unsigned long attrs)
{
	void *ret;

	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret)
		*dma_handle = (uintptr_t)ret;
	return ret;
}

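/* Release memory obtained from dma_virt_alloc(). */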
static void dma_virt_free(struct device *dev, size_t size,
			  void *cpu_addr, dma_addr_t dma_addr,
			  unsigned long attrs)
{
	free_pages((unsigned long)cpu_addr, get_order(size));
}

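/* "Mapping" a page just returns its kernel virtual address plus the offset. */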
static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	return (uintptr_t)(page_address(page) + offset);
}

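/* Store each scatterlist entry's kernel virtual address as its DMA address. */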
static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

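/* A virtual-address mapping can never fail, so no handle is an error value. */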
static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}

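/* Any DMA mask works, since addresses are never handed to real hardware. */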
static int dma_virt_supported(struct device *dev, u64 mask)
{
	return true;
}

const struct dma_map_ops dma_virt_ops = {
	.alloc			= dma_virt_alloc,
	.free			= dma_virt_free,
	.map_page		= dma_virt_map_page,
	.map_sg			= dma_virt_map_sg,
	.mapping_error		= dma_virt_mapping_error,
	.dma_supported		= dma_virt_supported,
};
EXPORT_SYMBOL(dma_virt_ops);
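
/*
 * Illustrative usage sketch (not part of the original file): a software-only
 * "DMA" user, such as a soft-RDMA style driver, could select these ops on its
 * struct device so that the generic DMA API hands back plain kernel virtual
 * addresses.  The direct dev->dma_ops assignment assumes a kernel where
 * struct device carries the dma_ops pointer; the helper name below is made up
 * for illustration only.
 */
static inline void example_use_dma_virt_ops(struct device *dev)
{
	/*
	 * Every dma_map_*() call on @dev now returns a kernel virtual address
	 * and performs no cache maintenance or IOMMU programming.
	 */
	dev->dma_ops = &dma_virt_ops;
}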