/*
 * IOMMU helper functions for free area (bitmap) management
 */

#include <linux/module.h>
#include <linux/bitops.h>

/*
 * Find a run of @nr zero bits in @map (a bitmap of @size bits), searching
 * from @start and aligning the start of the run according to @align_mask
 * (a power of two minus one).  Returns the index of the first bit of the
 * run, or -1 (as an unsigned long) if no suitable run exists.
 */
static unsigned long find_next_zero_area(unsigned long *map,
					 unsigned long size,
					 unsigned long start,
					 unsigned int nr,
					 unsigned long align_mask)
{
	unsigned long index, end, i;
again:
	index = find_next_zero_bit(map, size, start);

	/* Align allocation */
	index = (index + align_mask) & ~align_mask;

	end = index + nr;
	if (end > size)
		return -1;
	for (i = index; i < end; i++) {
		if (test_bit(i, map)) {
			/* hit an allocated bit; restart just past it */
			start = i + 1;
			goto again;
		}
	}
	return index;
}

/* Mark the @len bits starting at @i as allocated. */
static inline void set_bit_area(unsigned long *map, unsigned long i,
				int len)
{
	unsigned long end = i + len;

	while (i < end) {
		__set_bit(i, map);
		i++;
	}
}

/*
 * Would the area [@index, @index + @nr) cross a @boundary_size-aligned
 * boundary once the bitmap index is translated into the device address
 * space by adding @shift?  @boundary_size must be a power of two.
 */
static inline int is_span_boundary(unsigned int index, unsigned int nr,
				   unsigned long shift,
				   unsigned long boundary_size)
{
	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}

/*
 * Allocate @nr consecutive entries from the bitmap @map of @size bits,
 * searching from @start.  The area is aligned according to @align_mask and
 * must not cross a @boundary_size boundary in the address space offset by
 * @shift.  Returns the index of the first entry, or -1 on failure.
 */
unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
			       unsigned long start, unsigned int nr,
			       unsigned long shift, unsigned long boundary_size,
			       unsigned long align_mask)
{
	unsigned long index;
again:
	index = find_next_zero_area(map, size, start, nr, align_mask);
	if (index != -1) {
		if (is_span_boundary(index, nr, shift, boundary_size)) {
			/*
			 * We could be more efficient here and skip straight
			 * to the next boundary instead of retrying at
			 * index + 1.
			 */
			start = index + 1;
			goto again;
		}
		set_bit_area(map, index, nr);
	}
	return index;
}
EXPORT_SYMBOL(iommu_area_alloc);

/* Free @nr entries starting at @start that were obtained from iommu_area_alloc(). */
void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
{
	unsigned long end = start + nr;

	while (start < end) {
		__clear_bit(start, map);
		start++;
	}
}
EXPORT_SYMBOL(iommu_area_free);
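
/*
 * Usage sketch (illustration only, not part of the upstream file): how a
 * caller might carve 16 contiguous entries out of a 128-bit map with a
 * 4-entry alignment and a 64-entry boundary.  The bitmap, the sizes and
 * the function below are hypothetical; real IOMMU drivers keep the bitmap
 * in their own table structure and serialize access with their own lock.
 */
static void __maybe_unused iommu_helper_example(void)
{
	/* one bit per IO page, all free initially */
	DECLARE_BITMAP(map, 128) = { 0 };
	unsigned long idx;

	/* 16 free bits, 4-bit aligned, not crossing a 64-entry boundary */
	idx = iommu_area_alloc(map, 128, 0, 16, 0, 64, 4 - 1);
	if (idx != -1) {
		/* ... program IO page table entries idx..idx+15 here ... */
		iommu_area_free(map, idx, 16);
	}
}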