Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--  drivers/android/binder_alloc.c | 366
1 file changed, 231 insertions(+), 135 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index a738e7745865..fcfaf1b899c8 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -61,7 +61,7 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer + alloc->buffer_size - buffer->user_data;
+ return alloc->vm_start + alloc->buffer_size - buffer->user_data;
return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
@@ -169,32 +169,33 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return buffer;
}
static inline void
-binder_set_installed_page(struct binder_lru_page *lru_page,
+binder_set_installed_page(struct binder_alloc *alloc,
+ unsigned long index,
struct page *page)
{
/* Pairs with acquire in binder_get_installed_page() */
- smp_store_release(&lru_page->page_ptr, page);
+ smp_store_release(&alloc->pages[index], page);
}
static inline struct page *
-binder_get_installed_page(struct binder_lru_page *lru_page)
+binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
{
/* Pairs with release in binder_set_installed_page() */
- return smp_load_acquire(&lru_page->page_ptr);
+ return smp_load_acquire(&alloc->pages[index]);
}
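
These two helpers publish and consume page pointers without holding alloc->mutex. The release/acquire pairing guarantees that a reader who observes a non-NULL entry also observes every write that initialized the page, including the shrinker metadata stored under page->private further down. A minimal sketch of the pattern, with a hypothetical pages[] array standing in for alloc->pages:

static struct page *pages[16];	/* hypothetical stand-in for alloc->pages */

static void publish_page(unsigned long index, struct page *page)
{
	/* all initialization of @page happens-before this store */
	smp_store_release(&pages[index], page);
}

static struct page *consume_page(unsigned long index)
{
	/* pairs with the release store in publish_page() */
	return smp_load_acquire(&pages[index]);
}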
static void binder_lru_freelist_add(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, false, start, end);
@@ -202,65 +203,159 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
size_t index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (!binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
+ if (!page)
continue;
trace_binder_free_lru_start(alloc, index);
- ret = list_lru_add_obj(&binder_freelist, &page->lru);
+ ret = list_lru_add(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
}
}
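
page_to_lru() and struct binder_shrinker_mdata are defined outside this file (the diffstat is limited to binder_alloc.c), but their shape can be inferred from the usage here and in binder_page_alloc()/binder_alloc_free_page() below. A plausible sketch:

struct binder_shrinker_mdata {
	struct list_head lru;		/* entry in the binder_freelist list_lru */
	struct binder_alloc *alloc;	/* owning allocator, read by the shrinker */
	unsigned long page_index;	/* slot in alloc->pages[] */
};

static inline struct list_head *page_to_lru(struct page *p)
{
	struct binder_shrinker_mdata *mdata;

	mdata = (struct binder_shrinker_mdata *)page_private(p);
	return &mdata->lru;
}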
-static int binder_install_single_page(struct binder_alloc *alloc,
- struct binder_lru_page *lru_page,
- unsigned long addr)
+static inline
+void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
{
- struct page *page;
- int ret = 0;
+ /* pairs with smp_load_acquire in binder_alloc_is_mapped() */
+ smp_store_release(&alloc->mapped, state);
+}
- if (!mmget_not_zero(alloc->mm))
- return -ESRCH;
+static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
+{
+ /* pairs with smp_store_release in binder_alloc_set_mapped() */
+ return smp_load_acquire(&alloc->mapped);
+}
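
The matching field changes in struct binder_alloc are likewise in the header and not shown here. Inferred from how this patch uses them, the relevant fields now look roughly like:

struct binder_alloc {
	struct mutex mutex;	/* was: spinlock_t lock */
	struct mm_struct *mm;
	unsigned long vm_start;	/* was: unsigned long buffer */
	struct page **pages;	/* was: struct binder_lru_page *pages */
	size_t buffer_size;
	bool mapped;		/* was: struct vm_area_struct *vma */
	/* other fields unchanged */
};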
+
+static struct page *binder_page_lookup(struct binder_alloc *alloc,
+ unsigned long addr)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct page *page;
+ long npages = 0;
/*
- * Protected with mmap_sem in write mode as multiple tasks
- * might race to install the same page.
+ * Find an existing page in the remote mm. If missing,
+ * don't attempt to fault it in; just propagate an error.
*/
- mmap_write_lock(alloc->mm);
- if (binder_get_installed_page(lru_page))
- goto out;
+ mmap_read_lock(mm);
+ if (binder_alloc_is_mapped(alloc))
+ npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
+ &page, NULL);
+ mmap_read_unlock(mm);
- if (!alloc->vma) {
- pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
- ret = -ESRCH;
- goto out;
+ return npages > 0 ? page : NULL;
+}
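
FOLL_NOFAULT makes get_user_pages_remote() fail rather than fault when no page is mapped at addr, which is exactly what this lookup wants: it should only ever return a page that a racing task has already inserted, never allocate or fault one in itself.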
+
+static int binder_page_insert(struct binder_alloc *alloc,
+ unsigned long addr,
+ struct page *page)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct vm_area_struct *vma;
+ int ret = -ESRCH;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, addr);
+ if (vma) {
+ if (binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ vma_end_read(vma);
+ return ret;
}
+ /* fall back to mmap_lock */
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, addr);
+ if (vma && binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ mmap_read_unlock(mm);
+
+ return ret;
+}
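
Trying lock_vma_under_rcu() first keeps page installation off mmap_lock entirely in the common case, so concurrent transactions into the same process no longer serialize against each other or against unrelated mmap activity; mmap_read_lock() remains as a fallback for when the per-VMA lock cannot be taken. In both paths alloc->mapped is re-checked under the lock, so an insert cannot race past binder_alloc_vma_close().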
+
+static struct page *binder_page_alloc(struct binder_alloc *alloc,
+ unsigned long index)
+{
+ struct binder_shrinker_mdata *mdata;
+ struct page *page;
+
page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ /* allocate and install shrinker metadata under page->private */
+ mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
+ if (!mdata) {
+ __free_page(page);
+ return NULL;
+ }
+
+ mdata->alloc = alloc;
+ mdata->page_index = index;
+ INIT_LIST_HEAD(&mdata->lru);
+ set_page_private(page, (unsigned long)mdata);
+
+ return page;
+}
+
+static void binder_free_page(struct page *page)
+{
+ kfree((struct binder_shrinker_mdata *)page_private(page));
+ __free_page(page);
+}
+
+static int binder_install_single_page(struct binder_alloc *alloc,
+ unsigned long index,
+ unsigned long addr)
+{
+ struct page *page;
+ int ret;
+
+ if (!mmget_not_zero(alloc->mm))
+ return -ESRCH;
+
+ page = binder_page_alloc(alloc, index);
if (!page) {
- pr_err("%d: failed to allocate page\n", alloc->pid);
ret = -ENOMEM;
goto out;
}
- ret = vm_insert_page(alloc->vma, addr, page);
- if (ret) {
+ ret = binder_page_insert(alloc, addr, page);
+ switch (ret) {
+ case -EBUSY:
+ /*
+ * EBUSY is ok. Someone installed the pte first but the
+ * alloc->pages[index] has not been updated yet. Discard
+ * our page and look up the one already installed.
+ */
+ ret = 0;
+ binder_free_page(page);
+ page = binder_page_lookup(alloc, addr);
+ if (!page) {
+ pr_err("%d: failed to find page at offset %lx\n",
+ alloc->pid, addr - alloc->vm_start);
+ ret = -ESRCH;
+ break;
+ }
+ fallthrough;
+ case 0:
+ /* Mark page installation complete and safe to use */
+ binder_set_installed_page(alloc, index, page);
+ break;
+ default:
+ binder_free_page(page);
pr_err("%d: %s failed to insert page at offset %lx with %d\n",
- alloc->pid, __func__, addr - alloc->buffer, ret);
- __free_page(page);
- ret = -ENOMEM;
- goto out;
+ alloc->pid, __func__, addr - alloc->vm_start, ret);
+ break;
}
-
- /* Mark page installation complete and safe to use */
- binder_set_installed_page(lru_page, page);
out:
- mmap_write_unlock(alloc->mm);
mmput_async(alloc->mm);
return ret;
}
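
The -EBUSY case resolves a benign race: task A's vm_insert_page() wins but A has not yet reached binder_set_installed_page(), so task B finds the PTE already present and gets -EBUSY. B frees its own page, recovers A's page via binder_page_lookup(), and falls through to publish it; both tasks may store the same pointer into alloc->pages[index], which is harmless.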
@@ -269,7 +364,6 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{
- struct binder_lru_page *page;
unsigned long start, final;
unsigned long page_addr;
@@ -280,15 +374,13 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
unsigned long index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (binder_get_installed_page(alloc, index))
continue;
trace_binder_alloc_page_start(alloc, index);
- ret = binder_install_single_page(alloc, page, page_addr);
+ ret = binder_install_single_page(alloc, index, page_addr);
if (ret)
return ret;
@@ -302,8 +394,8 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, true, start, end);
@@ -311,13 +403,16 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long index;
bool on_lru;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
- if (page->page_ptr) {
+ if (page) {
trace_binder_alloc_lru_start(alloc, index);
- on_lru = list_lru_del_obj(&binder_freelist, &page->lru);
+ on_lru = list_lru_del(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!on_lru);
trace_binder_alloc_lru_end(alloc, index);
@@ -329,20 +424,6 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
}
}
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
-{
- /* pairs with smp_load_acquire in binder_alloc_get_vma() */
- smp_store_release(&alloc->vma, vma);
-}
-
-static inline struct vm_area_struct *binder_alloc_get_vma(
- struct binder_alloc *alloc)
-{
- /* pairs with smp_store_release in binder_alloc_set_vma() */
- return smp_load_acquire(&alloc->vma);
-}
-
static void debug_no_space_locked(struct binder_alloc *alloc)
{
size_t largest_alloc_size = 0;
@@ -576,7 +657,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
int ret;
/* Check binder_alloc is fully initialized */
- if (!binder_alloc_get_vma(alloc)) {
+ if (!binder_alloc_is_mapped(alloc)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
@@ -597,10 +678,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
if (!next)
return ERR_PTR(-ENOMEM);
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
if (IS_ERR(buffer)) {
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
goto out;
}
@@ -608,7 +689,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
buffer->offsets_size = offsets_size;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = current->tgid;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
ret = binder_install_buffer_pages(alloc, buffer, size);
if (ret) {
@@ -674,8 +755,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON(buffer->user_data < alloc->buffer);
- BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->user_data < alloc->vm_start);
+ BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += buffer_size;
@@ -734,14 +815,13 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
pgoff_t *pgoffp)
{
binder_size_t buffer_space_offset = buffer_offset +
- (buffer->user_data - alloc->buffer);
+ (buffer->user_data - alloc->vm_start);
pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
size_t index = buffer_space_offset >> PAGE_SHIFT;
- struct binder_lru_page *lru_page;
- lru_page = &alloc->pages[index];
*pgoffp = pgoff;
- return lru_page->page_ptr;
+
+ return alloc->pages[index];
}
/**
@@ -785,17 +865,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
* We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to
* binder_free_buf_locked(). However, that could
- * increase contention for the alloc->lock if clear_on_free
- * is used frequently for large buffers. This lock is not
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
* needed for correctness here.
*/
if (buffer->clear_on_free) {
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
/**
@@ -816,7 +896,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
const char *failure_string;
- int ret, i;
+ int ret;
if (unlikely(vma->vm_mm != alloc->mm)) {
ret = -EINVAL;
@@ -834,22 +914,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
SZ_4M);
mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer = vma->vm_start;
+ alloc->vm_start = vma->vm_start;
alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
- if (alloc->pages == NULL) {
+ if (!alloc->pages) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- alloc->pages[i].alloc = alloc;
- INIT_LIST_HEAD(&alloc->pages[i].lru);
- }
-
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
@@ -857,14 +932,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_buf_struct_failed;
}
- buffer->user_data = alloc->buffer;
+ buffer->user_data = alloc->vm_start;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
/* Signal binder_alloc is fully initialized */
- binder_alloc_set_vma(alloc, vma);
+ binder_alloc_set_mapped(alloc, true);
return 0;
@@ -872,7 +947,7 @@ err_alloc_buf_struct_failed:
kvfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- alloc->buffer = 0;
+ alloc->vm_start = 0;
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer_size = 0;
err_already_mapped:
@@ -893,8 +968,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
struct binder_buffer *buffer;
buffers = 0;
- spin_lock(&alloc->lock);
- BUG_ON(alloc->vma);
+ mutex_lock(&alloc->mutex);
+ BUG_ON(alloc->mapped);
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -925,22 +1000,26 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ struct page *page;
bool on_lru;
- if (!alloc->pages[i].page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
continue;
- on_lru = list_lru_del_obj(&binder_freelist,
- &alloc->pages[i].lru);
+ on_lru = list_lru_del(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d %s\n",
__func__, alloc->pid, i,
on_lru ? "on lru" : "active");
- __free_page(alloc->pages[i].page_ptr);
+ binder_free_page(page);
page_count++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
kvfree(alloc->pages);
if (alloc->mm)
mmdrop(alloc->mm);
@@ -964,17 +1043,17 @@ void binder_alloc_print_allocated(struct seq_file *m,
struct binder_buffer *buffer;
struct rb_node *n;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
buffer->debug_id,
- buffer->user_data - alloc->buffer,
+ buffer->user_data - alloc->vm_start,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
/**
@@ -985,29 +1064,29 @@ void binder_alloc_print_allocated(struct seq_file *m,
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc)
{
- struct binder_lru_page *page;
+ struct page *page;
int i;
int active = 0;
int lru = 0;
int free = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
/*
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
+ if (binder_alloc_is_mapped(alloc)) {
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
free++;
- else if (list_empty(&page->lru))
+ else if (list_empty(page_to_lru(page)))
active++;
else
lru++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
@@ -1023,10 +1102,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
struct rb_node *n;
int count = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return count;
}
@@ -1036,12 +1115,12 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
* @alloc: binder_alloc for this proc
*
* Called from binder_vma_close() when releasing address space.
- * Clears alloc->vma to prevent new incoming transactions from
+ * Clears alloc->mapped to prevent new incoming transactions from
* allocating more buffers.
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- binder_alloc_set_vma(alloc, NULL);
+ binder_alloc_set_mapped(alloc, false);
}
/**
@@ -1058,39 +1137,50 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
void *cb_arg)
__must_hold(&lru->lock)
{
- struct binder_lru_page *page = container_of(item, typeof(*page), lru);
- struct binder_alloc *alloc = page->alloc;
+ struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
+ struct binder_alloc *alloc = mdata->alloc;
struct mm_struct *mm = alloc->mm;
struct vm_area_struct *vma;
struct page *page_to_free;
unsigned long page_addr;
+ int mm_locked = 0;
size_t index;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!mmap_read_trylock(mm))
- goto err_mmap_read_lock_failed;
- if (!spin_trylock(&alloc->lock))
- goto err_get_alloc_lock_failed;
- if (!page->page_ptr)
- goto err_page_already_freed;
-
- index = page - alloc->pages;
- page_addr = alloc->buffer + index * PAGE_SIZE;
-
- vma = vma_lookup(mm, page_addr);
- if (vma && vma != binder_alloc_get_vma(alloc))
+
+ index = mdata->page_index;
+ page_addr = alloc->vm_start + index * PAGE_SIZE;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, page_addr);
+ if (!vma) {
+ /* fall back to mmap_lock */
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
+ mm_locked = 1;
+ vma = vma_lookup(mm, page_addr);
+ }
+
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ /*
+ * Since a binder_alloc can only be mapped once, we ensure
+ * the vma corresponds to this mapping by checking whether
+ * the binder_alloc is still mapped.
+ */
+ if (vma && !binder_alloc_is_mapped(alloc))
goto err_invalid_vma;
trace_binder_unmap_kernel_start(alloc, index);
- page_to_free = page->page_ptr;
- page->page_ptr = NULL;
+ page_to_free = alloc->pages[index];
+ binder_set_installed_page(alloc, index, NULL);
trace_binder_unmap_kernel_end(alloc, index);
list_lru_isolate(lru, item);
- spin_unlock(&alloc->lock);
spin_unlock(&lru->lock);
if (vma) {
@@ -1101,17 +1191,23 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
mmput_async(mm);
- __free_page(page_to_free);
+ binder_free_page(page_to_free);
return LRU_REMOVED_RETRY;
err_invalid_vma:
-err_page_already_freed:
- spin_unlock(&alloc->lock);
-err_get_alloc_lock_failed:
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
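
binder_alloc_free_page() is the isolate callback for walking binder_freelist; note that this path only ever trylocks, which guards against stalls and lock recursion when reclaim is entered from a context that already holds alloc->mutex or mmap_lock. The registration side is not in this diff, but a sketch of the conventional wiring, assuming the shrinker scan simply walks the freelist:

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
			     NULL, sc->nr_to_scan);
}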
@@ -1145,7 +1241,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
alloc->pid = current->group_leader->pid;
alloc->mm = current->mm;
mmgrab(alloc->mm);
- spin_lock_init(&alloc->lock);
+ mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}