#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

#include <linux/kasan.h>		/* kasan_slab_free_mempool(), kasan_unpoison_range() */
#include <linux/io_uring_types.h>	/* struct io_alloc_cache, struct io_wq_work_node */

#include "slist.h"			/* wq_stack_add_head() */

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	512

struct io_cache_entry {
	struct io_wq_work_node node;
};
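
/*
 * Try to stash @entry for later reuse. Fails once the cache already
 * holds IO_ALLOC_CACHE_MAX entries, in which case the caller must free
 * the object itself. Cached memory is poisoned so that KASAN catches
 * any use of the object while it sits in the cache.
 */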
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      struct io_cache_entry *entry)
{
	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
		cache->nr_cached++;
		wq_stack_add_head(&entry->node, &cache->list);
		/* KASAN poisons object */
		kasan_slab_free_mempool(entry);
		return true;
	}
	return false;
}
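
/*
 * Pop the most recently cached entry, or NULL if the cache is empty.
 * The entry's memory (cache->elem_size bytes) is unpoisoned before it
 * is handed back for reuse.
 */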
static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->list.next) {
		struct io_cache_entry *entry;

		entry = container_of(cache->list.next, struct io_cache_entry, node);
		kasan_unpoison_range(entry, cache->elem_size);
		cache->list.next = cache->list.next->next;
		cache->nr_cached--;
		return entry;
	}

	return NULL;
}
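
/*
 * Initialise an empty cache. @size is the full size of the objects
 * being cached; io_alloc_cache_get() unpoisons that many bytes, so it
 * must cover the whole allocation rather than just the embedded
 * io_cache_entry.
 */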
static inline void io_alloc_cache_init(struct io_alloc_cache *cache, size_t size)
{
	cache->list.next = NULL;
	cache->nr_cached = 0;
	cache->elem_size = size;
}
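
/*
 * Drain the cache, releasing every remaining entry through the
 * caller-supplied @free callback.
 */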
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
				       void (*free)(struct io_cache_entry *))
{
	while (1) {
		struct io_cache_entry *entry = io_alloc_cache_get(cache);

		if (!entry)
			break;
		free(entry);
	}
	cache->nr_cached = 0;
}
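
/*
 * Illustrative usage sketch (not taken from the kernel tree; struct
 * my_obj and my_obj_get() are hypothetical names). The io_cache_entry
 * is expected to sit at the start of the cached object, so that the
 * elem_size-byte KASAN unpoisoning in io_alloc_cache_get() covers the
 * whole object:
 *
 *	struct my_obj {
 *		struct io_cache_entry cache;
 *		int data;
 *	};
 *
 *	static struct my_obj *my_obj_get(struct io_alloc_cache *c)
 *	{
 *		struct io_cache_entry *entry = io_alloc_cache_get(c);
 *
 *		if (entry)
 *			return container_of(entry, struct my_obj, cache);
 *		return kmalloc(sizeof(struct my_obj), GFP_KERNEL);
 *	}
 *
 * with the cache set up once via
 * io_alloc_cache_init(&cache, sizeof(struct my_obj)).
 */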
#endif