fs/erofs/pcpubuf.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Gao Xiang <xiang@kernel.org>
 *
 * For low-latency decompression algorithms (e.g. lz4), reserve consecutive
 * per-CPU virtual memory (in pages) in advance, so that in-place I/O data
 * can be moved here when in-place decompression fails (for example, because
 * the in-place margin is not met).
 */
#include "internal.h"
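
/*
 * Typical caller pattern (an illustrative sketch; the real users are the
 * EROFS decompressors, which bounce compressed pages through this buffer
 * when in-place decompression cannot be used).  The buffer is returned
 * with the per-CPU lock held and preemption disabled:
 *
 *	dst = erofs_get_pcpubuf(nr);
 *	if (dst) {
 *		... copy or decompress using dst ...
 *		erofs_put_pcpubuf(dst);
 *	}
 */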

struct erofs_pcpubuf {
	raw_spinlock_t lock;	/* protects this CPU's buffer */
	void *ptr;		/* vmap()ed contiguous buffer */
	struct page **pages;	/* pages backing @ptr */
	unsigned int nrpages;	/* number of pages currently mapped */
};

static DEFINE_PER_CPU(struct erofs_pcpubuf, erofs_pcb);

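/*
 * Grab the calling CPU's preallocated buffer with pcb->lock held and
 * preemption disabled, or return NULL (with the lock dropped again) if
 * fewer than @requiredpages pages are currently mapped on this CPU.
 */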
void *erofs_get_pcpubuf(unsigned int requiredpages)
	__acquires(pcb->lock)
{
	struct erofs_pcpubuf *pcb = &get_cpu_var(erofs_pcb);

	raw_spin_lock(&pcb->lock);
	/* check if the per-CPU buffer is too small */
	if (requiredpages > pcb->nrpages) {
		raw_spin_unlock(&pcb->lock);
		put_cpu_var(erofs_pcb);
		/* (for sparse checker) pretend pcb->lock is still taken */
		__acquire(pcb->lock);
		return NULL;
	}
	return pcb->ptr;
}

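/*
 * Release the buffer returned by erofs_get_pcpubuf(): drop pcb->lock and
 * re-enable preemption.
 */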
void erofs_put_pcpubuf(void *ptr) __releases(pcb->lock)
{
	struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, smp_processor_id());

	DBG_BUGON(pcb->ptr != ptr);
	raw_spin_unlock(&pcb->lock);
	put_cpu_var(erofs_pcb);
}

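/*
 * Grow every possible CPU's buffer to @nrpages pages.  New pages are
 * allocated and vmap()ed outside the per-CPU lock; only the final pointer
 * swap happens under it, and the old mapping (if any) is torn down
 * afterwards.  Serialized by pcb_resize_mutex; shrinking is refused.
 */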
/* TODO: support CPU hotplug for the per-CPU page buffers */
int erofs_pcpubuf_growsize(unsigned int nrpages)
{
	static DEFINE_MUTEX(pcb_resize_mutex);
	static unsigned int pcb_nrpages;
	struct page *pagepool = NULL;
	int delta, cpu, ret, i;

	mutex_lock(&pcb_resize_mutex);
	delta = nrpages - pcb_nrpages;
	ret = 0;
	/*
	 * Refuse to shrink the buffers: there is no way to know how many
	 * mounted filesystems still rely on the current size.
	 */
	if (delta <= 0)
		goto out;

	for_each_possible_cpu(cpu) {
		struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
		struct page **pages, **oldpages;
		void *ptr, *old_ptr;

		pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			ret = -ENOMEM;
			break;
		}

		for (i = 0; i < nrpages; ++i) {
			pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL);
			if (!pages[i]) {
				ret = -ENOMEM;
				oldpages = pages;
				goto free_pagearray;
			}
		}
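		/* map the new pages into one contiguous virtual area */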
		ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL);
		if (!ptr) {
			ret = -ENOMEM;
			oldpages = pages;
			goto free_pagearray;
		}
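		/* publish the new buffer and stash the old one for teardown */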
		raw_spin_lock(&pcb->lock);
		old_ptr = pcb->ptr;
		pcb->ptr = ptr;
		oldpages = pcb->pages;
		pcb->pages = pages;
		i = pcb->nrpages;
		pcb->nrpages = nrpages;
		raw_spin_unlock(&pcb->lock);

		if (!oldpages) {
			DBG_BUGON(old_ptr);
			continue;
		}

		if (old_ptr)
			vunmap(old_ptr);
free_pagearray:
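		/* recycle the old pages (or the partial new set on failure) */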
		while (i)
			erofs_pagepool_add(&pagepool, oldpages[--i]);
		kfree(oldpages);
		if (ret)
			break;
	}
	pcb_nrpages = nrpages;
	erofs_release_pages(&pagepool);
out:
	mutex_unlock(&pcb_resize_mutex);
	return ret;
}

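/* initialize the per-CPU locks; pages are only mapped by erofs_pcpubuf_growsize() */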
void erofs_pcpubuf_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);

		raw_spin_lock_init(&pcb->lock);
	}
}

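/* tear down all per-CPU buffers: unmap the virtual areas and free the pages */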
void erofs_pcpubuf_exit(void)
{
	int cpu, i;

	for_each_possible_cpu(cpu) {
		struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);

		if (pcb->ptr) {
			vunmap(pcb->ptr);
			pcb->ptr = NULL;
		}
		if (!pcb->pages)
			continue;

		for (i = 0; i < pcb->nrpages; ++i)
			if (pcb->pages[i])
				put_page(pcb->pages[i]);
		kfree(pcb->pages);
		pcb->pages = NULL;
	}
}