path: root/drivers/message/i2o/memory.c
/*
 *	Functions to handle I2O memory
 *
 *	Pulled from the inlines in i2o headers and uninlined
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2 of the License, or (at your
 *	option) any later version.
 */

#include <linux/module.h>
#include <linux/i2o.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "core.h"

/* Protects our 32/64bit mask switching */
static DEFINE_MUTEX(mem_lock);

/**
 *	i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
 *	@c: I2O controller for which the calculation should be done
 *	@body_size: maximum size of the message body, in 32-bit words
 *
 *	Returns the maximum number of SG elements that fit into a single
 *	SG list.
 */
u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
{
	i2o_status_block *sb = c->status_block.virt;
	/*
	 * inbound_frame_size is given in 32-bit words; subtract the message
	 * header (converted from bytes to words) and the body size.
	 */
	u16 sg_count =
	    (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
	    body_size;

	if (c->pae_support) {
		/*
		 * For 64-bit DMA an SG attribute element must be added and
		 * each SG element needs 12 bytes instead of 8.
		 */
		sg_count -= 2;
		sg_count /= 3;
	} else
		sg_count /= 2;

	if (c->short_req && (sg_count > 8))
		sg_count = 8;

	return sg_count;
}
EXPORT_SYMBOL_GPL(i2o_sg_tablesize);
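
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * would typically query the limit once and clamp its SG usage to it.
 * The body size of 4 words is an assumed example value.
 */
static inline u16 i2o_example_sg_limit(struct i2o_controller *c)
{
	const u16 example_body_size = 4;	/* assumed, in 32-bit words */

	return i2o_sg_tablesize(c, example_body_size);
}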

/**
 *	i2o_dma_map_single - Map pointer to controller and fill in I2O message.
 *	@c: I2O controller
 *	@ptr: pointer to the data which should be mapped
 *	@size: size of data in bytes
 *	@direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
 *	@sg_ptr: pointer to the SG list inside the I2O message
 *
 *	This function does all necessary DMA handling and also writes the I2O
 *	SGL elements into the I2O message. For details on DMA handling see also
 *	dma_map_single(). The pointer sg_ptr will only be advanced to the end
 *	of the SG list if the mapping was successful.
 *
 *	Returns the DMA address, which must be checked for failures using
 *	dma_mapping_error().
 */
dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
					    size_t size,
					    enum dma_data_direction direction,
					    u32 **sg_ptr)
{
	u32 sg_flags;
	u32 *mptr = *sg_ptr;
	dma_addr_t dma_addr;

	switch (direction) {
	case DMA_TO_DEVICE:
		sg_flags = 0xd4000000;
		break;
	case DMA_FROM_DEVICE:
		sg_flags = 0xd0000000;
		break;
	default:
		return 0;
	}

	dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
	if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
			*mptr++ = cpu_to_le32(0x7C020002);
			*mptr++ = cpu_to_le32(PAGE_SIZE);
		}
#endif

		*mptr++ = cpu_to_le32(sg_flags | size);
		*mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
			*mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
#endif
		*sg_ptr = mptr;
	}
	return dma_addr;
}
EXPORT_SYMBOL_GPL(i2o_dma_map_single);
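
/*
 * Illustrative usage sketch (not part of the original file): map a
 * driver buffer into a message under construction and bail out on a
 * mapping failure, as the comment above prescribes. "mptr" is assumed
 * to point at the SGL slot of an already-built message frame.
 */
static inline int i2o_example_map_buffer(struct i2o_controller *c, void *buf,
					 size_t len, u32 **mptr)
{
	dma_addr_t addr;

	addr = i2o_dma_map_single(c, buf, len, DMA_TO_DEVICE, mptr);
	if (dma_mapping_error(&c->pdev->dev, addr))
		return -ENOMEM;

	/* *mptr now points past the SGL elements that were written */
	return 0;
}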

/**
 *	i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
 *	@c: I2O controller
 *	@sg: SG list to be mapped
 *	@sg_count: number of elements in the SG list
 *	@direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
 *	@sg_ptr: pointer to the SG list inside the I2O message
 *
 *	This function does all necessary DMA handling and also writes the I2O
 *	SGL elements into the I2O message. For details on DMA handling see also
 *	dma_map_sg(). The pointer sg_ptr will only be advanced to the end of
 *	the SG list if the mapping was successful.
 *
 *	Returns 0 on failure or 1 on success.
 */
int i2o_dma_map_sg(struct i2o_controller *c, struct scatterlist *sg,
	    int sg_count, enum dma_data_direction direction, u32 **sg_ptr)
{
	u32 sg_flags;
	u32 *mptr = *sg_ptr;

	switch (direction) {
	case DMA_TO_DEVICE:
		sg_flags = 0x14000000;
		break;
	case DMA_FROM_DEVICE:
		sg_flags = 0x10000000;
		break;
	default:
		return 0;
	}

	sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
	if (!sg_count)
		return 0;

#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
	if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
		*mptr++ = cpu_to_le32(0x7C020002);
		*mptr++ = cpu_to_le32(PAGE_SIZE);
	}
#endif

	while (sg_count-- > 0) {
		if (!sg_count)
			sg_flags |= 0xC0000000;
		*mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
		*mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
			*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
#endif
		sg = sg_next(sg);
	}
	*sg_ptr = mptr;

	return 1;
}
EXPORT_SYMBOL_GPL(i2o_dma_map_sg);
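
/*
 * Illustrative usage sketch (not part of the original file): map an
 * already-initialized scatterlist into a message. On a later failure
 * the caller must undo the mapping with dma_unmap_sg().
 */
static inline int i2o_example_map_sg(struct i2o_controller *c,
				     struct scatterlist *sg, int nents,
				     u32 **mptr)
{
	if (!i2o_dma_map_sg(c, sg, nents, DMA_FROM_DEVICE, mptr))
		return -ENOMEM;

	/*
	 * On any subsequent error path:
	 *	dma_unmap_sg(&c->pdev->dev, sg, nents, DMA_FROM_DEVICE);
	 */
	return 0;
}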

/**
 *	i2o_dma_alloc - Allocate DMA memory
 *	@dev: struct device pointer to the PCI device of the I2O controller
 *	@addr: i2o_dma struct which should get the DMA buffer
 *	@len: length of the new DMA memory
 *
 *	Allocate coherent DMA memory and write the pointers into addr.
 *
 *	Returns 0 on success or -ENOMEM on failure.
 */
int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int dma_64 = 0;

	mutex_lock(&mem_lock);
	if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_BIT_MASK(64))) {
		dma_64 = 1;
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
			mutex_unlock(&mem_lock);
			return -ENOMEM;
		}
	}

	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL);

	if ((sizeof(dma_addr_t) > 4) && dma_64)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
			printk(KERN_WARNING "i2o: unable to set 64-bit DMA\n");
	mutex_unlock(&mem_lock);

	if (!addr->virt)
		return -ENOMEM;

	memset(addr->virt, 0, len);
	addr->len = len;

	return 0;
}
EXPORT_SYMBOL_GPL(i2o_dma_alloc);
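
/*
 * Illustrative usage sketch (not part of the original file): allocate
 * a zeroed DMA buffer for a controller and release it again. The 4 KiB
 * length is an arbitrary example value.
 */
static inline int i2o_example_dma_buffer(struct i2o_controller *c)
{
	struct i2o_dma buf;
	int rc;

	rc = i2o_dma_alloc(&c->pdev->dev, &buf, 4096);
	if (rc)
		return rc;

	/* ... use buf.virt and buf.phys ... */

	i2o_dma_free(&c->pdev->dev, &buf);
	return 0;
}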

/**
 *	i2o_dma_free - Free DMA memory
 *	@dev: struct device pointer to the PCI device of the I2O controller
 *	@addr: i2o_dma struct which contains the DMA buffer
 *
 *	Free coherent DMA memory and set the virtual address of addr to NULL.
 */
void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
{
	if (addr->virt) {
		if (addr->phys)
			dma_free_coherent(dev, addr->len, addr->virt,
					  addr->phys);
		else
			kfree(addr->virt);
		addr->virt = NULL;
	}
}
EXPORT_SYMBOL_GPL(i2o_dma_free);

/**
 *	i2o_dma_realloc - Realloc DMA memory
 *	@dev: struct device pointer to the PCI device of the I2O controller
 *	@addr: pointer to an i2o_dma struct describing the DMA buffer
 *	@len: new length of memory
 *
 *	If there is something allocated in addr, free it first. If len > 0,
 *	then try to allocate the memory and write the addresses back into the
 *	addr structure. If len == 0, set the virtual address to NULL.
 *
 *	Returns 0 on success or negative error code on failure.
 */
int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len)
{
	i2o_dma_free(dev, addr);

	if (len)
		return i2o_dma_alloc(dev, addr, len);

	return 0;
}
EXPORT_SYMBOL_GPL(i2o_dma_realloc);
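
/*
 * Illustrative usage sketch (not part of the original file): grow a
 * buffer to an assumed 8 KiB and later drop it by passing a zero
 * length, which leaves addr->virt == NULL as described above.
 */
static inline int i2o_example_resize(struct device *dev, struct i2o_dma *addr)
{
	int rc = i2o_dma_realloc(dev, addr, 8192);

	if (rc)
		return rc;

	/* ... use the larger buffer ... */

	return i2o_dma_realloc(dev, addr, 0);	/* frees the buffer */
}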

/**
 *	i2o_pool_alloc - Allocate a slab cache and mempool
 *	@pool: pointer to struct i2o_pool to write data into
 *	@name: name which is used to identify the cache
 *	@size: size of each object
 *	@min_nr: minimum number of objects
 *
 *	First allocates a slab cache with the given name and size, then
 *	allocates a mempool which uses that slab cache for allocating and
 *	freeing objects.
 *
 *	Returns 0 on success or negative error code on failure.
 */
int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
				 size_t size, int min_nr)
{
	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto exit;

	pool->slab =
	    kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!pool->slab)
		goto free_name;

	pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
	if (!pool->mempool)
		goto free_slab;

	return 0;

free_slab:
	kmem_cache_destroy(pool->slab);

free_name:
	kfree(pool->name);

exit:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(i2o_pool_alloc);
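
/*
 * Illustrative usage sketch (not part of the original file): create a
 * pool, take one object from it and give it back. The pool name and
 * the sizes are assumed example values.
 */
static inline int i2o_example_pool_use(void)
{
	struct i2o_pool pool;
	void *obj;
	int rc;

	rc = i2o_pool_alloc(&pool, "i2o:example", 64, 4);
	if (rc)
		return rc;

	obj = mempool_alloc(pool.mempool, GFP_KERNEL);
	if (obj)
		mempool_free(obj, pool.mempool);

	i2o_pool_free(&pool);
	return 0;
}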

/**
 *	i2o_pool_free - Free the slab cache and mempool again
 *	@pool: pointer to struct i2o_pool which should be freed
 *
 *	Note that you have to return all objects to the mempool before
 *	calling i2o_pool_free().
 */
void i2o_pool_free(struct i2o_pool *pool)
{
	mempool_destroy(pool->mempool);
	kmem_cache_destroy(pool->slab);
	kfree(pool->name);
}
EXPORT_SYMBOL_GPL(i2o_pool_free);