/*
 * WUSB Wire Adapter: WLP interface
 * Driver for the Linux Network stack.
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * i1480u's RX handling is simple. The i1480u sends the received
 * network packets broken up in fragments; 1 to N fragments make a
 * packet, we reassemble them and deliver the packet with netif_rx().
 *
 * Because each USB transfer is a *single* fragment (except when the
 * transfer contains a first fragment), each URB that is called back
 * contains one or two fragments. So we queue N URBs, each with its own
 * fragment buffer. When a URB is done, we process it (appending to the
 * current skb from the fragment buffer until complete). Once
 * processed, we requeue the URB. There is always a bunch of URBs
 * ready to take data, so the gap between receptions should be minimal.
 *
 * A URB's transfer buffer is the data field of a socket buffer. This
 * reduces copying, as data can be passed directly to the network layer.
 * If a complete packet or a 1st fragment is received, the URB's transfer
 * buffer is taken away from it and used to send data to the network
 * layer. In this case a new transfer buffer is allocated to the URB
 * before it is requeued. If a "NEXT" or "LAST" fragment is received, the
 * fragment contents are appended to the RX packet under construction and
 * the transfer buffer is reused. To be able to use this buffer to
 * assemble complete packets we set each buffer's size to that of the MAX
 * ethernet packet that can be received. There is thus room for
 * improvement in memory usage.
 *
 * When the max tx fragment size increases, we should be able to read
 * data into the skbs directly with very simple code.
 *
 * ROADMAP:
 *
 *   ENTRY POINTS:
 *
 *     i1480u_rx_setup(): setup RX context [from i1480u_open()]
 *
 *     i1480u_rx_release(): release RX context [from i1480u_stop()]
 *
 *     i1480u_rx_cb(): called when the RX USB URB receives a
 *                     packet. It removes the header and pushes it up
 *                     the Linux netdev stack with netif_rx().
 *
 *       i1480u_rx_buffer()
 *         i1480u_drop() and i1480u_fix()
 *         i1480u_skb_deliver()
 *
 */

#include <linux/gfp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "i1480u-wlp.h"
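
/*
 * Illustrative sketch only (not part of the driver, guarded out below):
 * rough layout of a single USB transfer buffer as assumed by the header
 * arithmetic in i1480u_rx_buffer(). The actual struct definitions live
 * in i1480u-wlp.h; this only shows why a fragment is first claimed with
 * skb_put() and the hardware headers then removed with skb_pull().
 *
 *   1st or complete fragment:
 *     [ untd_hdr_1st / untd_hdr_cmp ][ wlp_rx_hdr ][ network payload ... ]
 *     |<------ untd_hdr_size ------>|<- i1480u_hdr_size ->|
 *
 *   next or last fragment:
 *     [ untd_hdr_rst ][ payload continuation ... ]
 *     |<- untd_hdr_size ->|
 */
#if 0	/* example mirroring the skb accounting done in i1480u_rx_buffer() */
	skb_put(skb, untd_hdr_size + untd_frg_size);	/* claim the whole fragment */
	skb_pull(skb, untd_hdr_size + i1480u_hdr_size);	/* strip UNTD + WLP headers */
#endif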

/*
 * Set up the RX context
 *
 * Each URB is provided with a transfer_buffer that is the data field
 * of a new socket buffer.
 */
int i1480u_rx_setup(struct i1480u *i1480u)
{
	int result, cnt;
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct usb_endpoint_descriptor *epd;
	struct sk_buff *skb;

	/* Alloc RX stuff */
	i1480u->rx_skb = NULL;	/* not in process of receiving packet */
	result = -ENOMEM;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
		rx_buf->i1480u = i1480u;
		skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!skb) {
			dev_err(dev,
				"RX: cannot allocate RX buffer %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		skb->dev = net_dev;
		skb->ip_summed = CHECKSUM_NONE;
		skb_reserve(skb, 2);	/* 2 bytes of headroom; the URB below is sized i1480u_MAX_RX_PKT_SIZE - 2 to match */
		rx_buf->data = skb;
		rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (unlikely(rx_buf->urb == NULL)) {
			dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
			  usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
			  rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
			  i1480u_rx_cb, rx_buf);
		result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
		if (unlikely(result < 0)) {
			dev_err(dev, "RX: cannot submit URB %d: %d\n",
				cnt, result);
			goto error;
		}
	}
	return 0;

error:
	i1480u_rx_release(i1480u);
	return result;
}


/* Release resources associated with the RX context */
void i1480u_rx_release(struct i1480u *i1480u)
{
	int cnt;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		if (i1480u->rx_buf[cnt].data)
			dev_kfree_skb(i1480u->rx_buf[cnt].data);
		if (i1480u->rx_buf[cnt].urb) {
			usb_kill_urb(i1480u->rx_buf[cnt].urb);
			usb_free_urb(i1480u->rx_buf[cnt].urb);
		}
	}
	if (i1480u->rx_skb != NULL)
		dev_kfree_skb(i1480u->rx_skb);
}
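
/*
 * Pairing sketch (guarded out; an assumption based on the ROADMAP at the
 * top of this file, not code taken from the netdev callbacks themselves):
 */
#if 0
	/* in i1480u_open() */
	result = i1480u_rx_setup(i1480u);
	/* ... */
	/* in i1480u_stop() */
	i1480u_rx_release(i1480u);
#endif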

static
void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
{
	int cnt;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		if (i1480u->rx_buf[cnt].urb)
			usb_unlink_urb(i1480u->rx_buf[cnt].urb);
	}
}

/* Recover from an out-of-sequence fragment by discarding the packet under construction */
#define i1480u_fix(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	dev_kfree_skb_irq(i1480u->rx_skb);		\
	i1480u->rx_skb = NULL;				\
	i1480u->rx_untd_pkt_size = 0;			\
} while (0)


/* Drop a bad or out-of-sequence fragment and account it in the RX drop statistics */
#define i1480u_drop(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	i1480u->net_dev->stats.rx_dropped++;			\
} while (0)
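
/*
 * Usage sketch (guarded out; mirrors calls in i1480u_rx_buffer() below):
 * i1480u_fix() resets the reassembly state, i1480u_drop() only accounts
 * the drop.
 */
#if 0
	if (i1480u->rx_skb != NULL)
		i1480u_fix(i1480u, "RX: 1st fragment out of sequence! Fixing\n");
	if (size_left < sizeof(*untd_hdr))
		i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
#endif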




/* Finalizes setting up the SKB and delivers it
 *
 * We first pass the incoming frame to the WLP substack for verification.
 * It may also be a WLP association frame, in which case WLP will take
 * over the processing. If WLP does not take it over, it will still
 * verify it; if the frame is invalid, the skb will be freed by WLP and
 * we will not continue parsing.
 */
static
void i1480u_skb_deliver(struct i1480u *i1480u)
{
	int should_parse;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;

	should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
					 &i1480u->rx_srcaddr);
	if (!should_parse)
		goto out;
	i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += i1480u->rx_untd_pkt_size;

	netif_rx(i1480u->rx_skb);		/* deliver */
out:
	i1480u->rx_skb = NULL;
	i1480u->rx_untd_pkt_size = 0;
}


/*
 * Process a buffer of data received from the USB RX endpoint
 *
 * A first fragment arrives in the same transfer as a next or last
 * fragment; all other fragments arrive alone.
 *
 * /me hates long functions.
 */
static
void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
{
	unsigned pkt_completed = 0;	/* !0 when we got all pkt fragments */
	size_t untd_hdr_size, untd_frg_size;
	size_t i1480u_hdr_size;
	struct wlp_rx_hdr *i1480u_hdr = NULL;

	struct i1480u *i1480u = rx_buf->i1480u;
	struct sk_buff *skb = rx_buf->data;
	int size_left = rx_buf->urb->actual_length;
	void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
	struct untd_hdr *untd_hdr;

	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	struct sk_buff *new_skb;

#if 0
	dev_fnstart(dev,
		    "(i1480u %p ptr %p size_left %d)\n", i1480u, ptr, size_left);
	dev_err(dev, "RX packet, %d bytes\n", size_left);
	dump_bytes(dev, ptr, size_left);
#endif
	i1480u_hdr_size = sizeof(struct wlp_rx_hdr);

	while (size_left > 0) {
		if (pkt_completed) {
			i1480u_drop(i1480u, "RX: fragment follows completed "
					 "packet in same buffer. Dropping\n");
			break;
		}
		untd_hdr = ptr;
		if (size_left < sizeof(*untd_hdr)) {	/*  Check the UNTD header */
			i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
			goto out;
		}
		if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) {	/* Paranoia: TX set? */
			i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
			goto out;
		}
		switch (untd_hdr_type(untd_hdr)) {	/* Check the UNTD header type */
		case i1480u_PKT_FRAG_1ST: {
			struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
			dev_dbg(dev, "1st fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_1st);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: 1st fragment out of "
					"sequence! Fixing\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short 1st fragment! "
					"Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
						 - i1480u_hdr_size;
			untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			rx_buf->data = NULL; /* need to create new buffer */
			break;
		}
		case i1480u_PKT_FRAG_NXT: {
			dev_dbg(dev, "nxt fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: next fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short NXT fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
					ptr + untd_hdr_size, untd_frg_size);
			break;
		}
		case i1480u_PKT_FRAG_LST: {
			dev_dbg(dev, "Lst fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: last fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short LST fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_frg_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
					ptr + untd_hdr_size, untd_frg_size);
			pkt_completed = 1;
			break;
		}
		case i1480u_PKT_FRAG_CMP: {
			dev_dbg(dev, "cmp fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_cmp);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
					   " fragment!\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short CMP fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
			untd_frg_size = i1480u->rx_untd_pkt_size;
			if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			rx_buf->data = NULL;	/* skb will be handed off to the network stack */
			pkt_completed = 1;
			i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
			break;
		}
		default:
			i1480u_drop(i1480u, "RX: unknown packet type %u! "
				    "Dropping\n", untd_hdr_type(untd_hdr));
			goto out;
		}
		size_left -= untd_hdr_size + untd_frg_size;
		if (size_left > 0)
			ptr += untd_hdr_size + untd_frg_size;
	}
	if (pkt_completed)
		i1480u_skb_deliver(i1480u);
out:
	/* recreate needed RX buffers */
	if (rx_buf->data == NULL) {
		/* buffer is being used to receive a packet; create a new one */
		new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!new_skb) {
			if (printk_ratelimit())
				dev_err(dev,
				"RX: cannot allocate RX buffer\n");
		} else {
			new_skb->dev = net_dev;
			new_skb->ip_summed = CHECKSUM_NONE;
			skb_reserve(new_skb, 2);
			rx_buf->data = new_skb;
		}
	}
	return;
}


/*
 * Called when an RX URB has finished receiving or has found some kind
 * of error condition.
 *
 * LIMITATIONS:
 *
 *  - We read USB transfers; each transfer contains a SINGLE fragment
 *    (it can contain a complete packet, or a 1st, next, or last fragment
 *    of a packet).
 *    Looks like a transfer can contain more than one fragment (07/18/06)
 *
 *  - Each transfer buffer is the size of the maximum packet size (minus
 *    headroom), i1480u_MAX_RX_PKT_SIZE - 2
 *
 *  - We always read the full USB transfer, no partials.
 *
 *  - Each transfer is read directly into an skb. This skb will be used to
 *    send data to the upper layers if it is the first fragment or a
 *    complete packet. In the other cases the data will be copied from the
 *    skb to another skb that is being prepared for the upper layers from
 *    a previous first fragment.
 *
 * It is simply too much of a pain. Gosh, there should be a unified
 * SG infrastructure for *everything* [so that I could declare a SG
 * buffer, pass it to USB for receiving, append some space to it if
 * I wish, receive more until I have the whole chunk, adapt
 * pointers on each fragment to remove hardware headers and then
 * attach that to an skbuff and netif_rx()].
 */
void i1480u_rx_cb(struct urb *urb)
{
	int result;
	int do_parse_buffer = 1;
	struct i1480u_rx_buf *rx_buf = urb->context;
	struct i1480u *i1480u = rx_buf->i1480u;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;
	u8 rx_buf_idx = rx_buf - i1480u->rx_buf;

	switch (urb->status) {
	case 0:
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_err(dev, "RX URB[%u]: going down %d\n",
			rx_buf_idx, urb->status);
		goto error;
	default:
		dev_err(dev, "RX URB[%u]: unknown status %d\n",
			rx_buf_idx, urb->status);
		if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
					EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "RX: max acceptable errors exceeded,"
					" resetting device.\n");
			i1480u_rx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
			goto error;
		}
		do_parse_buffer = 0;
		break;
	}
	spin_lock_irqsave(&i1480u->lock, flags);
	/* chew the data fragments, extract network packets */
	if (do_parse_buffer) {
		i1480u_rx_buffer(rx_buf);
		if (rx_buf->data) {
			rx_buf->urb->transfer_buffer = rx_buf->data->data;
			result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
			if (result < 0) {
				dev_err(dev, "RX URB[%u]: cannot submit %d\n",
					rx_buf_idx, result);
			}
		}
	}
	spin_unlock_irqrestore(&i1480u->lock, flags);
error:
	return;
}