/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#ifndef __BNA_H__
#define __BNA_H__

#include "bfa_defs.h"
#include "bfa_ioc.h"
#include "bfi_enet.h"
#include "bna_types.h"

extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];

/*  Macros and constants  */

#define BNA_IOC_TIMER_FREQ		200

/* Log string size */
#define BNA_MESSAGE_SIZE		256

#define bna_is_small_rxq(_id) ((_id) & 0x1)

#define BNA_MAC_IS_EQUAL(_mac1, _mac2)					\
	(!memcmp((_mac1), (_mac2), sizeof(mac_t)))

#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)

#define BNA_TO_POWER_OF_2(x)						\
do {									\
	int _shift = 0;							\
	while ((x) && (x) != 1) {					\
		(x) >>= 1;						\
		_shift++;						\
	}								\
	(x) <<= _shift;							\
} while (0)

#define BNA_TO_POWER_OF_2_HIGH(x)					\
do {									\
	int n = 1;							\
	while (n < (x))							\
		n <<= 1;						\
	(x) = n;							\
} while (0)
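/*
 * Worked example (illustrative values): both macros modify their argument
 * in place, so it must be a side-effect free lvalue. BNA_TO_POWER_OF_2()
 * rounds down to the nearest power of two, BNA_TO_POWER_OF_2_HIGH() rounds
 * up. With x == 600, BNA_TO_POWER_OF_2(x) leaves x == 512, while
 * BNA_TO_POWER_OF_2_HIGH(x) leaves x == 1024.
 */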

/*
 * input : _addr-> os dma addr in host endian format,
 * output : _bna_dma_addr-> pointer to hw dma addr
 */
#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr)				\
do {									\
	u64 tmp_addr =						\
	cpu_to_be64((u64)(_addr));				\
	(_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
	(_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
} while (0)

/*
 * input : _bna_dma_addr-> pointer to hw dma addr
 * output : _addr-> os dma addr in host endian format
 */
#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr)			\
do {								\
	(_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32)		\
	| ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff));	\
} while (0)
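/*
 * Usage sketch (hypothetical variable names): round-tripping a host DMA
 * address through the hardware representation:
 *
 *	dma_addr_t host_addr;
 *	struct bna_dma_addr hw_addr;
 *
 *	BNA_SET_DMA_ADDR(host_addr, &hw_addr);
 *	BNA_GET_DMA_ADDR(&hw_addr, host_addr);
 */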

#define	containing_rec(addr, type, field)				\
	((type *)((unsigned char *)(addr) -				\
	(unsigned char *)(&((type *)0)->field)))

#define BNA_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)

/* TxQ element is 64 bytes */
#define BNA_TXQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 6)
#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 6)

#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{									\
	unsigned int page_index;	/* index within a page */	\
	void *page_addr;						\
	page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1);		\
	(_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index);	\
	page_addr = (_qpt_ptr)[((_qe_idx) >>  BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
	(_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
}

/* RxQ element is 8 bytes */
#define BNA_RXQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 3)
#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 3)

#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{									\
	unsigned int page_index;	/* index within a page */	\
	void *page_addr;						\
	page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1);		\
	(_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index);	\
	page_addr = (_qpt_ptr)[((_qe_idx) >>				\
				BNA_RXQ_PAGE_INDEX_MAX_SHIFT)];		\
	(_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
}

/* CQ element is 16 bytes */
#define BNA_CQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 4)
#define BNA_CQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 4)

#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range)	\
{									\
	unsigned int page_index;	  /* index within a page */	\
	void *page_addr;						\
									\
	page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1);		\
	(_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index);		\
	page_addr = (_qpt_ptr)[((_qe_idx) >>				\
				    BNA_CQ_PAGE_INDEX_MAX_SHIFT)];	\
	(_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
}
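/*
 * The three *_QPGE_PTR_GET() macros above share the same pattern: a queue is
 * split across pages, so an entry index is translated into a page pointer
 * plus an offset within that page, together with the number of entries left
 * on that page. Sketch for the CQ variant (names are placeholders):
 *
 *	struct bna_cq_entry *cmpl;
 *	u32 entries_on_page;
 *
 *	BNA_CQ_QPGE_PTR_GET(cons_idx, cq_page_ptrs, cmpl, entries_on_page);
 */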

#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base)			\
	(&((_cast *)(_q_base))[(_qe_idx)])

#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))

#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth)			\
	((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))

#define BNA_QE_INDX_INC(_idx, _q_depth) BNA_QE_INDX_ADD(_idx, 1, _q_depth)

#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)		\
	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))

#define BNA_QE_FREE_CNT(_q_ptr, _q_depth)				\
	(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) &	\
	 ((_q_depth) - 1))

#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth)				\
	((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) &	\
	 (_q_depth - 1))
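/*
 * The index arithmetic above relies on q_depth being a power of two.
 * Illustrative numbers: with q_depth == 64, producer_index == 10 and
 * consumer_index == 60, BNA_QE_IN_USE_CNT() evaluates to
 * (10 - 60) & 63 == 14 and BNA_QE_FREE_CNT() to (60 - 10 - 1) & 63 == 49,
 * i.e. one slot is always left unused to distinguish a full ring from an
 * empty one.
 */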

#define BNA_Q_GET_CI(_q_ptr)		((_q_ptr)->q.consumer_index)

#define BNA_Q_GET_PI(_q_ptr)		((_q_ptr)->q.producer_index)

#define BNA_Q_PI_ADD(_q_ptr, _num)					\
	(_q_ptr)->q.producer_index =					\
		(((_q_ptr)->q.producer_index + (_num)) &		\
		((_q_ptr)->q.q_depth - 1))

#define BNA_Q_CI_ADD(_q_ptr, _num)					\
	(_q_ptr)->q.consumer_index =					\
		(((_q_ptr)->q.consumer_index + (_num))			\
		& ((_q_ptr)->q.q_depth - 1))

#define BNA_Q_FREE_COUNT(_q_ptr)					\
	(BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))

#define BNA_Q_IN_USE_COUNT(_q_ptr)					\
	(BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))

#define BNA_LARGE_PKT_SIZE		1000

#define BNA_UPDATE_PKT_CNT(_pkt, _len)					\
do {									\
	if ((_len) > BNA_LARGE_PKT_SIZE) {				\
		(_pkt)->large_pkt_cnt++;				\
	} else {							\
		(_pkt)->small_pkt_cnt++;				\
	}								\
} while (0)

#define	call_rxf_stop_cbfn(rxf)						\
do {									\
	if ((rxf)->stop_cbfn) {						\
		void (*cbfn)(struct bna_rx *);			\
		struct bna_rx *cbarg;					\
		cbfn = (rxf)->stop_cbfn;				\
		cbarg = (rxf)->stop_cbarg;				\
		(rxf)->stop_cbfn = NULL;				\
		(rxf)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define	call_rxf_start_cbfn(rxf)					\
do {									\
	if ((rxf)->start_cbfn) {					\
		void (*cbfn)(struct bna_rx *);			\
		struct bna_rx *cbarg;					\
		cbfn = (rxf)->start_cbfn;				\
		cbarg = (rxf)->start_cbarg;				\
		(rxf)->start_cbfn = NULL;				\
		(rxf)->start_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define	call_rxf_cam_fltr_cbfn(rxf)					\
do {									\
	if ((rxf)->cam_fltr_cbfn) {					\
		void (*cbfn)(struct bnad *, struct bna_rx *);	\
		struct bnad *cbarg;					\
		cbfn = (rxf)->cam_fltr_cbfn;				\
		cbarg = (rxf)->cam_fltr_cbarg;				\
		(rxf)->cam_fltr_cbfn = NULL;				\
		(rxf)->cam_fltr_cbarg = NULL;				\
		cbfn(cbarg, rxf->rx);					\
	}								\
} while (0)

#define	call_rxf_pause_cbfn(rxf)					\
do {									\
	if ((rxf)->oper_state_cbfn) {					\
		void (*cbfn)(struct bnad *, struct bna_rx *);	\
		struct bnad *cbarg;					\
		cbfn = (rxf)->oper_state_cbfn;				\
		cbarg = (rxf)->oper_state_cbarg;			\
		(rxf)->oper_state_cbfn = NULL;				\
		(rxf)->oper_state_cbarg = NULL;				\
		cbfn(cbarg, rxf->rx);					\
	}								\
} while (0)

#define	call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)

#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))

#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))

#define xxx_enable(mode, bitmask, xxx)					\
do {									\
	bitmask |= xxx;							\
	mode |= xxx;							\
} while (0)

#define xxx_disable(mode, bitmask, xxx)					\
do {									\
	bitmask |= xxx;							\
	mode &= ~xxx;							\
} while (0)

#define xxx_inactive(mode, bitmask, xxx)				\
do {									\
	bitmask &= ~xxx;						\
	mode &= ~xxx;							\
} while (0)

#define is_promisc_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_promisc_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_inactive(mode, bitmask)					\
	xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_default_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_default_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_inactive(mode, bitmask)					\
	xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_allmulti_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define is_allmulti_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_inactive(mode, bitmask)				\
	xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define	GET_RXQS(rxp, q0, q1)	do {					\
	switch ((rxp)->type) {						\
	case BNA_RXP_SINGLE:						\
		(q0) = rxp->rxq.single.only;				\
		(q1) = NULL;						\
		break;							\
	case BNA_RXP_SLR:						\
		(q0) = rxp->rxq.slr.large;				\
		(q1) = rxp->rxq.slr.small;				\
		break;							\
	case BNA_RXP_HDS:						\
		(q0) = rxp->rxq.hds.data;				\
		(q1) = rxp->rxq.hds.hdr;				\
		break;							\
	}								\
} while (0)
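/*
 * Usage sketch (hypothetical variables): fetching the RxQs behind an RxP
 * independent of its type; q1 comes back NULL for BNA_RXP_SINGLE, and is
 * the small/header queue for the SLR/HDS types:
 *
 *	struct bna_rxq *q0, *q1;
 *
 *	GET_RXQS(rxp, q0, q1);
 */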

#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask)

#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)

#define bna_tx_from_rid(_bna, _rid, _tx)				\
do {								    \
	struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod;	  \
	struct bna_tx *__tx;					    \
	struct list_head *qe;					   \
	_tx = NULL;						     \
	list_for_each(qe, &__tx_mod->tx_active_q) {		     \
		__tx = (struct bna_tx *)qe;			     \
		if (__tx->rid == (_rid)) {			      \
			(_tx) = __tx;				   \
			break;					  \
		}						       \
	}							       \
} while (0)

#define bna_rx_from_rid(_bna, _rid, _rx)				\
do {									\
	struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod;			\
	struct bna_rx *__rx;						\
	struct list_head *qe;						\
	_rx = NULL;							\
	list_for_each(qe, &__rx_mod->rx_active_q) {			\
		__rx = (struct bna_rx *)qe;				\
		if (__rx->rid == (_rid)) {				\
			(_rx) = __rx;					\
			break;						\
		}							\
	}								\
} while (0)

/*  Inline functions  */

static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	list_for_each(qe, q) {
		if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
			mac = (struct bna_mac *)qe;
			break;
		}
	}
	return mac;
}
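/*
 * Example (hypothetical list): bna_mac_find() returns NULL when the address
 * is not on the given list:
 *
 *	struct bna_mac *mac;
 *
 *	mac = bna_mac_find(&mac_list, addr);
 *	if (mac == NULL)
 *		... the address is not on the list ...
 */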

#define bna_attr(_bna) (&(_bna)->ioceth.attr)

/* Function prototypes */

/* BNA */

/* FW response handlers */
void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);

/* APIs for BNAD */
void bna_res_req(struct bna_res_info *res_info);
void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info);
void bna_init(struct bna *bna, struct bnad *bnad,
			struct bfa_pcidev *pcidev,
			struct bna_res_info *res_info);
void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
void bna_uninit(struct bna *bna);
int bna_num_txq_set(struct bna *bna, int num_txq);
int bna_num_rxp_set(struct bna *bna, int num_rxp);
void bna_hw_stats_get(struct bna *bna);

/* APIs for RxF */
struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
			  struct bna_mac *mac);
struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
			  struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			  struct bna_mcam_handle *handle);

/* MBOX */

/* API for BNAD */
void bna_mbox_handler(struct bna *bna, u32 intr_status);

/* ETHPORT */

/* Callbacks for RX */
void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);

/* TX MODULE AND TX */

/* FW response handlers */
void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
			       struct bfi_msgq_mhdr *msghdr);
void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx,
			      struct bfi_msgq_mhdr *msghdr);
void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod);

/* APIs for BNA */
void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		     struct bna_res_info *res_info);
void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);

/* APIs for ENET */
void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);

/* APIs for BNAD */
void bna_tx_res_req(int num_txq, int txq_depth,
		    struct bna_res_info *res_info);
struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
			       struct bna_tx_config *tx_cfg,
			       const struct bna_tx_event_cbfn *tx_cbfn,
			       struct bna_res_info *res_info, void *priv);
void bna_tx_destroy(struct bna_tx *tx);
void bna_tx_enable(struct bna_tx *tx);
void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		    void (*cbfn)(void *, struct bna_tx *));
void bna_tx_cleanup_complete(struct bna_tx *tx);
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);

/* RX MODULE, RX, RXF */

/* FW response handlers */
void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
			       struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
			      struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			       struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
			       struct bfi_msgq_mhdr *msghdr);

/* APIs for BNA */
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
		     struct bna_res_info *res_info);
void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);

/* APIs for ENET */
void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);

/* APIs for BNAD */
void bna_rx_res_req(struct bna_rx_config *rx_config,
		    struct bna_res_info *res_info);
struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
			       struct bna_rx_config *rx_cfg,
			       const struct bna_rx_event_cbfn *rx_cbfn,
			       struct bna_res_info *res_info, void *priv);
void bna_rx_destroy(struct bna_rx *rx);
void bna_rx_enable(struct bna_rx *rx);
void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		    void (*cbfn)(void *, struct bna_rx *));
void bna_rx_cleanup_complete(struct bna_rx *rx);
void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_add(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
		     void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *));
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);

/* ENET */

/* API for RX */
int bna_enet_mtu_get(struct bna_enet *enet);

/* Callbacks for TX, RX */
void bna_enet_cb_tx_stopped(struct bna_enet *enet);
void bna_enet_cb_rx_stopped(struct bna_enet *enet);

/* API for BNAD */
void bna_enet_enable(struct bna_enet *enet);
void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		      void (*cbfn)(void *));
void bna_enet_pause_config(struct bna_enet *enet,
			   struct bna_pause_config *pause_config,
			   void (*cbfn)(struct bnad *));
void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		      void (*cbfn)(struct bnad *));
void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);

/* IOCETH */

/* APIs for BNAD */
void bna_ioceth_enable(struct bna_ioceth *ioceth);
void bna_ioceth_disable(struct bna_ioceth *ioceth,
			enum bna_cleanup_type type);

/* BNAD */

/* Callbacks for ENET */
void bnad_cb_ethport_link_status(struct bnad *bnad,
			      enum bna_link_status status);

/* Callbacks for IOCETH */
void bnad_cb_ioceth_ready(struct bnad *bnad);
void bnad_cb_ioceth_failed(struct bnad *bnad);
void bnad_cb_ioceth_disabled(struct bnad *bnad);
void bnad_cb_mbox_intr_enable(struct bnad *bnad);
void bnad_cb_mbox_intr_disable(struct bnad *bnad);

/* Callbacks for BNA */
void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats);

#endif  /* __BNA_H__ */