/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#define USE_TASKLET
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL			/* if memory is tight try this */
#undef USE_RBPL_POOL			/* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* version definition */

static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
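/* (after the initial chip-select entries, each pair of entries below
 *  toggles CLK to shift one opcode bit out on SI; SI_HIGH is asserted
 *  only for the trailing '1' bits) */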
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};     
 
/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};     

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,	
	.ioctl =	he_ioctl,	
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))
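
/* register writes are posted: he_writel() orders them with wmb(), and
 * he_writel_internal() below reads CON_DAT back to flush them before
 * starting a connection memory transaction */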

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);	/* wait for the write to complete */
}

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);	/* wait for the read to complete */
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
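
/* e.g. with 10 vcibits, he_mkcid(dev, 1, 32) = ((1 << 10) | 32) & 0x1fff
 * = 0x420 (illustrative values; vcibits is configured in he_start()) */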

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
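
/* (with bytes 2..0 disabled, only the most significant byte is written,
 *  as the note above requires) */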

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

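/* the inverse of he_mkcid: recover (vpi, vci) from a connection id and
 * look the open connection up in the global vcc hash table */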
static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
				return vcc;
		}
	}
	return NULL;
}

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "he: %s\n", version);

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


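/* the ATM Forum format encodes a rate as 2^exp * (512 + man) / 512 cells
 * per second (see the matching decode in he_init_cs_block_rcm() below).
 * A worked example for this routine: 1000 cps << 9 = 512000, which shifts
 * right nine times to 1000, so exp = 9, man = 1000 & 0x1ff = 488, and the
 * result is NONZERO | (9 << 9) | 488; decoding gives
 * 2^9 * (512 + 488) / 512 = 1000 cps exactly. */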
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}

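/* 5.1.4: the three routines below build the linked lists of local-buffer
 * descriptors in RCM local-buffer memory: each descriptor entry holds a
 * buffer address and the index of the next descriptor, after which the
 * head, tail and (for rx) count registers are primed */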
static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
	
	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}
		
	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
	
	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}
		
	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
	
	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}
		
	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);	
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
	 	 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
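		/* e.g. on a 155 Mbit card (50 MHz clock), the first entry is
		 * 50000000 / 353207 cps ~= 141 ticks (illustrative numbers) */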
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
	
	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */
	
	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 * 
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
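
	/* row 0 now spans link_rate down to about link_rate/2 in sixteen
	 * steps; each later row halves the one above (the last row quarters
	 * it), so the grid covers roughly 2^16 of dynamic range */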

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid  
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/* 
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
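
		/* i.e. rate_cps = 2^exp * (512 + man) / 512, the inverse of
		 * rate_to_atmf() above */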

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: a rate grid index (8 bits)
		 * and a buffer limit (8 bits);
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400
	  
		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

#ifdef USE_RBPS
	/* small buffer pool */
#ifdef USE_RBPS_POOL
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pages\n");
		return -ENOMEM;
	}
#else /* !USE_RBPS_POOL */
	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
	if (he_dev->rbps_pages == NULL) {
		hprintk("unable to create rbps page pool\n");
		return -ENOMEM;
	}
#endif /* USE_RBPS_POOL */

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPS_POOL 
		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;

	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */

	/* large buffer pool */
#ifdef USE_RBPL_POOL
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPL_POOL */
	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
	if (he_dev->rbpl_pages == NULL) {
		hprintk("unable to create rbpl pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPL_POOL */

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPL_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;
}

static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}   

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;
	
	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization 
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 * 
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
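	 * i.e. LAT_TIMER = 1 + 16 + 192 = 209, hence the value below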
	 */ 
#define LAT_TIMER 209
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
					he_dev->media & 0x40 ? "SM" : "MM",
						dev->esi[0],
						dev->esi[1],
						dev->esi[2],
						dev->esi[3],
						dev->esi[4],
						dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *                    
	 *             HE155                          HE622
	 *                                                      
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev, 
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev, 
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory 
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);
	
	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */
	
	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;         
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}
		
	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
		tasklet_disable(&he_dev->tasklet);
#endif

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
							he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}
	
	if (he_dev->membase)
		iounmap(he_dev->membase);
}
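
/*
 * allocate a transmit packet descriptor: with USE_TPD_POOL the tpds come
 * from a pci_pool and completions are matched up via the outstanding_tpds
 * list; without it we scan the static tpd ring for the next entry not
 * marked inuse.
 */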

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
	struct he_tpd *tpd;
	dma_addr_t dma_handle; 

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;
			
	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0; 
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
#else
	int i;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		++he_dev->tpd_head;
		if (he_dev->tpd_head > he_dev->tpd_end) {
			he_dev->tpd_head = he_dev->tpd_base;
		}

		if (!he_dev->tpd_head->inuse) {
			he_dev->tpd_head->inuse = 1;
			he_dev->tpd_head->status &= TPD_MASK;
			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
			return he_dev->tpd_head;
		}
	}
	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
	return NULL;
#endif
}
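
/*
 * the aal5 cpcs-pdu trailer occupies the last eight bytes of the
 * reassembled pdu (cpcs-uu, cpi, a two byte length field and the crc-32),
 * so AAL5_LEN() reads the length field from bytes len-6 and len-5.
 */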

#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))
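
/*
 * drain the receive buffer return queue for a group: walk entries from
 * rbrq_head to the tail published in the host status page, gather the
 * returned buffers into the connection's iovec until END_PDU, then copy
 * them into an skb and push it up to the atm layer.  buffers are handed
 * back to the adapter by clearing RBP_LOANED.
 */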

static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

#ifdef USE_RBPS
		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
#endif
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		
		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		cid = RBRQ_CID(he_dev->rbrq_head);

		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;

			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
			else
#endif
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
		}

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb->tail = skb->data + skb->len;
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb->tail = skb->data + skb->len;
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
#endif
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
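
/*
 * drain the transmit buffer return queue: each entry carries the dma
 * address of a completed tpd.  locate the tpd (on the outstanding_tpds
 * list with USE_TPD_POOL, by index otherwise), unmap its buffers, pop or
 * free the skb and recycle the descriptor.
 */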

static void
he_service_tbrq(struct he_dev *he_dev, int group)
{
	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
				((unsigned long)he_dev->tbrq_base |
					he_dev->hsp->group[group].tbrq_tail);
	struct he_tpd *tpd;
	int slot, updated = 0;
#ifdef USE_TPD_POOL
	struct he_tpd *__tpd;
#endif

	/* 2.1.6 transmit buffer return queue */

	while (he_dev->tbrq_head != tbrq_tail) {
		++updated;

		HPRINTK("tbrq%d 0x%x%s%s\n",
			group,
			TBRQ_TPD(he_dev->tbrq_head), 
			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
#ifdef USE_TPD_POOL
		tpd = NULL;
		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
				tpd = __tpd;
				list_del(&__tpd->entry);
				break;
			}
		}

		if (tpd == NULL) {
			hprintk("unable to locate tpd for dma buffer %x\n",
						TBRQ_TPD(he_dev->tbrq_head));
			goto next_tbrq_entry;
		}
#else
		tpd = &he_dev->tpd_base[TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head))];
#endif

		if (TBRQ_EOS(he_dev->tbrq_head)) {
			if (tpd->vcc) {
				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
			}

			goto next_tbrq_entry;
		}

		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
			if (tpd->iovec[slot].addr)
				pci_unmap_single(he_dev->pci_dev,
					tpd->iovec[slot].addr,
					tpd->iovec[slot].len & TPD_LEN_MASK,
							PCI_DMA_TODEVICE);
			if (tpd->iovec[slot].len & TPD_LST)
				break;
		}

		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
		}

next_tbrq_entry:
#ifdef USE_TPD_POOL
		if (tpd)
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
		tpd->inuse = 0;
#endif
		he_dev->tbrq_head = (struct he_tbrq *)
				((unsigned long) he_dev->tbrq_base |
					TBRQ_MASK(++he_dev->tbrq_head));
	}

	if (updated) {
		if (updated > he_dev->tbrq_peak)
			he_dev->tbrq_peak = updated;

		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
						G0_TBRQ_H + (group * 16));
	}
}
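
/*
 * replenish the large receive buffer pool: loan free buffers back to the
 * adapter by setting RBP_LOANED and advancing the tail, stopping at the
 * first buffer still loaned out or just short of the head (table 3.42).
 */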


static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *newtail;
	struct he_rbp *rbpl_head;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
			break;

		newtail->status |= RBP_LOANED;
		he_dev->rbpl_tail = newtail;
		++moved;
	} 

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}

#ifdef USE_RBPS
static void
he_service_rbps(struct he_dev *he_dev, int group)
{
	struct he_rbp *newtail;
	struct he_rbp *rbps_head;
	int moved = 0;

	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
						RBPS_MASK(he_dev->rbps_tail+1));

		/* table 3.42 -- rbps_tail should never be set to rbps_head */
		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
			break;

		newtail->status |= RBP_LOANED;
		he_dev->rbps_tail = newtail;
		++moved;
	} 

	if (moved)
		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
}
#endif /* USE_RBPS */
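
/*
 * bottom half: dispatch each interrupt status word the adapter has queued
 * to the matching rbrq/tbrq/rbp service routine, then acknowledge the
 * batch by writing the new tail back to IRQ0_HEAD.
 */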

static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
#ifdef USE_TASKLET
	spin_lock_irqsave(&he_dev->global_lock, flags);
#endif

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				if (he_service_rbrq(he_dev, group)) {
					he_service_rbpl(he_dev, group);
#ifdef USE_RBPS
					he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				}
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
#ifdef USE_RBPS
				he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
#ifdef USE_RBPS
				he_service_rbps(he_dev, 0);
#endif /* USE_RBPS */
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
	}
#ifdef USE_TASKLET
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
#endif
}
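
/*
 * the adapter publishes the current irq queue tail at *irq_tailoffset; if
 * it differs from our cached head, kick the tasklet (or run it inline
 * without USE_TASKLET) and clear the interrupt fifo.
 */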

static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
#ifdef USE_TASKLET
		tasklet_schedule(&he_dev->tasklet);
#else
		he_tasklet((unsigned long) he_dev);
#endif
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);

}
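
/*
 * queue a tpd on the transmit packet descriptor ready queue.  the cached
 * head is refreshed from the adapter only when tail+1 appears to collide
 * with it; a genuinely full ring currently drops the pdu (see the FIXME
 * below).
 */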

static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					pci_unmap_single(he_dev->pci_dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								PCI_DMA_TODEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
#ifdef USE_TPD_POOL
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
			tpd->inuse = 0;
#endif
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
#ifdef USE_TPD_POOL
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
#else
	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
#endif
	he_dev->tpdrq_tail->cid = cid;
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
}

static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	he_vcc->iov_tail = he_vcc->iov_head;
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
						break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;
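
				/*
				 * the period below converts the requested pcr
				 * into core clock ticks: 66.667MHz core clock
				 * on the 622 parts, 50MHz on the 155 parts
				 */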

				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;
				
				HPRINTK("rc_index = %d period = %d\n",
								reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);

				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
		 				&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

#ifdef USE_RBPS
		rsr1 = RSR1_GROUP(0);
		rsr4 = RSR4_GROUP(0);
#else /* !USE_RBPS */
		rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
#endif /* USE_RBPS */
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? 
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
			  the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	} else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}

static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);

	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);
		
		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */
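
		/*
		 * poll sk_wmem_alloc with exponential backoff (1ms, doubling
		 * until it caps near 250ms, at most MAX_RETRY polls) until
		 * the socket's transmit memory has drained
		 */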

		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev, 
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */

		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}

static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff
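/* the tpd length fields appear to be 16 bits wide, hence the 0xffff cap */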

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}

#ifdef USE_SCATTERGATHER
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
				skb->len - skb->data_len, PCI_DMA_TODEVICE);
	tpd->iovec[slot].len = skb->len - skb->data_len;
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so don't ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
			(void *) page_address(frag->page) + frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;

	}

	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;

	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}

static int
he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	struct he_ioctl_reg reg;
	int err = 0;

	switch (cmd) {
		case HE_GET_REG:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;

			if (copy_from_user(&reg, arg,
					   sizeof(struct he_ioctl_reg)))
				return -EFAULT;
			
			spin_lock_irqsave(&he_dev->global_lock, flags);
			switch (reg.type) {
				case HE_REGTYPE_PCI:
					reg.val = he_readl(he_dev, reg.addr);
					break;
				case HE_REGTYPE_RCM:
					reg.val =
						he_readl_rcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_TCM:
					reg.val =
						he_readl_tcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_MBOX:
					reg.val =
						he_readl_mbox(he_dev, reg.addr);
					break;
				default:
					err = -EINVAL;
					break;
			}
			spin_unlock_irqrestore(&he_dev->global_lock, flags);
			if (err == 0)
				if (copy_to_user(arg, &reg,
							sizeof(struct he_ioctl_reg)))
					return -EFAULT;
			break;
		default:
#ifdef CONFIG_ATM_HE_USE_SUNI
			if (atm_dev->phy && atm_dev->phy->ioctl)
				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
#else /* CONFIG_ATM_HE_USE_SUNI */
			err = -EINVAL;
#endif /* CONFIG_ATM_HE_USE_SUNI */
			break;
	}

	return err;
}

static void
he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);

	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);

	spin_lock_irqsave(&he_dev->global_lock, flags);
	he_writel(he_dev, val, FRAMER + (addr*4));
	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}

static unsigned char
he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
{ 
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	unsigned reg;

	spin_lock_irqsave(&he_dev->global_lock, flags);
	reg = he_readl(he_dev, FRAMER + (addr*4));
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
	return reg;
}

static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail, inuse;
#endif
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;


	left = *pos;
	if (!left--)
		return sprintf(page, "%s\n", version);

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n", 
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);


#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);

	return 0;
}

/* eeprom routines  -- see 4.7 */
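
/*
 * the eeprom is bit-banged through HOST_CNTL: raise the write enable bit,
 * clock out the READ opcode and the address msb-first (two writes per bit
 * via clocktab), then drop write enable and clock the eight data bits
 * back in through ID_DOUT.
 */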

u8
read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;
       
	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);
       
	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
       
	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
       
	j = 0;

	val &= 0xFFFFF7FF;      /* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);
       
	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
       
	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	__devexit_p(he_remove_one),
	.id_table =	he_pci_tbl,
};

static int __init he_init(void)
{
	return pci_register_driver(&he_driver);
}

static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}

module_init(he_init);
module_exit(he_cleanup);