author     Linus Torvalds <torvalds@woody.osdl.org>   2006-12-02 15:08:32 -0800
committer  Linus Torvalds <torvalds@woody.osdl.org>   2006-12-02 15:08:32 -0800
commit     97be852f81c5bb114aab31974af2c061eb86a6de
tree       701a9c88eef7fc3692150f5dd7edb226a6089173
parent     cdb54fac35812a21943f0e506e8e3b94b469a77c
parent     aae343d493df965ac3abec1bd97cccfe44a7d920
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (118 commits)
  [netdrvr] skge: build fix
  [PATCH] NetXen: driver cleanup, removed unnecessary __iomem type casts
  [PATCH] PHY: Add support for configuring the PHY connection interface
  [PATCH] chelesio: transmit locking (plus bug fix).
  [PATCH] chelsio: statistics improvement
  [PATCH] chelsio: add MSI support
  [PATCH] chelsio: use standard CRC routines
  [PATCH] chelsio: cleanup pm3393 code
  [PATCH] chelsio: add 1G swcixw aupport
  [PATCH] chelsio: add support for other 10G boards
  [PATCH] chelsio: remove unused mutex
  [PATCH] chelsio: use kzalloc
  [PATCH] chelsio: whitespace fixes
  [PATCH] amd8111e use standard CRC lib
  [PATCH] sky2: msi enhancements.
  [PATCH] sky2: kfree_skb_any needed
  [PATCH] sky2: fixes for Yukon EC_U chip revisions
  [PATCH] sky2: add Dlink 560SX id
  [PATCH] sky2: receive error handling fix
  [PATCH] skge: don't clear MC state on link down
  ...
-rw-r--r--  Documentation/networking/e1000.txt | 451
-rw-r--r--  Documentation/networking/phy.txt | 13
-rw-r--r--  MAINTAINERS | 14
-rw-r--r--  drivers/net/8390.c | 1080
-rw-r--r--  drivers/net/8390.h | 37
-rw-r--r--  drivers/net/Kconfig | 39
-rw-r--r--  drivers/net/Makefile | 13
-rw-r--r--  drivers/net/amd8111e.c | 27
-rw-r--r--  drivers/net/amd8111e.h | 4
-rw-r--r--  drivers/net/arm/etherh.c | 39
-rw-r--r--  drivers/net/au1000_eth.c | 3
-rw-r--r--  drivers/net/chelsio/Makefile | 8
-rw-r--r--  drivers/net/chelsio/common.h | 105
-rw-r--r--  drivers/net/chelsio/cphy.h | 24
-rw-r--r--  drivers/net/chelsio/cpl5_cmd.h | 510
-rw-r--r--  drivers/net/chelsio/cxgb2.c | 591
-rw-r--r--  drivers/net/chelsio/elmer0.h | 7
-rw-r--r--  drivers/net/chelsio/espi.c | 205
-rw-r--r--  drivers/net/chelsio/espi.h | 1
-rw-r--r--  drivers/net/chelsio/fpga_defs.h | 232
-rw-r--r--  drivers/net/chelsio/gmac.h | 5
-rw-r--r--  drivers/net/chelsio/ixf1010.c | 485
-rw-r--r--  drivers/net/chelsio/mac.c | 368
-rw-r--r--  drivers/net/chelsio/mv88e1xxx.c | 397
-rw-r--r--  drivers/net/chelsio/mv88e1xxx.h | 127
-rw-r--r--  drivers/net/chelsio/mv88x201x.c | 36
-rw-r--r--  drivers/net/chelsio/my3126.c | 204
-rw-r--r--  drivers/net/chelsio/pm3393.c | 125
-rw-r--r--  drivers/net/chelsio/regs.h | 1718
-rw-r--r--  drivers/net/chelsio/sge.c | 867
-rw-r--r--  drivers/net/chelsio/sge.h | 33
-rw-r--r--  drivers/net/chelsio/subr.c | 494
-rw-r--r--  drivers/net/chelsio/suni1x10gexp_regs.h | 1430
-rw-r--r--  drivers/net/chelsio/tp.c | 178
-rw-r--r--  drivers/net/chelsio/tp.h | 73
-rw-r--r--  drivers/net/chelsio/vsc7326.c | 725
-rw-r--r--  drivers/net/chelsio/vsc7326_reg.h | 286
-rw-r--r--  drivers/net/chelsio/vsc8244.c | 368
-rw-r--r--  drivers/net/chelsio/vsc8244_reg.h | 172
-rw-r--r--  drivers/net/defxx.c | 39
-rw-r--r--  drivers/net/defxx.h | 15
-rw-r--r--  drivers/net/depca.c | 28
-rw-r--r--  drivers/net/e1000/e1000.h | 17
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 36
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 137
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 90
-rw-r--r--  drivers/net/e1000/e1000_main.c | 488
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 9
-rw-r--r--  drivers/net/e1000/e1000_param.c | 98
-rw-r--r--  drivers/net/forcedeth.c | 290
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 3
-rw-r--r--  drivers/net/gianfar.c | 72
-rw-r--r--  drivers/net/gianfar.h | 3
-rw-r--r--  drivers/net/hydra.c | 23
-rw-r--r--  drivers/net/lib8390.c | 1097
-rw-r--r--  drivers/net/mac8390.c | 26
-rw-r--r--  drivers/net/macb.c | 1210
-rw-r--r--  drivers/net/macb.h | 387
-rw-r--r--  drivers/net/ne-h8300.c | 23
-rw-r--r--  drivers/net/netxen/Makefile | 35
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 1028
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 741
-rw-r--r--  drivers/net/netxen/netxen_nic_hdr.h | 678
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c | 1010
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.h | 482
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 1304
-rw-r--r--  drivers/net/netxen/netxen_nic_ioctl.h | 77
-rw-r--r--  drivers/net/netxen/netxen_nic_isr.c | 215
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 1161
-rw-r--r--  drivers/net/netxen/netxen_nic_niu.c | 894
-rw-r--r--  drivers/net/netxen/netxen_nic_phan_reg.h | 215
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 1
-rw-r--r--  drivers/net/phy/Kconfig | 6
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/broadcom.c | 175
-rw-r--r--  drivers/net/phy/phy.c | 113
-rw-r--r--  drivers/net/phy/phy_device.c | 30
-rw-r--r--  drivers/net/r8169.c | 22
-rw-r--r--  drivers/net/sk98lin/skethtool.c | 26
-rw-r--r--  drivers/net/sk98lin/skge.c | 54
-rw-r--r--  drivers/net/skge.c | 6
-rw-r--r--  drivers/net/sky2.c | 39
-rw-r--r--  drivers/net/sky2.h | 11
-rw-r--r--  drivers/net/sundance.c | 58
-rw-r--r--  drivers/net/tokenring/olympic.c | 2
-rw-r--r--  drivers/net/tsi108_eth.c | 1708
-rw-r--r--  drivers/net/tsi108_eth.h | 365
-rw-r--r--  drivers/net/tulip/de2104x.c | 4
-rw-r--r--  drivers/net/tulip/dmfe.c | 9
-rw-r--r--  drivers/net/ucc_geth.c | 6
-rw-r--r--  drivers/net/wan/Kconfig | 76
-rw-r--r--  drivers/net/wireless/atmel.c | 36
-rw-r--r--  drivers/net/wireless/atmel_cs.c | 74
-rw-r--r--  drivers/net/wireless/atmel_pci.c | 10
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx.h | 32
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 207
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_power.c | 28
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.c | 4
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_xmit.c | 18
-rw-r--r--  drivers/net/wireless/hostap/hostap_pci.c | 8
-rw-r--r--  drivers/net/wireless/ipw2100.c | 25
-rw-r--r--  drivers/net/wireless/ipw2200.c | 8
-rw-r--r--  drivers/net/wireless/orinoco_pci.h | 7
-rw-r--r--  drivers/net/wireless/prism54/isl_38xx.c | 17
-rw-r--r--  drivers/net/wireless/prism54/isl_38xx.h | 7
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 61
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.h | 1
-rw-r--r--  drivers/net/wireless/prism54/isl_oid.h | 48
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 13
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.h | 11
-rw-r--r--  drivers/net/wireless/prism54/islpci_eth.c | 30
-rw-r--r--  drivers/net/wireless/prism54/islpci_eth.h | 1
-rw-r--r--  drivers/net/wireless/prism54/islpci_hotplug.c | 43
-rw-r--r--  drivers/net/wireless/prism54/islpci_mgt.c | 3
-rw-r--r--  drivers/net/wireless/prism54/islpci_mgt.h | 5
-rw-r--r--  drivers/net/wireless/prism54/oid_mgt.c | 6
-rw-r--r--  drivers/net/wireless/prism54/prismcompat.h | 4
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.c | 38
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.h | 104
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_def.h | 1
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_ieee80211.c | 10
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_ieee80211.h | 1
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 402
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.h | 34
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_netdev.c | 13
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 19
-rw-r--r--  drivers/net/zorro8390.c | 24
-rw-r--r--  include/linux/mv643xx.h | 4
-rw-r--r--  include/linux/pci_ids.h | 4
-rw-r--r--  include/linux/phy.h | 30
-rw-r--r--  include/linux/wireless.h | 2
-rw-r--r--  include/net/ieee80211.h | 6
-rw-r--r--  net/core/dev.c | 9
-rw-r--r--  net/ieee80211/ieee80211_module.c | 25
-rw-r--r--  net/ieee80211/ieee80211_rx.c | 68
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_auth.c | 24
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_scan.c | 5
137 files changed, 24511 insertions, 3156 deletions
diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt
index 5c0a5cc03998..61b171cf5313 100644
--- a/Documentation/networking/e1000.txt
+++ b/Documentation/networking/e1000.txt
@@ -1,7 +1,7 @@
Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters
===============================================================
-November 15, 2005
+September 26, 2006
Contents
@@ -9,6 +9,7 @@ Contents
- In This Release
- Identifying Your Adapter
+- Building and Installation
- Command Line Parameters
- Speed and Duplex Configuration
- Additional Configurations
@@ -41,6 +42,9 @@ or later), lspci, and ifconfig to obtain the same information.
Instructions on updating ethtool can be found in the section "Additional
Configurations" later in this document.
+NOTE: The Intel(R) 82562v 10/100 Network Connection only provides 10/100
+support.
+
Identifying Your Adapter
========================
@@ -51,28 +55,27 @@ Driver ID Guide at:
http://support.intel.com/support/network/adapter/pro100/21397.htm
For the latest Intel network drivers for Linux, refer to the following
-website. In the search field, enter your adapter name or type, or use the
+website. In the search field, enter your adapter name or type, or use the
networking link on the left to search for your adapter:
http://downloadfinder.intel.com/scripts-df/support_intel.asp
-Command Line Parameters =======================
+Command Line Parameters
+=======================
If the driver is built as a module, the following optional parameters
-are used by entering them on the command line with the modprobe or insmod
-command using this syntax:
+are used by entering them on the command line with the modprobe command
+using this syntax:
modprobe e1000 [<option>=<VAL1>,<VAL2>,...]
- insmod e1000 [<option>=<VAL1>,<VAL2>,...]
-
For example, with two PRO/1000 PCI adapters, entering:
- insmod e1000 TxDescriptors=80,128
+ modprobe e1000 TxDescriptors=80,128
-loads the e1000 driver with 80 TX descriptors for the first adapter and 128
-TX descriptors for the second adapter.
+loads the e1000 driver with 80 TX descriptors for the first adapter and
+128 TX descriptors for the second adapter.
The default value for each parameter is generally the recommended setting,
unless otherwise noted.
@@ -87,7 +90,7 @@ NOTES: For more information about the AutoNeg, Duplex, and Speed
http://www.intel.com/design/network/applnots/ap450.htm
A descriptor describes a data buffer and attributes related to
- the data buffer. This information is accessed by the hardware.
+ the data buffer. This information is accessed by the hardware.
AutoNeg
@@ -96,9 +99,9 @@ AutoNeg
Valid Range: 0x01-0x0F, 0x20-0x2F
Default Value: 0x2F
-This parameter is a bit mask that specifies which speed and duplex
-settings the board advertises. When this parameter is used, the Speed
-and Duplex parameters must not be specified.
+This parameter is a bit-mask that specifies the speed and duplex settings
+advertised by the adapter. When this parameter is used, the Speed and
+Duplex parameters must not be specified.
NOTE: Refer to the Speed and Duplex section of this readme for more
information on the AutoNeg parameter.
@@ -110,14 +113,15 @@ Duplex
Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full)
Default Value: 0
-Defines the direction in which data is allowed to flow. Can be either
-one or two-directional. If both Duplex and the link partner are set to
-auto-negotiate, the board auto-detects the correct duplex. If the link
-partner is forced (either full or half), Duplex defaults to half-duplex.
+This defines the direction in which data is allowed to flow. Can be
+either one or two-directional. If both Duplex and the link partner are
+set to auto-negotiate, the board auto-detects the correct duplex. If the
+link partner is forced (either full or half), Duplex defaults to half-
+duplex.
FlowControl
-----------
+-----------
Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
Default Value: Reads flow control settings from the EEPROM
@@ -127,57 +131,107 @@ to Ethernet PAUSE frames.
InterruptThrottleRate
---------------------
-(not supported on Intel 82542, 82543 or 82544-based adapters)
-Valid Range: 100-100000 (0=off, 1=dynamic)
-Default Value: 8000
-
-This value represents the maximum number of interrupts per second the
-controller generates. InterruptThrottleRate is another setting used in
-interrupt moderation. Dynamic mode uses a heuristic algorithm to adjust
-InterruptThrottleRate based on the current traffic load.
+(not supported on Intel(R) 82542, 82543 or 82544-based adapters)
+Valid Range: 0,1,3,100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+Default Value: 3
+
+The driver can limit the amount of interrupts per second that the adapter
+will generate for incoming packets. It does this by writing a value to the
+adapter that is based on the maximum amount of interrupts that the adapter
+will generate per second.
+
+Setting InterruptThrottleRate to a value greater or equal to 100
+will program the adapter to send out a maximum of that many interrupts
+per second, even if more packets have come in. This reduces interrupt
+load on the system and can lower CPU utilization under heavy load,
+but will increase latency as packets are not processed as quickly.
+
+The default behaviour of the driver previously assumed a static
+InterruptThrottleRate value of 8000, providing a good fallback value for
+all traffic types,but lacking in small packet performance and latency.
+The hardware can handle many more small packets per second however, and
+for this reason an adaptive interrupt moderation algorithm was implemented.
+
+Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in which
+it dynamically adjusts the InterruptThrottleRate value based on the traffic
+that it receives. After determining the type of incoming traffic in the last
+timeframe, it will adjust the InterruptThrottleRate to an appropriate value
+for that traffic.
+
+The algorithm classifies the incoming traffic every interval into
+classes. Once the class is determined, the InterruptThrottleRate value is
+adjusted to suit that traffic type the best. There are three classes defined:
+"Bulk traffic", for large amounts of packets of normal size; "Low latency",
+for small amounts of traffic and/or a significant percentage of small
+packets; and "Lowest latency", for almost completely small packets or
+minimal traffic.
+
+In dynamic conservative mode, the InterruptThrottleRate value is set to 4000
+for traffic that falls in class "Bulk traffic". If traffic falls in the "Low
+latency" or "Lowest latency" class, the InterruptThrottleRate is increased
+stepwise to 20000. This default mode is suitable for most applications.
+
+For situations where low latency is vital such as cluster or
+grid computing, the algorithm can reduce latency even more when
+InterruptThrottleRate is set to mode 1. In this mode, which operates
+the same as mode 3, the InterruptThrottleRate will be increased stepwise to
+70000 for traffic in class "Lowest latency".
+
+Setting InterruptThrottleRate to 0 turns off any interrupt moderation
+and may improve small packet latency, but is generally not suitable
+for bulk throughput traffic.
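
As an illustrative sketch (not taken from the patch text above), and assuming
the comma-separated per-adapter modprobe syntax already shown in this
document, the low-latency dynamic mode could be selected at load time with:

    modprobe e1000 InterruptThrottleRate=1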
NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and
- RxAbsIntDelay parameters. In other words, minimizing the receive
+ RxAbsIntDelay parameters. In other words, minimizing the receive
and/or transmit absolute delays does not force the controller to
generate more interrupts than what the Interrupt Throttle Rate
allows.
-CAUTION: If you are using the Intel PRO/1000 CT Network Connection
+CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection
(controller 82547), setting InterruptThrottleRate to a value
greater than 75,000, may hang (stop transmitting) adapters
- under certain network conditions. If this occurs a NETDEV
- WATCHDOG message is logged in the system event log. In
+ under certain network conditions. If this occurs a NETDEV
+ WATCHDOG message is logged in the system event log. In
addition, the controller is automatically reset, restoring
- the network connection. To eliminate the potential for the
+ the network connection. To eliminate the potential for the
hang, ensure that InterruptThrottleRate is set no greater
than 75,000 and is not set to 0.
NOTE: When e1000 is loaded with default settings and multiple adapters
are in use simultaneously, the CPU utilization may increase non-
- linearly. In order to limit the CPU utilization without impacting
+ linearly. In order to limit the CPU utilization without impacting
the overall throughput, we recommend that you load the driver as
follows:
- insmod e1000.o InterruptThrottleRate=3000,3000,3000
+ modprobe e1000 InterruptThrottleRate=3000,3000,3000
This sets the InterruptThrottleRate to 3000 interrupts/sec for
- the first, second, and third instances of the driver. The range
+ the first, second, and third instances of the driver. The range
of 2000 to 3000 interrupts per second works on a majority of
systems and is a good starting point, but the optimal value will
- be platform-specific. If CPU utilization is not a concern, use
+ be platform-specific. If CPU utilization is not a concern, use
RX_POLLING (NAPI) and default driver settings.
+
RxDescriptors
-------------
Valid Range: 80-256 for 82542 and 82543-based adapters
80-4096 for all other supported adapters
Default Value: 256
-This value specifies the number of receive descriptors allocated by the
-driver. Increasing this value allows the driver to buffer more incoming
-packets. Each descriptor is 16 bytes. A receive buffer is also
-allocated for each descriptor and is 2048.
+This value specifies the number of receive buffer descriptors allocated
+by the driver. Increasing this value allows the driver to buffer more
+incoming packets, at the expense of increased system memory utilization.
+
+Each descriptor is 16 bytes. A receive buffer is also allocated for each
+descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
+on the MTU setting. The maximum MTU size is 16110.
+
+NOTE: MTU designates the frame size. It only needs to be set for Jumbo
+ Frames. Depending on the available system resources, the request
+ for a higher number of receive descriptors may be denied. In this
+ case, use a lower number.
RxIntDelay
@@ -187,17 +241,17 @@ Default Value: 0
This value delays the generation of receive interrupts in units of 1.024
microseconds. Receive interrupt reduction can improve CPU efficiency if
-properly tuned for specific network traffic. Increasing this value adds
+properly tuned for specific network traffic. Increasing this value adds
extra latency to frame reception and can end up decreasing the throughput
-of TCP traffic. If the system is reporting dropped receives, this value
+of TCP traffic. If the system is reporting dropped receives, this value
may be set too high, causing the driver to run out of available receive
descriptors.
CAUTION: When setting RxIntDelay to a value other than 0, adapters may
- hang (stop transmitting) under certain network conditions. If
+ hang (stop transmitting) under certain network conditions. If
this occurs a NETDEV WATCHDOG message is logged in the system
- event log. In addition, the controller is automatically reset,
- restoring the network connection. To eliminate the potential
+ event log. In addition, the controller is automatically reset,
+ restoring the network connection. To eliminate the potential
for the hang ensure that RxIntDelay is set to 0.
@@ -208,7 +262,7 @@ Valid Range: 0-65535 (0=off)
Default Value: 128
This value, in units of 1.024 microseconds, limits the delay in which a
-receive interrupt is generated. Useful only if RxIntDelay is non-zero,
+receive interrupt is generated. Useful only if RxIntDelay is non-zero,
this value ensures that an interrupt is generated after the initial
packet is received within the set amount of time. Proper tuning,
along with RxIntDelay, may improve traffic throughput in specific network
@@ -222,9 +276,9 @@ Valid Settings: 0, 10, 100, 1000
Default Value: 0 (auto-negotiate at all supported speeds)
Speed forces the line speed to the specified value in megabits per second
-(Mbps). If this parameter is not specified or is set to 0 and the link
+(Mbps). If this parameter is not specified or is set to 0 and the link
partner is set to auto-negotiate, the board will auto-detect the correct
-speed. Duplex should also be set when Speed is set to either 10 or 100.
+speed. Duplex should also be set when Speed is set to either 10 or 100.
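
For illustration only (this example does not appear in the patch), forcing
100 Mbps full-duplex with the Speed and Duplex parameters described above
might look like the following; the link partner should be forced to the
same settings:

    modprobe e1000 Speed=100 Duplex=2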
TxDescriptors
@@ -234,7 +288,7 @@ Valid Range: 80-256 for 82542 and 82543-based adapters
Default Value: 256
This value is the number of transmit descriptors allocated by the driver.
-Increasing this value allows the driver to queue more transmits. Each
+Increasing this value allows the driver to queue more transmits. Each
descriptor is 16 bytes.
NOTE: Depending on the available system resources, the request for a
@@ -248,8 +302,8 @@ Valid Range: 0-65535 (0=off)
Default Value: 64
This value delays the generation of transmit interrupts in units of
-1.024 microseconds. Transmit interrupt reduction can improve CPU
-efficiency if properly tuned for specific network traffic. If the
+1.024 microseconds. Transmit interrupt reduction can improve CPU
+efficiency if properly tuned for specific network traffic. If the
system is reporting dropped transmits, this value may be set too high
causing the driver to run out of available transmit descriptors.
@@ -261,7 +315,7 @@ Valid Range: 0-65535 (0=off)
Default Value: 64
This value, in units of 1.024 microseconds, limits the delay in which a
-transmit interrupt is generated. Useful only if TxIntDelay is non-zero,
+transmit interrupt is generated. Useful only if TxIntDelay is non-zero,
this value ensures that an interrupt is generated after the initial
packet is sent on the wire within the set amount of time. Proper tuning,
along with TxIntDelay, may improve traffic throughput in specific
@@ -288,15 +342,15 @@ fiber interface board only links at 1000 Mbps full-duplex.
For copper-based boards, the keywords interact as follows:
- The default operation is auto-negotiate. The board advertises all
+ The default operation is auto-negotiate. The board advertises all
supported speed and duplex combinations, and it links at the highest
common speed and duplex mode IF the link partner is set to auto-negotiate.
If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
is advertised (The 1000BaseT spec requires auto-negotiation.)
- If Speed = 10 or 100, then both Speed and Duplex should be set. Auto-
- negotiation is disabled, and the AutoNeg parameter is ignored. Partner
+ If Speed = 10 or 100, then both Speed and Duplex should be set. Auto-
+ negotiation is disabled, and the AutoNeg parameter is ignored. Partner
SHOULD also be forced.
The AutoNeg parameter is used when more control is required over the
@@ -304,7 +358,7 @@ auto-negotiation process. It should be used when you wish to control which
speed and duplex combinations are advertised during the auto-negotiation
process.
-The parameter may be specified as either a decimal or hexidecimal value as
+The parameter may be specified as either a decimal or hexadecimal value as
determined by the bitmap below.
Bit position 7 6 5 4 3 2 1 0
@@ -337,20 +391,19 @@ Additional Configurations
Configuring the Driver on Different Distributions
-------------------------------------------------
-
Configuring a network driver to load properly when the system is started
- is distribution dependent. Typically, the configuration process involves
+ is distribution dependent. Typically, the configuration process involves
adding an alias line to /etc/modules.conf or /etc/modprobe.conf as well
- as editing other system startup scripts and/or configuration files. Many
+ as editing other system startup scripts and/or configuration files. Many
popular Linux distributions ship with tools to make these changes for you.
To learn the proper way to configure a network device for your system,
- refer to your distribution documentation. If during this process you are
+ refer to your distribution documentation. If during this process you are
asked for the driver or module name, the name for the Linux Base Driver
- for the Intel PRO/1000 Family of Adapters is e1000.
+ for the Intel(R) PRO/1000 Family of Adapters is e1000.
As an example, if you install the e1000 driver for two PRO/1000 adapters
(eth0 and eth1) and set the speed and duplex to 10full and 100half, add
- the following to modules.conf or modprobe.conf:
+ the following to modules.conf or or modprobe.conf:
alias eth0 e1000
alias eth1 e1000
@@ -358,9 +411,8 @@ Additional Configurations
Viewing Link Messages
---------------------
-
Link messages will not be displayed to the console if the distribution is
- restricting system messages. In order to see network driver link messages
+ restricting system messages. In order to see network driver link messages
on your console, set dmesg to eight by entering the following:
dmesg -n 8
@@ -369,11 +421,9 @@ Additional Configurations
Jumbo Frames
------------
-
- The driver supports Jumbo Frames for all adapters except 82542 and
- 82573-based adapters. Jumbo Frames support is enabled by changing the
- MTU to a value larger than the default of 1500. Use the ifconfig command
- to increase the MTU size. For example:
+ Jumbo Frames support is enabled by changing the MTU to a value larger than
+ the default of 1500. Use the ifconfig command to increase the MTU size.
+ For example:
ifconfig eth<x> mtu 9000 up
@@ -390,26 +440,49 @@ Additional Configurations
- To enable Jumbo Frames, increase the MTU size on the interface beyond
1500.
- - The maximum MTU setting for Jumbo Frames is 16110. This value coincides
+
+ - The maximum MTU setting for Jumbo Frames is 16110. This value coincides
with the maximum Jumbo Frames size of 16128.
+
- Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or
loss of link.
+
- Some Intel gigabit adapters that support Jumbo Frames have a frame size
limit of 9238 bytes, with a corresponding MTU size limit of 9216 bytes.
- The adapters with this limitation are based on the Intel 82571EB and
- 82572EI controllers, which correspond to these product names:
- Intel PRO/1000 PT Dual Port Server Adapter
- Intel PRO/1000 PF Dual Port Server Adapter
- Intel PRO/1000 PT Server Adapter
- Intel PRO/1000 PT Desktop Adapter
- Intel PRO/1000 PF Server Adapter
-
- - The Intel PRO/1000 PM Network Connection does not support jumbo frames.
+ The adapters with this limitation are based on the Intel(R) 82571EB,
+ 82572EI, 82573L and 80003ES2LAN controller. These correspond to the
+ following product names:
+ Intel(R) PRO/1000 PT Server Adapter
+ Intel(R) PRO/1000 PT Desktop Adapter
+ Intel(R) PRO/1000 PT Network Connection
+ Intel(R) PRO/1000 PT Dual Port Server Adapter
+ Intel(R) PRO/1000 PT Dual Port Network Connection
+ Intel(R) PRO/1000 PF Server Adapter
+ Intel(R) PRO/1000 PF Network Connection
+ Intel(R) PRO/1000 PF Dual Port Server Adapter
+ Intel(R) PRO/1000 PB Server Connection
+ Intel(R) PRO/1000 PL Network Connection
+ Intel(R) PRO/1000 EB Network Connection with I/O Acceleration
+ Intel(R) PRO/1000 EB Backplane Connection with I/O Acceleration
+ Intel(R) PRO/1000 PT Quad Port Server Adapter
+
+ - Adapters based on the Intel(R) 82542 and 82573V/E controller do not
+ support Jumbo Frames. These correspond to the following product names:
+ Intel(R) PRO/1000 Gigabit Server Adapter
+ Intel(R) PRO/1000 PM Network Connection
+
+ - The following adapters do not support Jumbo Frames:
+ Intel(R) 82562V 10/100 Network Connection
+ Intel(R) 82566DM Gigabit Network Connection
+ Intel(R) 82566DC Gigabit Network Connection
+ Intel(R) 82566MM Gigabit Network Connection
+ Intel(R) 82566MC Gigabit Network Connection
+ Intel(R) 82562GT 10/100 Network Connection
+ Intel(R) 82562G 10/100 Network Connection
Ethtool
-------
-
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. Ethtool
version 1.6 or later is required for this functionality.
@@ -417,15 +490,14 @@ Additional Configurations
The latest release of ethtool can be found from
http://sourceforge.net/projects/gkernel.
- NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support
+ NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support
for a more complete ethtool feature set can be enabled by upgrading
ethtool to ethtool-1.8.1.
Enabling Wake on LAN* (WoL)
---------------------------
-
- WoL is configured through the Ethtool* utility. Ethtool is included with
- all versions of Red Hat after Red Hat 7.2. For other Linux distributions,
+ WoL is configured through the Ethtool* utility. Ethtool is included with
+ all versions of Red Hat after Red Hat 7.2. For other Linux distributions,
download and install Ethtool from the following website:
http://sourceforge.net/projects/gkernel.
@@ -436,11 +508,17 @@ Additional Configurations
For this driver version, in order to enable WoL, the e1000 driver must be
loaded when shutting down or rebooting the system.
+ Wake On LAN is only supported on port A for the following devices:
+ Intel(R) PRO/1000 PT Dual Port Network Connection
+ Intel(R) PRO/1000 PT Dual Port Server Connection
+ Intel(R) PRO/1000 PT Dual Port Server Adapter
+ Intel(R) PRO/1000 PF Dual Port Server Adapter
+ Intel(R) PRO/1000 PT Quad Port Server Adapter
+
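As a hedged usage sketch (assuming the adapter exposes Wake on LAN through
the ethtool interface as described above), magic-packet wake could be
enabled and then checked with:

    ethtool -s eth<x> wol g
    ethtool eth<x>
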
NAPI
----
-
- NAPI (Rx polling mode) is supported in the e1000 driver. NAPI is enabled
- or disabled based on the configuration of the kernel. To override
+ NAPI (Rx polling mode) is supported in the e1000 driver. NAPI is enabled
+ or disabled based on the configuration of the kernel. To override
the default, use the following compile-time flags.
To enable NAPI, compile the driver module, passing in a configuration option:
@@ -457,88 +535,105 @@ Additional Configurations
Known Issues
============
- Jumbo Frames System Requirement
- -------------------------------
-
- Memory allocation failures have been observed on Linux systems with 64 MB
- of RAM or less that are running Jumbo Frames. If you are using Jumbo
- Frames, your system may require more than the advertised minimum
- requirement of 64 MB of system memory.
-
- Performance Degradation with Jumbo Frames
- -----------------------------------------
-
- Degradation in throughput performance may be observed in some Jumbo frames
- environments. If this is observed, increasing the application's socket
- buffer size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values
- may help. See the specific application manual and
- /usr/src/linux*/Documentation/
- networking/ip-sysctl.txt for more details.
-
- Jumbo frames on Foundry BigIron 8000 switch
- -------------------------------------------
- There is a known issue using Jumbo frames when connected to a Foundry
- BigIron 8000 switch. This is a 3rd party limitation. If you experience
- loss of packets, lower the MTU size.
-
- Multiple Interfaces on Same Ethernet Broadcast Network
- ------------------------------------------------------
-
- Due to the default ARP behavior on Linux, it is not possible to have
- one system on two IP networks in the same Ethernet broadcast domain
- (non-partitioned switch) behave as expected. All Ethernet interfaces
- will respond to IP traffic for any IP address assigned to the system.
- This results in unbalanced receive traffic.
-
- If you have multiple interfaces in a server, either turn on ARP
- filtering by entering:
-
- echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
- (this only works if your kernel's version is higher than 2.4.5),
-
- NOTE: This setting is not saved across reboots. The configuration
- change can be made permanent by adding the line:
- net.ipv4.conf.all.arp_filter = 1
- to the file /etc/sysctl.conf
-
- or,
-
- install the interfaces in separate broadcast domains (either in
- different switches or in a switch partitioned to VLANs).
-
- 82541/82547 can't link or are slow to link with some link partners
- -----------------------------------------------------------------
-
- There is a known compatibility issue with 82541/82547 and some
- low-end switches where the link will not be established, or will
- be slow to establish. In particular, these switches are known to
- be incompatible with 82541/82547:
-
- Planex FXG-08TE
- I-O Data ETG-SH8
-
- To workaround this issue, the driver can be compiled with an override
- of the PHY's master/slave setting. Forcing master or forcing slave
- mode will improve time-to-link.
-
- # make EXTRA_CFLAGS=-DE1000_MASTER_SLAVE=<n>
-
- Where <n> is:
-
- 0 = Hardware default
- 1 = Master mode
- 2 = Slave mode
- 3 = Auto master/slave
-
- Disable rx flow control with ethtool
- ------------------------------------
-
- In order to disable receive flow control using ethtool, you must turn
- off auto-negotiation on the same command line.
-
- For example:
-
- ethtool -A eth? autoneg off rx off
+Dropped Receive Packets on Half-duplex 10/100 Networks
+------------------------------------------------------
+If you have an Intel PCI Express adapter running at 10mbps or 100mbps, half-
+duplex, you may observe occasional dropped receive packets. There are no
+workarounds for this problem in this network configuration. The network must
+be updated to operate in full-duplex, and/or 1000mbps only.
+
+Jumbo Frames System Requirement
+-------------------------------
+Memory allocation failures have been observed on Linux systems with 64 MB
+of RAM or less that are running Jumbo Frames. If you are using Jumbo
+Frames, your system may require more than the advertised minimum
+requirement of 64 MB of system memory.
+
+Performance Degradation with Jumbo Frames
+-----------------------------------------
+Degradation in throughput performance may be observed in some Jumbo frames
+environments. If this is observed, increasing the application's socket
+buffer size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values
+may help. See the specific application manual and
+/usr/src/linux*/Documentation/
+networking/ip-sysctl.txt for more details.
+
+Jumbo Frames on Foundry BigIron 8000 switch
+-------------------------------------------
+There is a known issue using Jumbo frames when connected to a Foundry
+BigIron 8000 switch. This is a 3rd party limitation. If you experience
+loss of packets, lower the MTU size.
+
+Allocating Rx Buffers when Using Jumbo Frames
+---------------------------------------------
+Allocating Rx buffers when using Jumbo Frames on 2.6.x kernels may fail if
+the available memory is heavily fragmented. This issue may be seen with PCI-X
+adapters or with packet split disabled. This can be reduced or eliminated
+by changing the amount of available memory for receive buffer allocation, by
+increasing /proc/sys/vm/min_free_kbytes.
+
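A hedged example of the tuning suggested above (the value shown is purely
illustrative and should be sized to the system):

    echo 65536 > /proc/sys/vm/min_free_kbytes
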
+Multiple Interfaces on Same Ethernet Broadcast Network
+------------------------------------------------------
+Due to the default ARP behavior on Linux, it is not possible to have
+one system on two IP networks in the same Ethernet broadcast domain
+(non-partitioned switch) behave as expected. All Ethernet interfaces
+will respond to IP traffic for any IP address assigned to the system.
+This results in unbalanced receive traffic.
+
+If you have multiple interfaces in a server, either turn on ARP
+filtering by entering:
+
+ echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
+(this only works if your kernel's version is higher than 2.4.5),
+
+NOTE: This setting is not saved across reboots. The configuration
+change can be made permanent by adding the line:
+ net.ipv4.conf.all.arp_filter = 1
+to the file /etc/sysctl.conf
+
+ or,
+
+install the interfaces in separate broadcast domains (either in
+different switches or in a switch partitioned to VLANs).
+
+82541/82547 can't link or are slow to link with some link partners
+-----------------------------------------------------------------
+There is a known compatibility issue with 82541/82547 and some
+low-end switches where the link will not be established, or will
+be slow to establish. In particular, these switches are known to
+be incompatible with 82541/82547:
+
+ Planex FXG-08TE
+ I-O Data ETG-SH8
+
+To workaround this issue, the driver can be compiled with an override
+of the PHY's master/slave setting. Forcing master or forcing slave
+mode will improve time-to-link.
+
+ # make CFLAGS_EXTRA=-DE1000_MASTER_SLAVE=<n>
+
+Where <n> is:
+
+ 0 = Hardware default
+ 1 = Master mode
+ 2 = Slave mode
+ 3 = Auto master/slave
+
+Disable rx flow control with ethtool
+------------------------------------
+In order to disable receive flow control using ethtool, you must turn
+off auto-negotiation on the same command line.
+
+For example:
+
+ ethtool -A eth? autoneg off rx off
+
+Unplugging network cable while ethtool -p is running
+----------------------------------------------------
+In kernel versions 2.5.50 and later (including 2.6 kernel), unplugging
+the network cable while ethtool -p is running will cause the system to
+become unresponsive to keyboard commands, except for control-alt-delete.
+Restarting the system appears to be the only remedy.
Support
@@ -548,24 +643,10 @@ For general information, go to the Intel support website at:
http://support.intel.com
- or the Intel Wired Networking project hosted by Sourceforge at:
+or the Intel Wired Networking project hosted by Sourceforge at:
http://sourceforge.net/projects/e1000
If an issue is identified with the released source code on the supported
kernel with a supported adapter, email the specific information related
-to the issue to e1000-devel@lists.sourceforge.net
-
-
-License
-=======
-
-This software program is released under the terms of a license agreement
-between you ('Licensee') and Intel. Do not use or load this software or any
-associated materials (collectively, the 'Software') until you have carefully
-read the full terms and conditions of the file COPYING located in this software
-package. By loading or using the Software, you agree to the terms of this
-Agreement. If you do not agree with the terms of this Agreement, do not
-install or use the Software.
-
-* Other names and brands may be claimed as the property of others.
+to the issue to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index 29ccae409031..0bc95eab1512 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -1,7 +1,7 @@
-------
PHY Abstraction Layer
-(Updated 2005-07-21)
+(Updated 2006-11-30)
Purpose
@@ -97,11 +97,12 @@ Letting the PHY Abstraction Layer do Everything
Next, you need to know the device name of the PHY connected to this device.
The name will look something like, "phy0:0", where the first number is the
- bus id, and the second is the PHY's address on that bus.
+ bus id, and the second is the PHY's address on that bus. Typically,
+ the bus is responsible for making its ID unique.
Now, to connect, just call this function:
- phydev = phy_connect(dev, phy_name, &adjust_link, flags);
+ phydev = phy_connect(dev, phy_name, &adjust_link, flags, interface);
phydev is a pointer to the phy_device structure which represents the PHY. If
phy_connect is successful, it will return the pointer. dev, here, is the
@@ -115,6 +116,10 @@ Letting the PHY Abstraction Layer do Everything
This is useful if the system has put hardware restrictions on
the PHY/controller, of which the PHY needs to be aware.
+ interface is a u32 which specifies the connection type used
+ between the controller and the PHY. Examples are GMII, MII,
+ RGMII, and SGMII. For a full list, see include/linux/phy.h
+
Now just make sure that phydev->supported and phydev->advertising have any
values pruned from them which don't make sense for your controller (a 10/100
controller may be connected to a gigabit capable PHY, so you would need to
@@ -191,7 +196,7 @@ Doing it all yourself
start, or disables then frees them for stop.
struct phy_device * phy_attach(struct net_device *dev, const char *phy_id,
- u32 flags);
+ u32 flags, phy_interface_t interface);
Attaches a network device to a particular PHY, binding the PHY to a generic
driver if none was found during bus initialization. Passes in
diff --git a/MAINTAINERS b/MAINTAINERS
index 846e77a78710..45df5d4e2ab3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -432,6 +432,13 @@ L: linux-atm-general@lists.sourceforge.net (subscribers-only)
W: http://linux-atm.sourceforge.net
S: Maintained
+ATMEL MACB ETHERNET DRIVER
+P: Atmel AVR32 Support Team
+M: avr32@atmel.com
+P: Haavard Skinnemoen
+M: hskinnemoen@atmel.com
+S: Supported
+
ATMEL WIRELESS DRIVER
P: Simon Kelley
M: simon@thekelleys.org.uk
@@ -2132,6 +2139,13 @@ L: netdev@vger.kernel.org
T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git
S: Maintained
+NETXEN (1/10) GbE SUPPORT
+P: Amit S. Kale
+M: amitkale@netxen.com
+L: netdev@vger.kernel.org
+W: http://www.netxen.com
+S: Supported
+
IPVS
P: Wensong Zhang
M: wensong@linux-vs.org
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index 3d1c599ac3cb..a82807641dcf 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -1,1104 +1,40 @@
-/* 8390.c: A general NS8390 ethernet driver core for linux. */
-/*
- Written 1992-94 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
-
- This is the chip-specific code for many 8390-based ethernet adaptors.
- This is not a complete driver, it must be combined with board-specific
- code such as ne.c, wd.c, 3c503.c, etc.
-
- Seeing how at least eight drivers use this code, (not counting the
- PCMCIA ones either) it is easy to break some card by what seems like
- a simple innocent change. Please contact me or Donald if you think
- you have found something that needs changing. -- PG
-
-
- Changelog:
-
- Paul Gortmaker : remove set_bit lock, other cleanups.
- Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
- ei_block_input() for eth_io_copy_and_sum().
- Paul Gortmaker : exchange static int ei_pingpong for a #define,
- also add better Tx error handling.
- Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
- Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
- Paul Gortmaker : tweak ANK's above multicast changes a bit.
- Paul Gortmaker : update packet statistics for v2.1.x
- Alan Cox : support arbitary stupid port mappings on the
- 68K Macintosh. Support >16bit I/O spaces
- Paul Gortmaker : add kmod support for auto-loading of the 8390
- module by all drivers that require it.
- Alan Cox : Spinlocking work, added 'BUG_83C690'
- Paul Gortmaker : Separate out Tx timeout code from Tx path.
- Paul Gortmaker : Remove old unused single Tx buffer code.
- Hayato Fujiwara : Add m32r support.
- Paul Gortmaker : use skb_padto() instead of stack scratch area
-
- Sources:
- The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
-
- */
+/* 8390 core for usual drivers */
static const char version[] =
"8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/jiffies.h>
-#include <linux/fs.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/bitops.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fcntl.h>
-#include <linux/in.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#define NS8390_CORE
-#include "8390.h"
-
-#define BUG_83C690
-
-/* These are the operational function interfaces to board-specific
- routines.
- void reset_8390(struct net_device *dev)
- Resets the board associated with DEV, including a hardware reset of
- the 8390. This is only called when there is a transmit timeout, and
- it is always followed by 8390_init().
- void block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
- Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
- "page" value uses the 8390's 256-byte pages.
- void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
- Read the 4 byte, page aligned 8390 header. *If* there is a
- subsequent read, it will be of the rest of the packet.
- void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
- Read COUNT bytes from the packet buffer into the skb data area. Start
- reading from RING_OFFSET, the address as the 8390 sees it. This will always
- follow the read of the 8390 header.
-*/
-#define ei_reset_8390 (ei_local->reset_8390)
-#define ei_block_output (ei_local->block_output)
-#define ei_block_input (ei_local->block_input)
-#define ei_get_8390_hdr (ei_local->get_8390_hdr)
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
-/* Index to functions. */
-static void ei_tx_intr(struct net_device *dev);
-static void ei_tx_err(struct net_device *dev);
-static void ei_tx_timeout(struct net_device *dev);
-static void ei_receive(struct net_device *dev);
-static void ei_rx_overrun(struct net_device *dev);
-
-/* Routines generic to NS8390-based boards. */
-static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
- int start_page);
-static void set_multicast_list(struct net_device *dev);
-static void do_set_multicast_list(struct net_device *dev);
-
-/*
- * SMP and the 8390 setup.
- *
- * The 8390 isnt exactly designed to be multithreaded on RX/TX. There is
- * a page register that controls bank and packet buffer access. We guard
- * this with ei_local->page_lock. Nobody should assume or set the page other
- * than zero when the lock is not held. Lock holders must restore page 0
- * before unlocking. Even pure readers must take the lock to protect in
- * page 0.
- *
- * To make life difficult the chip can also be very slow. We therefore can't
- * just use spinlocks. For the longer lockups we disable the irq the device
- * sits on and hold the lock. We must hold the lock because there is a dual
- * processor case other than interrupts (get stats/set multicast list in
- * parallel with each other and transmit).
- *
- * Note: in theory we can just disable the irq on the card _but_ there is
- * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
- * enter lock, take the queued irq. So we waddle instead of flying.
- *
- * Finally by special arrangement for the purpose of being generally
- * annoying the transmit function is called bh atomic. That places
- * restrictions on the user context callers as disable_irq won't save
- * them.
- */
-
-
+#include "lib8390.c"
-/**
- * ei_open - Open/initialize the board.
- * @dev: network device to initialize
- *
- * This routine goes all-out, setting everything
- * up anew at each open, even though many of these registers should only
- * need to be set once at boot.
- */
int ei_open(struct net_device *dev)
{
- unsigned long flags;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-
- /* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
- wrapper that does e.g. media check & then calls ei_tx_timeout. */
- if (dev->tx_timeout == NULL)
- dev->tx_timeout = ei_tx_timeout;
- if (dev->watchdog_timeo <= 0)
- dev->watchdog_timeo = TX_TIMEOUT;
-
- /*
- * Grab the page lock so we own the register set, then call
- * the init function.
- */
-
- spin_lock_irqsave(&ei_local->page_lock, flags);
- NS8390_init(dev, 1);
- /* Set the flag before we drop the lock, That way the IRQ arrives
- after its set and we get no silly warnings */
- netif_start_queue(dev);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
- ei_local->irqlock = 0;
- return 0;
+ return __ei_open(dev);
}
-/**
- * ei_close - shut down network device
- * @dev: network device to close
- *
- * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
- */
int ei_close(struct net_device *dev)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- unsigned long flags;
-
- /*
- * Hold the page lock during close
- */
-
- spin_lock_irqsave(&ei_local->page_lock, flags);
- NS8390_init(dev, 0);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
- netif_stop_queue(dev);
- return 0;
-}
-
-/**
- * ei_tx_timeout - handle transmit time out condition
- * @dev: network device which has apparently fallen asleep
- *
- * Called by kernel when device never acknowledges a transmit has
- * completed (or failed) - i.e. never posted a Tx related interrupt.
- */
-
-void ei_tx_timeout(struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- int txsr, isr, tickssofar = jiffies - dev->trans_start;
- unsigned long flags;
-
-#if defined(CONFIG_M32R) && defined(CONFIG_SMP)
- unsigned long icucr;
-
- local_irq_save(flags);
- icucr = inl(M32R_ICU_CR1_PORTL);
- icucr |= M32R_ICUCR_ISMOD11;
- outl(icucr, M32R_ICU_CR1_PORTL);
- local_irq_restore(flags);
-#endif
- ei_local->stat.tx_errors++;
-
- spin_lock_irqsave(&ei_local->page_lock, flags);
- txsr = inb(e8390_base+EN0_TSR);
- isr = inb(e8390_base+EN0_ISR);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
-
- printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
- dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
- (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
-
- if (!isr && !ei_local->stat.tx_packets)
- {
- /* The 8390 probably hasn't gotten on the cable yet. */
- ei_local->interface_num ^= 1; /* Try a different xcvr. */
- }
-
- /* Ugly but a reset can be slow, yet must be protected */
-
- disable_irq_nosync_lockdep(dev->irq);
- spin_lock(&ei_local->page_lock);
-
- /* Try to restart the card. Perhaps the user has fixed something. */
- ei_reset_8390(dev);
- NS8390_init(dev, 1);
-
- spin_unlock(&ei_local->page_lock);
- enable_irq_lockdep(dev->irq);
- netif_wake_queue(dev);
-}
-
-/**
- * ei_start_xmit - begin packet transmission
- * @skb: packet to be sent
- * @dev: network device to which packet is sent
- *
- * Sends a packet to an 8390 network device.
- */
-
-static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- int send_length = skb->len, output_page;
- unsigned long flags;
- char buf[ETH_ZLEN];
- char *data = skb->data;
-
- if (skb->len < ETH_ZLEN) {
- memset(buf, 0, ETH_ZLEN); /* more efficient than doing just the needed bits */
- memcpy(buf, data, skb->len);
- send_length = ETH_ZLEN;
- data = buf;
- }
-
- /* Mask interrupts from the ethercard.
- SMP: We have to grab the lock here otherwise the IRQ handler
- on another CPU can flip window and race the IRQ mask set. We end
- up trashing the mcast filter not disabling irqs if we don't lock */
-
- spin_lock_irqsave(&ei_local->page_lock, flags);
- outb_p(0x00, e8390_base + EN0_IMR);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
-
-
- /*
- * Slow phase with lock held.
- */
-
- disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
-
- spin_lock(&ei_local->page_lock);
-
- ei_local->irqlock = 1;
-
- /*
- * We have two Tx slots available for use. Find the first free
- * slot, and then perform some sanity checks. With two Tx bufs,
- * you get very close to transmitting back-to-back packets. With
- * only one Tx buf, the transmitter sits idle while you reload the
- * card, leaving a substantial gap between each transmitted packet.
- */
-
- if (ei_local->tx1 == 0)
- {
- output_page = ei_local->tx_start_page;
- ei_local->tx1 = send_length;
- if (ei_debug && ei_local->tx2 > 0)
- printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
- dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
- }
- else if (ei_local->tx2 == 0)
- {
- output_page = ei_local->tx_start_page + TX_PAGES/2;
- ei_local->tx2 = send_length;
- if (ei_debug && ei_local->tx1 > 0)
- printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
- dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
- }
- else
- { /* We should never get here. */
- if (ei_debug)
- printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
- ei_local->irqlock = 0;
- netif_stop_queue(dev);
- outb_p(ENISR_ALL, e8390_base + EN0_IMR);
- spin_unlock(&ei_local->page_lock);
- enable_irq_lockdep_irqrestore(dev->irq, &flags);
- ei_local->stat.tx_errors++;
- return 1;
- }
-
- /*
- * Okay, now upload the packet and trigger a send if the transmitter
- * isn't already sending. If it is busy, the interrupt handler will
- * trigger the send later, upon receiving a Tx done interrupt.
- */
-
- ei_block_output(dev, send_length, data, output_page);
-
- if (! ei_local->txing)
- {
- ei_local->txing = 1;
- NS8390_trigger_send(dev, send_length, output_page);
- dev->trans_start = jiffies;
- if (output_page == ei_local->tx_start_page)
- {
- ei_local->tx1 = -1;
- ei_local->lasttx = -1;
- }
- else
- {
- ei_local->tx2 = -1;
- ei_local->lasttx = -2;
- }
- }
- else ei_local->txqueue++;
-
- if (ei_local->tx1 && ei_local->tx2)
- netif_stop_queue(dev);
- else
- netif_start_queue(dev);
-
- /* Turn 8390 interrupts back on. */
- ei_local->irqlock = 0;
- outb_p(ENISR_ALL, e8390_base + EN0_IMR);
-
- spin_unlock(&ei_local->page_lock);
- enable_irq_lockdep_irqrestore(dev->irq, &flags);
-
- dev_kfree_skb (skb);
- ei_local->stat.tx_bytes += send_length;
-
- return 0;
+ return __ei_close(dev);
}
-/**
- * ei_interrupt - handle the interrupts from an 8390
- * @irq: interrupt number
- * @dev_id: a pointer to the net_device
- *
- * Handle the ether interface interrupts. We pull packets from
- * the 8390 via the card specific functions and fire them at the networking
- * stack. We also handle transmit completions and wake the transmit path if
- * necessary. We also update the counters and do other housekeeping as
- * needed.
- */
-
irqreturn_t ei_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = dev_id;
- long e8390_base;
- int interrupts, nr_serviced = 0;
- struct ei_device *ei_local;
-
- e8390_base = dev->base_addr;
- ei_local = netdev_priv(dev);
-
- /*
- * Protect the irq test too.
- */
-
- spin_lock(&ei_local->page_lock);
-
- if (ei_local->irqlock)
- {
-#if 1 /* This might just be an interrupt for a PCI device sharing this line */
- /* The "irqlock" check is only for testing. */
- printk(ei_local->irqlock
- ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
- : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
- dev->name, inb_p(e8390_base + EN0_ISR),
- inb_p(e8390_base + EN0_IMR));
-#endif
- spin_unlock(&ei_local->page_lock);
- return IRQ_NONE;
- }
-
- /* Change to page 0 and read the intr status reg. */
- outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
- if (ei_debug > 3)
- printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
- inb_p(e8390_base + EN0_ISR));
-
- /* !!Assumption!! -- we stay in page 0. Don't break this. */
- while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
- && ++nr_serviced < MAX_SERVICE)
- {
- if (!netif_running(dev)) {
- printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
- /* rmk - acknowledge the interrupts */
- outb_p(interrupts, e8390_base + EN0_ISR);
- interrupts = 0;
- break;
- }
- if (interrupts & ENISR_OVER)
- ei_rx_overrun(dev);
- else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
- {
- /* Got a good (?) packet. */
- ei_receive(dev);
- }
- /* Push the next to-transmit packet through. */
- if (interrupts & ENISR_TX)
- ei_tx_intr(dev);
- else if (interrupts & ENISR_TX_ERR)
- ei_tx_err(dev);
-
- if (interrupts & ENISR_COUNTERS)
- {
- ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
- ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
- ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
- outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
- }
-
- /* Ignore any RDC interrupts that make it back to here. */
- if (interrupts & ENISR_RDC)
- {
- outb_p(ENISR_RDC, e8390_base + EN0_ISR);
- }
-
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
- }
-
- if (interrupts && ei_debug)
- {
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
- if (nr_serviced >= MAX_SERVICE)
- {
- /* 0xFF is valid for a card removal */
- if(interrupts!=0xFF)
- printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
- dev->name, interrupts);
- outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
- } else {
- printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
- outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
- }
- }
- spin_unlock(&ei_local->page_lock);
- return IRQ_RETVAL(nr_serviced > 0);
+ return __ei_interrupt(irq, dev_id);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
void ei_poll(struct net_device *dev)
{
- disable_irq_lockdep(dev->irq);
- ei_interrupt(dev->irq, dev);
- enable_irq_lockdep(dev->irq);
+ __ei_poll(dev);
}
#endif
-/**
- * ei_tx_err - handle transmitter error
- * @dev: network device which threw the exception
- *
- * A transmitter error has happened. Most likely excess collisions (which
- * is a fairly normal condition). If the error is one where the Tx will
- * have been aborted, we try and send another one right away, instead of
- * letting the failed packet sit and collect dust in the Tx buffer. This
- * is a much better solution as it avoids kernel based Tx timeouts, and
- * an unnecessary card reset.
- *
- * Called with lock held.
- */
-
-static void ei_tx_err(struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- unsigned char txsr = inb_p(e8390_base+EN0_TSR);
- unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
-
-#ifdef VERBOSE_ERROR_DUMP
- printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
- if (txsr & ENTSR_ABT)
- printk("excess-collisions ");
- if (txsr & ENTSR_ND)
- printk("non-deferral ");
- if (txsr & ENTSR_CRS)
- printk("lost-carrier ");
- if (txsr & ENTSR_FU)
- printk("FIFO-underrun ");
- if (txsr & ENTSR_CDH)
- printk("lost-heartbeat ");
- printk("\n");
-#endif
-
- outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
-
- if (tx_was_aborted)
- ei_tx_intr(dev);
- else
- {
- ei_local->stat.tx_errors++;
- if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
- if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
- if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++;
- }
-}
-
-/**
- * ei_tx_intr - transmit interrupt handler
- * @dev: network device for which tx intr is handled
- *
- * We have finished a transmit: check for errors and then trigger the next
- * packet to be sent. Called with lock held.
- */
-
-static void ei_tx_intr(struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- int status = inb(e8390_base + EN0_TSR);
-
- outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
-
- /*
- * There are two Tx buffers, see which one finished, and trigger
- * the send of another one if it exists.
- */
- ei_local->txqueue--;
-
- if (ei_local->tx1 < 0)
- {
- if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
- printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
- ei_local->name, ei_local->lasttx, ei_local->tx1);
- ei_local->tx1 = 0;
- if (ei_local->tx2 > 0)
- {
- ei_local->txing = 1;
- NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
- dev->trans_start = jiffies;
- ei_local->tx2 = -1,
- ei_local->lasttx = 2;
- }
- else ei_local->lasttx = 20, ei_local->txing = 0;
- }
- else if (ei_local->tx2 < 0)
- {
- if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
- printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
- ei_local->name, ei_local->lasttx, ei_local->tx2);
- ei_local->tx2 = 0;
- if (ei_local->tx1 > 0)
- {
- ei_local->txing = 1;
- NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
- dev->trans_start = jiffies;
- ei_local->tx1 = -1;
- ei_local->lasttx = 1;
- }
- else
- ei_local->lasttx = 10, ei_local->txing = 0;
- }
-// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
-// dev->name, ei_local->lasttx);
-
- /* Minimize Tx latency: update the statistics after we restart TXing. */
- if (status & ENTSR_COL)
- ei_local->stat.collisions++;
- if (status & ENTSR_PTX)
- ei_local->stat.tx_packets++;
- else
- {
- ei_local->stat.tx_errors++;
- if (status & ENTSR_ABT)
- {
- ei_local->stat.tx_aborted_errors++;
- ei_local->stat.collisions += 16;
- }
- if (status & ENTSR_CRS)
- ei_local->stat.tx_carrier_errors++;
- if (status & ENTSR_FU)
- ei_local->stat.tx_fifo_errors++;
- if (status & ENTSR_CDH)
- ei_local->stat.tx_heartbeat_errors++;
- if (status & ENTSR_OWC)
- ei_local->stat.tx_window_errors++;
- }
- netif_wake_queue(dev);
-}
-
-/**
- * ei_receive - receive some packets
- * @dev: network device with which receive will be run
- *
- * We have a good packet(s), get it/them out of the buffers.
- * Called with lock held.
- */
-
-static void ei_receive(struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- unsigned char rxing_page, this_frame, next_frame;
- unsigned short current_offset;
- int rx_pkt_count = 0;
- struct e8390_pkt_hdr rx_frame;
- int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
-
- while (++rx_pkt_count < 10)
- {
- int pkt_len, pkt_stat;
-
- /* Get the rx page (incoming packet pointer). */
- outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
- rxing_page = inb_p(e8390_base + EN1_CURPAG);
- outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
-
- /* Remove one frame from the ring. Boundary is always a page behind. */
- this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
- if (this_frame >= ei_local->stop_page)
- this_frame = ei_local->rx_start_page;
-
- /* Someday we'll omit the previous, iff we never get this message.
- (There is at least one clone claimed to have a problem.)
-
- Keep quiet if it looks like a card removal. One problem here
- is that some clones crash in roughly the same way.
- */
- if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
- printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
- dev->name, this_frame, ei_local->current_page);
-
- if (this_frame == rxing_page) /* Read all the frames? */
- break; /* Done for now */
-
- current_offset = this_frame << 8;
- ei_get_8390_hdr(dev, &rx_frame, this_frame);
-
- pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
- pkt_stat = rx_frame.status;
-
- next_frame = this_frame + 1 + ((pkt_len+4)>>8);
-
- /* Check for bogosity warned by 3c503 book: the status byte is never
- written. This happened a lot during testing! This code should be
- cleaned up someday. */
- if (rx_frame.next != next_frame
- && rx_frame.next != next_frame + 1
- && rx_frame.next != next_frame - num_rx_pages
- && rx_frame.next != next_frame + 1 - num_rx_pages) {
- ei_local->current_page = rxing_page;
- outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
- ei_local->stat.rx_errors++;
- continue;
- }
-
- if (pkt_len < 60 || pkt_len > 1518)
- {
- if (ei_debug)
- printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
- dev->name, rx_frame.count, rx_frame.status,
- rx_frame.next);
- ei_local->stat.rx_errors++;
- ei_local->stat.rx_length_errors++;
- }
- else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
- {
- struct sk_buff *skb;
-
- skb = dev_alloc_skb(pkt_len+2);
- if (skb == NULL)
- {
- if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
- dev->name, pkt_len);
- ei_local->stat.rx_dropped++;
- break;
- }
- else
- {
- skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
- skb->dev = dev;
- skb_put(skb, pkt_len); /* Make room */
- ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->last_rx = jiffies;
- ei_local->stat.rx_packets++;
- ei_local->stat.rx_bytes += pkt_len;
- if (pkt_stat & ENRSR_PHY)
- ei_local->stat.multicast++;
- }
- }
- else
- {
- if (ei_debug)
- printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- dev->name, rx_frame.status, rx_frame.next,
- rx_frame.count);
- ei_local->stat.rx_errors++;
- /* NB: The NIC counts CRC, frame and missed errors. */
- if (pkt_stat & ENRSR_FO)
- ei_local->stat.rx_fifo_errors++;
- }
- next_frame = rx_frame.next;
-
- /* This _should_ never happen: it's here for avoiding bad clones. */
- if (next_frame >= ei_local->stop_page) {
- printk("%s: next frame inconsistency, %#2x\n", dev->name,
- next_frame);
- next_frame = ei_local->rx_start_page;
- }
- ei_local->current_page = next_frame;
- outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
- }
-
- /* We used to also ack ENISR_OVER here, but that would sometimes mask
- a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
- outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
- return;
-}
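
The ring bookkeeping in the loop above is the part worth slowing down for: receive pages are 256 bytes, every frame is prefixed by a 4-byte e8390_pkt_hdr, and the BOUNDARY register always trails the software read pointer by one page. A minimal user-space sketch of that arithmetic, with invented ring limits (the real values come from the board probe code):

	#include <stdio.h>

	#define RX_START_PAGE 0x46	/* illustrative ring limits */
	#define STOP_PAGE     0x80

	/* Page that follows a frame of pkt_len data bytes at this_frame. */
	static unsigned char next_rx_page(unsigned char this_frame, int pkt_len)
	{
		/* 4-byte header + data, rounded up to whole 256-byte pages */
		unsigned char next = this_frame + 1 + ((pkt_len + 4) >> 8);

		if (next >= STOP_PAGE)	/* wrap back to the ring start, as the driver does */
			next = RX_START_PAGE;
		return next;
	}

	int main(void)
	{
		unsigned char page = RX_START_PAGE;
		int lengths[] = { 60, 1514, 300 };
		int i;

		for (i = 0; i < 3; i++) {
			unsigned char next = next_rx_page(page, lengths[i]);

			printf("frame at 0x%02x, len %4d -> next 0x%02x, BOUNDARY 0x%02x\n",
			       page, lengths[i], next, (unsigned char)(next - 1));
			page = next;
		}
		return 0;
	}
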
-
-/**
- * ei_rx_overrun - handle receiver overrun
- * @dev: network device which threw exception
- *
- * We have a receiver overrun: we have to kick the 8390 to get it started
- * again. Problem is that you have to kick it exactly as NS prescribes in
- * the updated datasheets, or "the NIC may act in an unpredictable manner."
- * This includes causing "the NIC to defer indefinitely when it is stopped
- * on a busy network." Ugh.
- * Called with lock held. Don't call this with the interrupts off or your
- * computer will hate you - it takes 10ms or so.
- */
-
-static void ei_rx_overrun(struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- unsigned char was_txing, must_resend = 0;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-
- /*
- * Record whether a Tx was in progress and then issue the
- * stop command.
- */
- was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
-
- if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
- ei_local->stat.rx_over_errors++;
-
- /*
- * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
- * Early datasheets said to poll the reset bit, but now they say that
- * it "is not a reliable indicator and subsequently should be ignored."
- * We wait at least 10ms.
- */
-
- mdelay(10);
-
- /*
- * Reset RBCR[01] back to zero as per magic incantation.
- */
- outb_p(0x00, e8390_base+EN0_RCNTLO);
- outb_p(0x00, e8390_base+EN0_RCNTHI);
-
- /*
- * See if any Tx was interrupted or not. According to NS, this
- * step is vital, and skipping it will cause no end of havoc.
- */
-
- if (was_txing)
- {
- unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
- if (!tx_completed)
- must_resend = 1;
- }
-
- /*
- * Have to enter loopback mode and then restart the NIC before
- * you are allowed to slurp packets up off the ring.
- */
- outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
- outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
-
- /*
- * Clear the Rx ring of all the debris, and ack the interrupt.
- */
- ei_receive(dev);
- outb_p(ENISR_OVER, e8390_base+EN0_ISR);
-
- /*
- * Leave loopback mode, and resend any packet that got stopped.
- */
- outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
- if (must_resend)
- outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
-}
-
-/*
- * Collect the stats. This is called unlocked and from several contexts.
- */
-
-static struct net_device_stats *get_stats(struct net_device *dev)
-{
- long ioaddr = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- unsigned long flags;
-
- /* If the card is stopped, just return the present stats. */
- if (!netif_running(dev))
- return &ei_local->stat;
-
- spin_lock_irqsave(&ei_local->page_lock,flags);
- /* Read the counter registers, assuming we are in page 0. */
- ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
- ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
- ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
-
- return &ei_local->stat;
-}
-
-/*
- * Form the 64 bit 8390 multicast table from the linked list of addresses
- * associated with this dev structure.
- */
-
-static inline void make_mc_bits(u8 *bits, struct net_device *dev)
-{
- struct dev_mc_list *dmi;
-
- for (dmi=dev->mc_list; dmi; dmi=dmi->next)
- {
- u32 crc;
- if (dmi->dmi_addrlen != ETH_ALEN)
- {
- printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
- continue;
- }
- crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
- /*
- * The 8390 uses the 6 most significant bits of the
- * CRC to index the multicast table.
- */
- bits[crc>>29] |= (1<<((crc>>26)&7));
- }
-}
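
make_mc_bits() above leans on the comment's one-liner: the top 6 bits of the Ethernet CRC pick one of 64 filter bits, 3 bits selecting the byte and 3 the bit within it. A stand-alone sketch, assuming the usual MSB-first Ethernet CRC-32 (polynomial 0x04C11DB7, seeded with all ones, octet bits fed LSB first), which is what the kernel's ether_crc() computes; the multicast address is just an example:

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-in for ether_crc(): CRC-32 over len bytes of data. */
	static uint32_t crc32_be_eth(const uint8_t *data, int len)
	{
		uint32_t crc = 0xFFFFFFFF;
		int i, bit;

		for (i = 0; i < len; i++) {
			uint8_t octet = data[i];

			for (bit = 0; bit < 8; bit++, octet >>= 1) {
				int msb = (crc & 0x80000000) != 0;

				crc <<= 1;
				if (msb ^ (octet & 1))
					crc ^= 0x04C11DB7;
			}
		}
		return crc;
	}

	int main(void)
	{
		uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
		uint8_t mcfilter[8] = { 0 };
		uint32_t crc = crc32_be_eth(mcast, 6);

		mcfilter[crc >> 29] |= 1 << ((crc >> 26) & 7);
		printf("crc=%08x -> filter byte %u, bit %u\n",
		       (unsigned)crc, (unsigned)(crc >> 29),
		       (unsigned)((crc >> 26) & 7));
		return 0;
	}
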
-
-/**
- * do_set_multicast_list - set/clear multicast filter
- * @dev: net device for which multicast filter is adjusted
- *
- * Set or clear the multicast filter for this adaptor. May be called
- * from a BH in 2.1.x. Must be called with lock held.
- */
-
-static void do_set_multicast_list(struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- int i;
- struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
-
- if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
- {
- memset(ei_local->mcfilter, 0, 8);
- if (dev->mc_list)
- make_mc_bits(ei_local->mcfilter, dev);
- }
- else
- memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */
-
- /*
- * DP8390 manuals don't specify any magic sequence for altering
- * the multicast regs on an already running card. To be safe, we
- * ensure multicast mode is off prior to loading up the new hash
- * table. If this proves to be not enough, we can always resort
- * to stopping the NIC, loading the table and then restarting.
- *
- * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC
- * Elite16) appear to be write-only. The NS 8390 data sheet lists
- * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and
- * Ultra32 EISA) appears to have this bug fixed.
- */
-
- if (netif_running(dev))
- outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
- outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
- for(i = 0; i < 8; i++)
- {
- outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
-#ifndef BUG_83C690
- if(inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i])
- printk(KERN_ERR "Multicast filter read/write mismap %d\n",i);
-#endif
- }
- outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
-
- if(dev->flags&IFF_PROMISC)
- outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
- else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
- outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
- else
- outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
- }
-
-/*
- * Called without lock held. This is invoked from user context and may
- * be parallel to just about everything else. Its also fairly quick and
- * not called too often. Must protect against both bh and irq users
- */
-
-static void set_multicast_list(struct net_device *dev)
-{
- unsigned long flags;
- struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
-
- spin_lock_irqsave(&ei_local->page_lock, flags);
- do_set_multicast_list(dev);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
-}
-
-/**
- * ethdev_setup - init rest of 8390 device struct
- * @dev: network device structure to init
- *
- * Initialize the rest of the 8390 device structure. Do NOT __init
- * this, as it is used by 8390 based modular drivers too.
- */
-
-static void ethdev_setup(struct net_device *dev)
-{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- if (ei_debug > 1)
- printk(version);
-
- dev->hard_start_xmit = &ei_start_xmit;
- dev->get_stats = get_stats;
- dev->set_multicast_list = &set_multicast_list;
-
- ether_setup(dev);
-
- spin_lock_init(&ei_local->page_lock);
-}
-
-/**
- * alloc_ei_netdev - alloc_etherdev counterpart for 8390
- * @size: extra bytes to allocate
- *
- * Allocate 8390-specific net_device.
- */
struct net_device *__alloc_ei_netdev(int size)
{
- return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
- ethdev_setup);
+ return ____alloc_ei_netdev(size);
}
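
ethdev_setup() and __alloc_ei_netdev() above are the stock alloc_netdev() idiom: ask for sizeof(priv) extra bytes, let the setup callback fill in defaults, and reach the private area later with netdev_priv(). For reference, a sketch with a made-up driver (foo_priv/foo_setup are not real code):

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/spinlock.h>

	struct foo_priv {			/* hypothetical private state */
		spinlock_t lock;
		int txing;
	};

	static void foo_setup(struct net_device *dev)
	{
		struct foo_priv *p = netdev_priv(dev);

		ether_setup(dev);		/* generic Ethernet defaults */
		spin_lock_init(&p->lock);
	}

	static struct net_device *foo_alloc(void)
	{
		/* private area sized for foo_priv, interface named ethN */
		return alloc_netdev(sizeof(struct foo_priv), "eth%d", foo_setup);
	}
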
-
-
-
-/* This page of functions should be 8390 generic */
-/* Follow National Semi's recommendations for initializing the "NIC". */
-
-/**
- * NS8390_init - initialize 8390 hardware
- * @dev: network device to initialize
- * @startp: boolean. non-zero value to initiate chip processing
- *
- * Must be called with lock held.
- */
-
void NS8390_init(struct net_device *dev, int startp)
{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- int i;
- int endcfg = ei_local->word16
- ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
- : 0x48;
-
- if(sizeof(struct e8390_pkt_hdr)!=4)
- panic("8390.c: header struct mispacked\n");
- /* Follow National Semi's recommendations for initing the DP83902. */
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
- outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
- /* Clear the remote byte count registers. */
- outb_p(0x00, e8390_base + EN0_RCNTLO);
- outb_p(0x00, e8390_base + EN0_RCNTHI);
- /* Set to monitor and loopback mode -- this is vital!. */
- outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
- outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
- /* Set the transmit page and receive ring. */
- outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
- ei_local->tx1 = ei_local->tx2 = 0;
- outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
- outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/
- ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
- outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
- /* Clear the pending interrupts and mask. */
- outb_p(0xFF, e8390_base + EN0_ISR);
- outb_p(0x00, e8390_base + EN0_IMR);
-
- /* Copy the station address into the DS8390 registers. */
-
- outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
- for(i = 0; i < 6; i++)
- {
- outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
- if (ei_debug > 1 && inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
- printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
- }
-
- outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
-
- netif_start_queue(dev);
- ei_local->tx1 = ei_local->tx2 = 0;
- ei_local->txing = 0;
-
- if (startp)
- {
- outb_p(0xff, e8390_base + EN0_ISR);
- outb_p(ENISR_ALL, e8390_base + EN0_IMR);
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
- outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
- /* 3c503 TechMan says rxconfig only after the NIC is started. */
- outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
- do_set_multicast_list(dev); /* (re)load the mcast table */
- }
-}
-
-/* Trigger a transmit start, assuming the length is valid.
- Always called with the page lock held */
-
-static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
- int start_page)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
-
- outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
-
- if (inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
- {
- printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
- dev->name);
- return;
- }
- outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
- outb_p(length >> 8, e8390_base + EN0_TCNTHI);
- outb_p(start_page, e8390_base + EN0_TPSR);
- outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
+ return __NS8390_init(dev, startp);
}
EXPORT_SYMBOL(ei_open);
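
Everything deleted above has not vanished; it has moved into a shared core (lib8390.c, added elsewhere in this series and not visible in this excerpt) that is pulled in by textual inclusion, once per I/O flavour. 8390.c itself presumably shrinks to the ISA instantiation plus exported wrappers, roughly:

	#include "lib8390.c"		/* provides __ei_open(), __ei_interrupt(), ... */

	int ei_open(struct net_device *dev)
	{
		return __ei_open(dev);
	}
	EXPORT_SYMBOL(ei_open);

	irqreturn_t ei_interrupt(int irq, void *dev_id)
	{
		return __ei_interrupt(irq, dev_id);
	}
	EXPORT_SYMBOL(ei_interrupt);
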
diff --git a/drivers/net/8390.h b/drivers/net/8390.h
index f44f1220b3a5..414de5bd228f 100644
--- a/drivers/net/8390.h
+++ b/drivers/net/8390.h
@@ -107,35 +107,14 @@ struct ei_device {
* - removed AMIGA_PCMCIA from this list, handled as ISA io now
*/
-#if defined(CONFIG_MAC) || \
- defined(CONFIG_ZORRO8390) || defined(CONFIG_ZORRO8390_MODULE) || \
- defined(CONFIG_HYDRA) || defined(CONFIG_HYDRA_MODULE)
-#define EI_SHIFT(x) (ei_local->reg_offset[x])
-#undef inb
-#undef inb_p
-#undef outb
-#undef outb_p
-
-#define inb(port) in_8(port)
-#define outb(val,port) out_8(port,val)
-#define inb_p(port) in_8(port)
-#define outb_p(val,port) out_8(port,val)
-
-#elif defined(CONFIG_ARM_ETHERH) || defined(CONFIG_ARM_ETHERH_MODULE)
-#define EI_SHIFT(x) (ei_local->reg_offset[x])
-#undef inb
-#undef inb_p
-#undef outb
-#undef outb_p
-
-#define inb(_p) readb(_p)
-#define outb(_v,_p) writeb(_v,_p)
-#define inb_p(_p) inb(_p)
-#define outb_p(_v,_p) outb(_v,_p)
-
-#elif defined(CONFIG_NE_H8300) || defined(CONFIG_NE_H8300_MODULE)
-#define EI_SHIFT(x) (ei_local->reg_offset[x])
-#else
+#ifndef ei_inb
+#define ei_inb(_p) inb(_p)
+#define ei_outb(_v,_p) outb(_v,_p)
+#define ei_inb_p(_p) inb_p(_p)
+#define ei_outb_p(_v,_p) outb_p(_v,_p)
+#endif
+
+#ifndef EI_SHIFT
#define EI_SHIFT(x) (x)
#endif
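
This is the hook that makes the split work: lib8390.c only ever touches the hardware through ei_inb()/ei_outb() and EI_SHIFT(), and 8390.h now supplies ISA inb()/outb() defaults only when a port has not defined its own. A port with memory-mapped or strided registers overrides the macros before including the core, as the etherh.c changes further down do; a hypothetical example (foo8390.c is invented):

	/* drivers/net/foo8390.c -- hypothetical platform port */
	#include <asm/io.h>

	#define EI_SHIFT(x)	((x) << 1)	/* registers on even addresses */
	#define ei_inb(p)	readb((void __iomem *)(p))
	#define ei_outb(v, p)	writeb(v, (void __iomem *)(p))
	#define ei_inb_p(p)	ei_inb(p)
	#define ei_outb_p(v, p)	ei_outb(v, p)

	#include "lib8390.c"	/* compiles the 8390 core with these accessors */
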
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d3abf80ea3e2..9de0eed6755b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -188,6 +188,17 @@ config MII
or internal device. It is safe to say Y or M here even if your
ethernet card lack MII.
+config MACB
+ tristate "Atmel MACB support"
+ depends on NET_ETHERNET && AVR32
+ select MII
+ help
+ The Atmel MACB ethernet interface is found on many AT32 and AT91
+ parts. Say Y to include support for the MACB chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called macb.
+
source "drivers/net/arm/Kconfig"
config MACE
@@ -2251,6 +2262,14 @@ config SPIDER_NET
This driver supports the Gigabit Ethernet chips present on the
Cell Processor-Based Blades from IBM.
+config TSI108_ETH
+ tristate "Tundra TSI108 gigabit Ethernet support"
+ depends on TSI108_BRIDGE
+ help
+ This driver supports Tundra TSI108 gigabit Ethernet ports.
+ To compile this driver as a module, choose M here: the module
+ will be called tsi108_eth.
+
config GIANFAR
tristate "Gianfar Ethernet"
depends on 85xx || 83xx || PPC_86xx
@@ -2341,10 +2360,11 @@ menu "Ethernet (10000 Mbit)"
config CHELSIO_T1
tristate "Chelsio 10Gb Ethernet support"
depends on PCI
+ select CRC32
help
- This driver supports Chelsio N110 and N210 models 10Gb Ethernet
- cards. More information about adapter features and performance
- tuning is in <file:Documentation/networking/cxgb.txt>.
+ This driver supports Chelsio gigabit and 10-gigabit
+ Ethernet cards. More information about adapter features and
+ performance tuning is in <file:Documentation/networking/cxgb.txt>.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
@@ -2357,6 +2377,13 @@ config CHELSIO_T1
To compile this driver as a module, choose M here: the module
will be called cxgb.
+config CHELSIO_T1_1G
+ bool "Chelsio gigabit Ethernet support"
+ depends on CHELSIO_T1
+ help
+ Enables support for Chelsio's gigabit Ethernet PCI cards. If you
+ are using only 10G cards say 'N' here.
+
config EHEA
tristate "eHEA Ethernet support"
depends on IBMEBUS
@@ -2447,6 +2474,12 @@ config MYRI10GE
<file:Documentation/networking/net-modules.txt>. The module
will be called myri10ge.
+config NETXEN_NIC
+ tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
+ depends on PCI
+ help
+ This enables the support for NetXen's Gigabit Ethernet card.
+
endmenu
source "drivers/net/tokenring/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f270bc49e571..4c0d4e5ce42b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -82,7 +82,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o
obj-$(CONFIG_NET) += Space.o loopback.o
obj-$(CONFIG_SEEQ8005) += seeq8005.o
obj-$(CONFIG_NET_SB1000) += sb1000.o
-obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
+obj-$(CONFIG_MAC8390) += mac8390.o
obj-$(CONFIG_APNE) += apne.o 8390.o
obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
obj-$(CONFIG_SHAPER) += shaper.o
@@ -90,7 +90,6 @@ obj-$(CONFIG_HP100) += hp100.o
obj-$(CONFIG_SMC9194) += smc9194.o
obj-$(CONFIG_FEC) += fec.o
obj-$(CONFIG_68360_ENET) += 68360enet.o
-obj-$(CONFIG_ARM_ETHERH) += 8390.o
obj-$(CONFIG_WD80x3) += wd.o 8390.o
obj-$(CONFIG_EL2) += 3c503.o 8390.o
obj-$(CONFIG_NE2000) += ne.o 8390.o
@@ -107,8 +106,9 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o
obj-$(CONFIG_NET_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_FORCEDETH) += forcedeth.o
-obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
+obj-$(CONFIG_NE_H8300) += ne-h8300.o
+obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
obj-$(CONFIG_QLA3XXX) += qla3xxx.o
@@ -165,7 +165,7 @@ obj-$(CONFIG_BVME6000_NET) += 82596.o
obj-$(CONFIG_LP486E) += lp486e.o
obj-$(CONFIG_ETH16I) += eth16i.o
-obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
+obj-$(CONFIG_ZORRO8390) += zorro8390.o
obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
obj-$(CONFIG_EQUALIZER) += eql.o
@@ -178,7 +178,7 @@ obj-$(CONFIG_ATARILANCE) += atarilance.o
obj-$(CONFIG_ATARI_BIONET) += atari_bionet.o
obj-$(CONFIG_ATARI_PAMSNET) += atari_pamsnet.o
obj-$(CONFIG_A2065) += a2065.o
-obj-$(CONFIG_HYDRA) += hydra.o 8390.o
+obj-$(CONFIG_HYDRA) += hydra.o
obj-$(CONFIG_ARIADNE) += ariadne.o
obj-$(CONFIG_CS89x0) += cs89x0.o
obj-$(CONFIG_MACSONIC) += macsonic.o
@@ -197,6 +197,8 @@ obj-$(CONFIG_SMC911X) += smc911x.o
obj-$(CONFIG_DM9000) += dm9000.o
obj-$(CONFIG_FEC_8XX) += fec_8xx/
+obj-$(CONFIG_MACB) += macb.o
+
obj-$(CONFIG_ARM) += arm/
obj-$(CONFIG_DEV_APPLETALK) += appletalk/
obj-$(CONFIG_TR) += tokenring/
@@ -214,3 +216,4 @@ obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-$(CONFIG_FS_ENET) += fs_enet/
+obj-$(CONFIG_NETXEN_NIC) += netxen/
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index ef65e5917c8f..18896f24d407 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1490,32 +1490,7 @@ static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
buf[12] = readl(mmio + STAT0);
}
-/*
-amd8111e crc generator implementation is different from the kernel
-ether_crc() function.
-*/
-static int amd8111e_ether_crc(int len, char* mac_addr)
-{
- int i,byte;
- unsigned char octet;
- u32 crc= INITCRC;
-
- for(byte=0; byte < len; byte++){
- octet = mac_addr[byte];
- for( i=0;i < 8; i++){
- /*If the next bit form the input stream is 1,subtract the divisor (CRC32) from the dividend(crc).*/
- if( (octet & 0x1) ^ (crc & 0x1) ){
- crc >>= 1;
- crc ^= CRC32;
- }
- else
- crc >>= 1;
- octet >>= 1;
- }
- }
- return crc;
-}
/*
This function sets promiscuos mode, all-multi mode or the multicast address
list to the device.
@@ -1556,7 +1531,7 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
mc_filter[1] = mc_filter[0] = 0;
for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
i++, mc_ptr = mc_ptr->next) {
- bit_num = ( amd8111e_ether_crc(ETH_ALEN,mc_ptr->dmi_addr) >> 26 ) & 0x3f;
+ bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
}
amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
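
The removed amd8111e_ether_crc() earlier in this diff is exactly the reflected CRC-32 (polynomial 0xEDB88320, initial value all ones, no final inversion), which is what the kernel already provides as ether_crc_le(); bits 31..26 of the result then select one of the 64 logical-address-filter bits. A user-space check, with an example multicast address:

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-in for ether_crc_le(): reflected CRC-32, LSB-first. */
	static uint32_t crc32_le_eth(int len, const uint8_t *addr)
	{
		uint32_t crc = 0xFFFFFFFF;
		int i, bit;

		for (i = 0; i < len; i++) {
			uint8_t octet = addr[i];

			for (bit = 0; bit < 8; bit++, octet >>= 1) {
				if ((octet ^ crc) & 1) {
					crc >>= 1;
					crc ^= 0xEDB88320;
				} else
					crc >>= 1;
			}
		}
		return crc;
	}

	int main(void)
	{
		uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x7f, 0x00, 0x01 };
		uint32_t mc_filter[2] = { 0, 0 };
		unsigned int bit_num = (crc32_le_eth(6, mc) >> 26) & 0x3f;

		mc_filter[bit_num >> 5] |= 1u << (bit_num & 31);
		printf("bit %u -> LADRF word %u, mask %08x\n",
		       bit_num, bit_num >> 5, (unsigned)mc_filter[bit_num >> 5]);
		return 0;
	}
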
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index 7727d328f65e..2007510c4eb6 100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
@@ -651,10 +651,6 @@ typedef enum {
/* driver ioctl parameters */
#define AMD8111E_REG_DUMP_LEN 13*sizeof(u32)
-/* crc generator constants */
-#define CRC32 0xedb88320
-#define INITCRC 0xFFFFFFFF
-
/* amd8111e desriptor format */
struct amd8111e_tx_dr{
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 4ae98970b282..f3faa4fe58e7 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -52,7 +52,12 @@
#include <asm/ecard.h>
#include <asm/io.h>
-#include "../8390.h"
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+
+#define ei_inb(_p) readb((void __iomem *)_p)
+#define ei_outb(_v,_p) writeb(_v,(void __iomem *)_p)
+#define ei_inb_p(_p) readb((void __iomem *)_p)
+#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p)
#define NET_DEBUG 0
#define DEBUG_INIT 2
@@ -60,6 +65,11 @@
#define DRV_NAME "etherh"
#define DRV_VERSION "1.11"
+static char version[] __initdata =
+ "EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
+
+#include "../lib8390.c"
+
static unsigned int net_debug = NET_DEBUG;
struct etherh_priv {
@@ -87,9 +97,6 @@ MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("EtherH/EtherM driver");
MODULE_LICENSE("GPL");
-static char version[] __initdata =
- "EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
-
#define ETHERH500_DATAPORT 0x800 /* MEMC */
#define ETHERH500_NS8390 0x000 /* MEMC */
#define ETHERH500_CTRLPORT 0x800 /* IOC */
@@ -177,7 +184,7 @@ etherh_setif(struct net_device *dev)
switch (etherh_priv(dev)->id) {
case PROD_I3_ETHERLAN600:
case PROD_I3_ETHERLAN600A:
- addr = (void *)dev->base_addr + EN0_RCNTHI;
+ addr = (void __iomem *)dev->base_addr + EN0_RCNTHI;
switch (dev->if_port) {
case IF_PORT_10BASE2:
@@ -218,7 +225,7 @@ etherh_getifstat(struct net_device *dev)
switch (etherh_priv(dev)->id) {
case PROD_I3_ETHERLAN600:
case PROD_I3_ETHERLAN600A:
- addr = (void *)dev->base_addr + EN0_RCNTHI;
+ addr = (void __iomem *)dev->base_addr + EN0_RCNTHI;
switch (dev->if_port) {
case IF_PORT_10BASE2:
stat = 1;
@@ -281,7 +288,7 @@ static void
etherh_reset(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
- void __iomem *addr = (void *)dev->base_addr;
+ void __iomem *addr = (void __iomem *)dev->base_addr;
writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr);
@@ -327,7 +334,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
ei_local->dmaing = 1;
- addr = (void *)dev->base_addr;
+ addr = (void __iomem *)dev->base_addr;
dma_base = etherh_priv(dev)->dma_base;
count = (count + 1) & ~1;
@@ -360,7 +367,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
dev->name);
etherh_reset (dev);
- NS8390_init (dev, 1);
+ __NS8390_init (dev, 1);
break;
}
@@ -387,7 +394,7 @@ etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int
ei_local->dmaing = 1;
- addr = (void *)dev->base_addr;
+ addr = (void __iomem *)dev->base_addr;
dma_base = etherh_priv(dev)->dma_base;
buf = skb->data;
@@ -427,7 +434,7 @@ etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_p
ei_local->dmaing = 1;
- addr = (void *)dev->base_addr;
+ addr = (void __iomem *)dev->base_addr;
dma_base = etherh_priv(dev)->dma_base;
writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
@@ -465,7 +472,7 @@ etherh_open(struct net_device *dev)
return -EINVAL;
}
- if (request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))
+ if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev))
return -EAGAIN;
/*
@@ -491,7 +498,7 @@ etherh_open(struct net_device *dev)
etherh_setif(dev);
etherh_reset(dev);
- ei_open(dev);
+ __ei_open(dev);
return 0;
}
@@ -502,7 +509,7 @@ etherh_open(struct net_device *dev)
static int
etherh_close(struct net_device *dev)
{
- ei_close (dev);
+ __ei_close (dev);
free_irq (dev->irq, dev);
return 0;
}
@@ -650,7 +657,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
if (ret)
goto out;
- dev = __alloc_ei_netdev(sizeof(struct etherh_priv));
+ dev = ____alloc_ei_netdev(sizeof(struct etherh_priv));
if (!dev) {
ret = -ENOMEM;
goto release;
@@ -736,7 +743,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
ei_local->interface_num = 0;
etherh_reset(dev);
- NS8390_init(dev, 0);
+ __NS8390_init(dev, 0);
ret = register_netdev(dev);
if (ret)
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 7db3c8af0894..f0b6879a1c7d 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -360,7 +360,8 @@ static int mii_probe (struct net_device *dev)
BUG_ON(!phydev);
BUG_ON(phydev->attached_dev);
- phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0);
+ phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile
index 54c78d94f48b..382d23f810ab 100644
--- a/drivers/net/chelsio/Makefile
+++ b/drivers/net/chelsio/Makefile
@@ -1,11 +1,11 @@
#
-# Chelsio 10Gb NIC driver for Linux.
+# Chelsio T1 driver
#
obj-$(CONFIG_CHELSIO_T1) += cxgb.o
-EXTRA_CFLAGS += -Idrivers/net/chelsio $(DEBUG_FLAGS)
+cxgb-$(CONFIG_CHELSIO_T1_1G) += ixf1010.o mac.o mv88e1xxx.o vsc7326.o vsc8244.o
+cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o \
+ mv88x201x.o my3126.o $(cxgb-y)
-cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o
-
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 5d9dd14427c5..b265941e1372 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -45,6 +45,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/init.h>
@@ -53,13 +54,30 @@
#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
#define DRV_NAME "cxgb"
-#define DRV_VERSION "2.1.1"
+#define DRV_VERSION "2.2"
#define PFX DRV_NAME ": "
#define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
#define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
+/*
+ * More powerful macro that selectively prints messages based on msg_enable.
+ * For info and debugging messages.
+ */
+#define CH_MSG(adapter, level, category, fmt, ...) do { \
+ if ((adapter)->msg_enable & NETIF_MSG_##category) \
+ printk(KERN_##level PFX "%s: " fmt, (adapter)->name, \
+ ## __VA_ARGS__); \
+} while (0)
+
+#ifdef DEBUG
+# define CH_DBG(adapter, category, fmt, ...) \
+ CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
+#else
+# define CH_DBG(fmt, ...)
+#endif
+
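
A usage sketch for the new macros (the categories and message text here are invented; any NETIF_MSG_* bit from <linux/netdevice.h> gates the matching category the same way):

	adapter->msg_enable = NETIF_MSG_LINK | NETIF_MSG_INTR;

	CH_MSG(adapter, INFO, LINK, "port %d link up\n", port_id);	/* printed */
	CH_MSG(adapter, INFO, IFUP, "bringing up port %d\n", port_id);	/* filtered out */
	CH_DBG(adapter, INTR, "slow interrupt, cause %#x\n", cause);	/* only with -DDEBUG */
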
#define CH_DEVICE(devid, ssid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
@@ -71,10 +89,6 @@
typedef struct adapter adapter_t;
-void t1_elmer0_ext_intr(adapter_t *adapter);
-void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
- int speed, int duplex, int fc);
-
struct t1_rx_mode {
struct net_device *dev;
u32 idx;
@@ -97,26 +111,53 @@ static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
}
#define MAX_NPORTS 4
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+#define NMTUS 8
+#define TCB_SIZE 128
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
enum {
CHBT_BOARD_N110,
- CHBT_BOARD_N210
+ CHBT_BOARD_N210,
+ CHBT_BOARD_7500,
+ CHBT_BOARD_8000,
+ CHBT_BOARD_CHT101,
+ CHBT_BOARD_CHT110,
+ CHBT_BOARD_CHT210,
+ CHBT_BOARD_CHT204,
+ CHBT_BOARD_CHT204V,
+ CHBT_BOARD_CHT204E,
+ CHBT_BOARD_CHN204,
+ CHBT_BOARD_COUGAR,
+ CHBT_BOARD_6800,
+ CHBT_BOARD_SIMUL,
};
enum {
+ CHBT_TERM_FPGA,
CHBT_TERM_T1,
- CHBT_TERM_T2
+ CHBT_TERM_T2,
+ CHBT_TERM_T3
};
enum {
+ CHBT_MAC_CHELSIO_A,
+ CHBT_MAC_IXF1010,
CHBT_MAC_PM3393,
+ CHBT_MAC_VSC7321,
+ CHBT_MAC_DUMMY
};
enum {
+ CHBT_PHY_88E1041,
+ CHBT_PHY_88E1111,
CHBT_PHY_88X2010,
+ CHBT_PHY_XPAK,
+ CHBT_PHY_MY3126,
+ CHBT_PHY_8244,
+ CHBT_PHY_DUMMY
};
enum {
@@ -150,16 +191,44 @@ struct chelsio_pci_params {
unsigned char is_pcix;
};
+struct tp_params {
+ unsigned int pm_size;
+ unsigned int cm_size;
+ unsigned int pm_rx_base;
+ unsigned int pm_tx_base;
+ unsigned int pm_rx_pg_size;
+ unsigned int pm_tx_pg_size;
+ unsigned int pm_rx_num_pgs;
+ unsigned int pm_tx_num_pgs;
+ unsigned int rx_coalescing_size;
+ unsigned int use_5tuple_mode;
+};
+
+struct mc5_params {
+ unsigned int mode; /* selects MC5 width */
+ unsigned int nservers; /* size of server region */
+ unsigned int nroutes; /* size of routing region */
+};
+
+/* Default MC5 region sizes */
+#define DEFAULT_SERVER_REGION_LEN 256
+#define DEFAULT_RT_REGION_LEN 1024
+
struct adapter_params {
struct sge_params sge;
+ struct mc5_params mc5;
+ struct tp_params tp;
struct chelsio_pci_params pci;
const struct board_info *brd_info;
+ unsigned short mtus[NMTUS];
unsigned int nports; /* # of ethernet ports */
unsigned int stats_update_period;
unsigned short chip_revision;
unsigned char chip_version;
+ unsigned char is_asic;
+ unsigned char has_msi;
};
struct link_config {
@@ -207,17 +276,20 @@ struct adapter {
/* Terminator modules. */
struct sge *sge;
struct peespi *espi;
+ struct petp *tp;
struct port_info port[MAX_NPORTS];
struct work_struct stats_update_task;
struct timer_list stats_update_timer;
- struct semaphore mib_mutex;
spinlock_t tpi_lock;
spinlock_t work_lock;
+ spinlock_t mac_lock;
+
/* guards async operations */
spinlock_t async_lock ____cacheline_aligned;
u32 slow_intr_mask;
+ int t1powersave;
};
enum { /* adapter flags */
@@ -256,6 +328,11 @@ struct board_info {
const char *desc;
};
+static inline int t1_is_asic(const adapter_t *adapter)
+{
+ return adapter->params.is_asic;
+}
+
extern struct pci_device_id t1_pci_tbl[];
static inline int adapter_matches_type(const adapter_t *adapter,
@@ -285,13 +362,15 @@ static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
return board_info(adap)->clock_core / 1000000;
}
+extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
+extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
extern void t1_interrupts_enable(adapter_t *adapter);
extern void t1_interrupts_disable(adapter_t *adapter);
extern void t1_interrupts_clear(adapter_t *adapter);
-extern int elmer0_ext_intr_handler(adapter_t *adapter);
+extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
extern int t1_slow_intr_handler(adapter_t *adapter);
extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
@@ -305,9 +384,7 @@ extern int t1_init_hw_modules(adapter_t *adapter);
extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
extern void t1_free_sw_modules(adapter_t *adapter);
extern void t1_fatal_err(adapter_t *adapter);
-
-extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
-extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
-extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
-
+extern void t1_link_changed(adapter_t *adapter, int port_id);
+extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
+ int speed, int duplex, int pause);
#endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
index 3412342f7345..60901f25014e 100644
--- a/drivers/net/chelsio/cphy.h
+++ b/drivers/net/chelsio/cphy.h
@@ -52,7 +52,14 @@ struct mdio_ops {
/* PHY interrupt types */
enum {
cphy_cause_link_change = 0x1,
- cphy_cause_error = 0x2
+ cphy_cause_error = 0x2,
+ cphy_cause_fifo_error = 0x3
+};
+
+enum {
+ PHY_LINK_UP = 0x1,
+ PHY_AUTONEG_RDY = 0x2,
+ PHY_AUTONEG_EN = 0x4
};
struct cphy;
@@ -81,7 +88,18 @@ struct cphy_ops {
/* A PHY instance */
struct cphy {
int addr; /* PHY address */
+ int state; /* Link status state machine */
adapter_t *adapter; /* associated adapter */
+
+ struct work_struct phy_update;
+
+ u16 bmsr;
+ int count;
+ int act_count;
+ int act_on;
+
+ u32 elmer_gpo;
+
struct cphy_ops *ops; /* PHY operations */
int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *val);
@@ -142,6 +160,10 @@ struct gphy {
int (*reset)(adapter_t *adapter);
};
+extern struct gphy t1_my3126_ops;
+extern struct gphy t1_mv88e1xxx_ops;
+extern struct gphy t1_vsc8244_ops;
+extern struct gphy t1_xpak_ops;
extern struct gphy t1_mv88x201x_ops;
extern struct gphy t1_dummy_phy_ops;
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
index 5b357d9e88d6..35f565be4fd3 100644
--- a/drivers/net/chelsio/cpl5_cmd.h
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -46,24 +46,385 @@
#endif
enum CPL_opcode {
+ CPL_PASS_OPEN_REQ = 0x1,
+ CPL_PASS_OPEN_RPL = 0x2,
+ CPL_PASS_ESTABLISH = 0x3,
+ CPL_PASS_ACCEPT_REQ = 0xE,
+ CPL_PASS_ACCEPT_RPL = 0x4,
+ CPL_ACT_OPEN_REQ = 0x5,
+ CPL_ACT_OPEN_RPL = 0x6,
+ CPL_CLOSE_CON_REQ = 0x7,
+ CPL_CLOSE_CON_RPL = 0x8,
+ CPL_CLOSE_LISTSRV_REQ = 0x9,
+ CPL_CLOSE_LISTSRV_RPL = 0xA,
+ CPL_ABORT_REQ = 0xB,
+ CPL_ABORT_RPL = 0xC,
+ CPL_PEER_CLOSE = 0xD,
+ CPL_ACT_ESTABLISH = 0x17,
+
+ CPL_GET_TCB = 0x24,
+ CPL_GET_TCB_RPL = 0x25,
+ CPL_SET_TCB = 0x26,
+ CPL_SET_TCB_FIELD = 0x27,
+ CPL_SET_TCB_RPL = 0x28,
+ CPL_PCMD = 0x29,
+
+ CPL_PCMD_READ = 0x31,
+ CPL_PCMD_READ_RPL = 0x32,
+
+
+ CPL_RX_DATA = 0xA0,
+ CPL_RX_DATA_DDP = 0xA1,
+ CPL_RX_DATA_ACK = 0xA3,
CPL_RX_PKT = 0xAD,
+ CPL_RX_ISCSI_HDR = 0xAF,
+ CPL_TX_DATA_ACK = 0xB0,
+ CPL_TX_DATA = 0xB1,
CPL_TX_PKT = 0xB2,
CPL_TX_PKT_LSO = 0xB6,
+
+ CPL_RTE_DELETE_REQ = 0xC0,
+ CPL_RTE_DELETE_RPL = 0xC1,
+ CPL_RTE_WRITE_REQ = 0xC2,
+ CPL_RTE_WRITE_RPL = 0xD3,
+ CPL_RTE_READ_REQ = 0xC3,
+ CPL_RTE_READ_RPL = 0xC4,
+ CPL_L2T_WRITE_REQ = 0xC5,
+ CPL_L2T_WRITE_RPL = 0xD4,
+ CPL_L2T_READ_REQ = 0xC6,
+ CPL_L2T_READ_RPL = 0xC7,
+ CPL_SMT_WRITE_REQ = 0xC8,
+ CPL_SMT_WRITE_RPL = 0xD5,
+ CPL_SMT_READ_REQ = 0xC9,
+ CPL_SMT_READ_RPL = 0xCA,
+ CPL_ARP_MISS_REQ = 0xCD,
+ CPL_ARP_MISS_RPL = 0xCE,
+ CPL_MIGRATE_C2T_REQ = 0xDC,
+ CPL_MIGRATE_C2T_RPL = 0xDD,
+ CPL_ERROR = 0xD7,
+
+ /* internal: driver -> TOM */
+ CPL_MSS_CHANGE = 0xE1
};
-enum { /* TX_PKT_LSO ethernet types */
+#define NUM_CPL_CMDS 256
+
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_PARITY = 1,
+ CPL_ERR_TCAM_FULL = 3,
+ CPL_ERR_CONN_RESET = 20,
+ CPL_ERR_CONN_EXIST = 22,
+ CPL_ERR_ARP_MISS = 23,
+ CPL_ERR_BAD_SYN = 24,
+ CPL_ERR_CONN_TIMEDOUT = 30,
+ CPL_ERR_XMIT_TIMEDOUT = 31,
+ CPL_ERR_PERSIST_TIMEDOUT = 32,
+ CPL_ERR_FINWAIT2_TIMEDOUT = 33,
+ CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+ CPL_ERR_ABORT_FAILED = 42,
+ CPL_ERR_GENERAL = 99
+};
+
+enum {
+ CPL_CONN_POLICY_AUTO = 0,
+ CPL_CONN_POLICY_ASK = 1,
+ CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+ ULP_MODE_TCPDDP = 1,
+ ULP_MODE_ISCSI = 2,
+ ULP_MODE_IWARP = 3,
+ ULP_MODE_SSL = 4
+};
+
+enum {
+ CPL_PASS_OPEN_ACCEPT,
+ CPL_PASS_OPEN_REJECT
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+ CPL_ABORT_POST_CLOSE_REQ = 2
+};
+
+enum { // TX_PKT_LSO ethernet types
CPL_ETH_II,
CPL_ETH_II_VLAN,
CPL_ETH_802_3,
CPL_ETH_802_3_VLAN
};
-struct cpl_rx_data {
+union opcode_tid {
+ u32 opcode_tid;
+ u8 opcode;
+};
+
+#define S_OPCODE 24
+#define V_OPCODE(x) ((x) << S_OPCODE)
+#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
+#define G_TID(x) ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
+
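
A quick user-space illustration of the packing above: an 8-bit CPL opcode occupies the top byte of a 32-bit word, a 24-bit tid the remainder, and the field travels in network byte order, hence the ntohl() inside GET_TID(). The opcode/tid values below are only examples:

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	#define S_OPCODE 24
	#define V_OPCODE(x) ((x) << S_OPCODE)
	#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
	#define G_TID(x)    ((x) & 0xFFFFFF)
	#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))

	int main(void)
	{
		uint32_t host = MK_OPCODE_TID(0xB2u, 0x000345);	/* CPL_TX_PKT, tid 0x345 */
		uint32_t wire = htonl(host);	/* as stored in the command */

		printf("wire %08x -> opcode %#x, tid %#x\n",
		       (unsigned)wire, (unsigned)G_OPCODE(ntohl(wire)),
		       (unsigned)G_TID(ntohl(wire)));
		return 0;
	}
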
+struct tcp_options {
+ u16 mss;
+ u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 rsvd:4;
+ u8 ecn:1;
+ u8 sack:1;
+ u8 tstamp:1;
+#else
+ u8 tstamp:1;
+ u8 sack:1;
+ u8 ecn:1;
+ u8 rsvd:4;
+#endif
+};
+
+struct cpl_pass_open_req {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u32 opt0h;
+ u32 opt0l;
+ u32 peer_netmask;
+ u32 opt1;
+};
+
+struct cpl_pass_open_rpl {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u8 resvd[7];
+ u8 status;
+};
+
+struct cpl_pass_establish {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u32 tos_tid;
+ u8 l2t_idx;
+ u8 rsvd[3];
+ u32 snd_isn;
+ u32 rcv_isn;
+};
+
+struct cpl_pass_accept_req {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u32 tos_tid;
+ struct tcp_options tcp_options;
+ u8 dst_mac[6];
+ u16 vlan_tag;
+ u8 src_mac[6];
+ u8 rsvd[2];
+ u32 rcv_isn;
+ u32 unknown_tcp_options;
+};
+
+struct cpl_pass_accept_rpl {
+ union opcode_tid ot;
+ u32 rsvd0;
+ u32 rsvd1;
+ u32 peer_ip;
+ u32 opt0h;
+ union {
+ u32 opt0l;
+ struct {
+ u8 rsvd[3];
+ u8 status;
+ };
+ };
+};
+
+struct cpl_act_open_req {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u32 opt0h;
+ u32 opt0l;
+ u32 iff_vlantag;
+ u32 rsvd;
+};
+
+struct cpl_act_open_rpl {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u32 new_tid;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_act_establish {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u32 tos_tid;
+ u32 rsvd;
+ u32 snd_isn;
+ u32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+ union opcode_tid ot;
+ u32 rsvd;
+};
+
+struct cpl_get_tcb_rpl {
+ union opcode_tid ot;
+ u16 len;
+ u8 rsvd;
+ u8 status;
+};
+
+struct cpl_set_tcb {
+ union opcode_tid ot;
+ u16 len;
+ u16 rsvd;
+};
+
+struct cpl_set_tcb_field {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 offset;
+ u32 mask;
+ u32 val;
+};
+
+struct cpl_set_tcb_rpl {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_pcmd {
+ union opcode_tid ot;
+ u16 dlen_in;
+ u16 dlen_out;
+ u32 pcmd_parm[2];
+};
+
+struct cpl_pcmd_read {
+ union opcode_tid ot;
+ u32 rsvd1;
+ u16 rsvd2;
+ u32 addr;
+ u16 len;
+};
+
+struct cpl_pcmd_read_rpl {
+ union opcode_tid ot;
+ u16 len;
+};
+
+struct cpl_close_con_req {
+ union opcode_tid ot;
+ u32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+ u32 snd_nxt;
+ u32 rcv_nxt;
+};
+
+struct cpl_close_listserv_req {
+ union opcode_tid ot;
+ u32 rsvd;
+};
+
+struct cpl_close_listserv_rpl {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_abort_req {
+ union opcode_tid ot;
u32 rsvd0;
+ u8 rsvd1;
+ u8 cmd;
+ u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl {
+ union opcode_tid ot;
+ u32 rsvd0;
+ u8 rsvd1;
+ u8 status;
+ u8 rsvd2[6];
+};
+
+struct cpl_peer_close {
+ union opcode_tid ot;
+ u32 rsvd;
+};
+
+struct cpl_tx_data {
+ union opcode_tid ot;
+ u32 len;
+ u32 rsvd0;
+ u16 urg;
+ u16 flags;
+};
+
+struct cpl_tx_data_ack {
+ union opcode_tid ot;
+ u32 ack_seq;
+};
+
+struct cpl_rx_data {
+ union opcode_tid ot;
u32 len;
u32 seq;
u16 urg;
- u8 rsvd1;
+ u8 rsvd;
+ u8 status;
+};
+
+struct cpl_rx_data_ack {
+ union opcode_tid ot;
+ u32 credit;
+};
+
+struct cpl_rx_data_ddp {
+ union opcode_tid ot;
+ u32 len;
+ u32 seq;
+ u32 nxt_seq;
+ u32 ulp_crc;
+ u16 ddp_status;
+ u8 rsvd;
u8 status;
};
@@ -99,9 +460,9 @@ struct cpl_tx_pkt_lso {
u8 ip_csum_dis:1;
u8 l4_csum_dis:1;
u8 vlan_valid:1;
- u8 rsvd:1;
+ u8 :1;
#else
- u8 rsvd:1;
+ u8 :1;
u8 vlan_valid:1;
u8 l4_csum_dis:1;
u8 ip_csum_dis:1;
@@ -110,8 +471,7 @@ struct cpl_tx_pkt_lso {
u16 vlan;
__be32 len;
- u32 rsvd2;
- u8 rsvd3;
+ u8 rsvd[5];
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 tcp_hdr_words:4;
u8 ip_hdr_words:4;
@@ -138,8 +498,142 @@ struct cpl_rx_pkt {
u8 iff:4;
#endif
u16 csum;
- __be16 vlan;
+ u16 vlan;
u16 len;
};
+struct cpl_l2t_write_req {
+ union opcode_tid ot;
+ u32 params;
+ u8 rsvd1[2];
+ u8 dst_mac[6];
+};
+
+struct cpl_l2t_write_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd[3];
+};
+
+struct cpl_l2t_read_req {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 l2t_idx;
+};
+
+struct cpl_l2t_read_rpl {
+ union opcode_tid ot;
+ u32 params;
+ u8 rsvd1[2];
+ u8 dst_mac[6];
+};
+
+struct cpl_smt_write_req {
+ union opcode_tid ot;
+ u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 rsvd1:1;
+ u8 mtu_idx:3;
+ u8 iff:4;
+#else
+ u8 iff:4;
+ u8 mtu_idx:3;
+ u8 rsvd1:1;
+#endif
+ u16 rsvd2;
+ u16 rsvd3;
+ u8 src_mac1[6];
+ u16 rsvd4;
+ u8 src_mac0[6];
+};
+
+struct cpl_smt_write_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd[3];
+};
+
+struct cpl_smt_read_req {
+ union opcode_tid ot;
+ u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 rsvd1:4;
+ u8 iff:4;
+#else
+ u8 iff:4;
+ u8 rsvd1:4;
+#endif
+ u16 rsvd2;
+};
+
+struct cpl_smt_read_rpl {
+ union opcode_tid ot;
+ u8 status;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 rsvd1:1;
+ u8 mtu_idx:3;
+ u8 rsvd0:4;
+#else
+ u8 rsvd0:4;
+ u8 mtu_idx:3;
+ u8 rsvd1:1;
+#endif
+ u16 rsvd2;
+ u16 rsvd3;
+ u8 src_mac1[6];
+ u16 rsvd4;
+ u8 src_mac0[6];
+};
+
+struct cpl_rte_delete_req {
+ union opcode_tid ot;
+ u32 params;
+};
+
+struct cpl_rte_delete_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd[3];
+};
+
+struct cpl_rte_write_req {
+ union opcode_tid ot;
+ u32 params;
+ u32 netmask;
+ u32 faddr;
+};
+
+struct cpl_rte_write_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd[3];
+};
+
+struct cpl_rte_read_req {
+ union opcode_tid ot;
+ u32 params;
+};
+
+struct cpl_rte_read_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd0[2];
+ u8 l2t_idx;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 rsvd1:7;
+ u8 select:1;
+#else
+ u8 select:1;
+ u8 rsvd1:7;
+#endif
+ u8 rsvd2[3];
+ u32 addr;
+};
+
+struct cpl_mss_change {
+ union opcode_tid ot;
+ u32 mss;
+};
+
#endif /* _CXGB_CPL5_CMD_H_ */
+
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index ad7ff9641a7e..53bec6739812 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -45,7 +45,6 @@
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
-#include <linux/proc_fs.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>
@@ -54,36 +53,10 @@
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
+#include "tp.h"
#include "espi.h"
+#include "elmer0.h"
-#ifdef work_struct
-#include <linux/tqueue.h>
-#define INIT_WORK INIT_TQUEUE
-#define schedule_work schedule_task
-#define flush_scheduled_work flush_scheduled_tasks
-
-static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
-{
- mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
-}
-
-static inline void cancel_mac_stats_update(struct adapter *ap)
-{
- del_timer_sync(&ap->stats_update_timer);
- flush_scheduled_tasks();
-}
-
-/*
- * Stats update timer for 2.4. It schedules a task to do the actual update as
- * we need to access MAC statistics in process context.
- */
-static void mac_stats_timer(unsigned long data)
-{
- struct adapter *ap = (struct adapter *)data;
-
- schedule_task(&ap->stats_update_task);
-}
-#else
#include <linux/workqueue.h>
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
@@ -95,7 +68,6 @@ static inline void cancel_mac_stats_update(struct adapter *ap)
{
cancel_delayed_work(&ap->stats_update_task);
}
-#endif
#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
@@ -103,10 +75,9 @@ static inline void cancel_mac_stats_update(struct adapter *ap)
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH 16384U
#define MAX_TX_BUFFERS_LOW 1536U
+#define MAX_TX_BUFFERS 1460U
#define MIN_FL_ENTRIES 32
-#define PORT_MASK ((1 << MAX_NPORTS) - 1)
-
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -124,8 +95,21 @@ MODULE_LICENSE("GPL");
static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0);
-MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
+
+#define HCLOCK 0x0
+#define LCLOCK 0x1
+
+/* T1 cards powersave mode */
+static int t1_clock(struct adapter *adapter, int mode);
+static int t1powersave = 1; /* HW default is powersave mode. */
+module_param(t1powersave, int, 0);
+MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
+
+static int disable_msi = 0;
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
static const char pci_speed[][4] = {
"33", "66", "100", "133"
@@ -149,7 +133,7 @@ static void t1_set_rxmode(struct net_device *dev)
static void link_report(struct port_info *p)
{
if (!netif_carrier_ok(p->dev))
- printk(KERN_INFO "%s: link down\n", p->dev->name);
+ printk(KERN_INFO "%s: link down\n", p->dev->name);
else {
const char *s = "10Mbps";
@@ -159,13 +143,13 @@ static void link_report(struct port_info *p)
case SPEED_100: s = "100Mbps"; break;
}
- printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
+ printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
p->dev->name, s,
p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
}
}
-void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
+void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
int speed, int duplex, int pause)
{
struct port_info *p = &adapter->port[port_id];
@@ -177,6 +161,22 @@ void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
netif_carrier_off(p->dev);
link_report(p);
+ /* multi-ports: inform toe */
+ if ((speed > 0) && (adapter->params.nports > 1)) {
+ unsigned int sched_speed = 10;
+ switch (speed) {
+ case SPEED_1000:
+ sched_speed = 1000;
+ break;
+ case SPEED_100:
+ sched_speed = 100;
+ break;
+ case SPEED_10:
+ sched_speed = 10;
+ break;
+ }
+ t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
+ }
}
}
@@ -195,8 +195,10 @@ static void link_start(struct port_info *p)
static void enable_hw_csum(struct adapter *adapter)
{
if (adapter->flags & TSO_CAPABLE)
- t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
- t1_tp_set_tcp_checksum_offload(adapter, 1);
+ t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */
+ if (adapter->flags & UDP_CSUM_CAPABLE)
+ t1_tp_set_udp_checksum_offload(adapter->tp, 1);
+ t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
/*
@@ -217,11 +219,19 @@ static int cxgb_up(struct adapter *adapter)
}
t1_interrupts_clear(adapter);
- if ((err = request_irq(adapter->pdev->irq,
- t1_select_intr_handler(adapter), IRQF_SHARED,
- adapter->name, adapter))) {
+
+ adapter->params.has_msi = !disable_msi && pci_enable_msi(adapter->pdev) == 0;
+ err = request_irq(adapter->pdev->irq,
+ t1_select_intr_handler(adapter),
+ adapter->params.has_msi ? 0 : IRQF_SHARED,
+ adapter->name, adapter);
+ if (err) {
+ if (adapter->params.has_msi)
+ pci_disable_msi(adapter->pdev);
+
goto out_err;
}
+
t1_sge_start(adapter->sge);
t1_interrupts_enable(adapter);
out_err:
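
The hunk above (and its counterpart in cxgb_down() just below) is the usual MSI-with-fallback dance: try pci_enable_msi(), request the vector unshared if that worked or shared if it did not, and undo the MSI enable if request_irq() fails. A trimmed sketch of the pattern; my_adapter/my_intr are placeholders, not driver code:

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	struct my_adapter {
		struct pci_dev *pdev;
		int has_msi;
	};

	static irqreturn_t my_intr(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int my_setup_irq(struct my_adapter *ap, int disable_msi)
	{
		int err;

		ap->has_msi = !disable_msi && pci_enable_msi(ap->pdev) == 0;
		err = request_irq(ap->pdev->irq, my_intr,
				  ap->has_msi ? 0 : IRQF_SHARED, "my_dev", ap);
		if (err && ap->has_msi)
			pci_disable_msi(ap->pdev);	/* fall back cleanly */
		return err;
	}

	static void my_teardown_irq(struct my_adapter *ap)
	{
		free_irq(ap->pdev->irq, ap);
		if (ap->has_msi)
			pci_disable_msi(ap->pdev);
	}
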
@@ -236,6 +246,8 @@ static void cxgb_down(struct adapter *adapter)
t1_sge_stop(adapter->sge);
t1_interrupts_disable(adapter);
free_irq(adapter->pdev->irq, adapter);
+ if (adapter->params.has_msi)
+ pci_disable_msi(adapter->pdev);
}
static int cxgb_open(struct net_device *dev)
@@ -290,7 +302,7 @@ static struct net_device_stats *t1_get_stats(struct net_device *dev)
/* Do a full update of the MAC stats */
pstats = p->mac->ops->statistics_update(p->mac,
- MAC_STATS_UPDATE_FULL);
+ MAC_STATS_UPDATE_FULL);
ns->tx_packets = pstats->TxUnicastFramesOK +
pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
@@ -344,47 +356,53 @@ static void set_msglevel(struct net_device *dev, u32 val)
}
static char stats_strings[][ETH_GSTRING_LEN] = {
- "TxOctetsOK",
- "TxOctetsBad",
- "TxUnicastFramesOK",
- "TxMulticastFramesOK",
- "TxBroadcastFramesOK",
- "TxPauseFrames",
- "TxFramesWithDeferredXmissions",
- "TxLateCollisions",
- "TxTotalCollisions",
- "TxFramesAbortedDueToXSCollisions",
- "TxUnderrun",
- "TxLengthErrors",
- "TxInternalMACXmitError",
- "TxFramesWithExcessiveDeferral",
- "TxFCSErrors",
-
- "RxOctetsOK",
- "RxOctetsBad",
- "RxUnicastFramesOK",
- "RxMulticastFramesOK",
- "RxBroadcastFramesOK",
- "RxPauseFrames",
- "RxFCSErrors",
- "RxAlignErrors",
- "RxSymbolErrors",
- "RxDataErrors",
- "RxSequenceErrors",
- "RxRuntErrors",
- "RxJabberErrors",
- "RxInternalMACRcvError",
- "RxInRangeLengthErrors",
- "RxOutOfRangeLengthField",
- "RxFrameTooLongErrors",
-
- "TSO",
- "VLANextractions",
- "VLANinsertions",
+ "TxOctetsOK",
+ "TxOctetsBad",
+ "TxUnicastFramesOK",
+ "TxMulticastFramesOK",
+ "TxBroadcastFramesOK",
+ "TxPauseFrames",
+ "TxFramesWithDeferredXmissions",
+ "TxLateCollisions",
+ "TxTotalCollisions",
+ "TxFramesAbortedDueToXSCollisions",
+ "TxUnderrun",
+ "TxLengthErrors",
+ "TxInternalMACXmitError",
+ "TxFramesWithExcessiveDeferral",
+ "TxFCSErrors",
+
+ "RxOctetsOK",
+ "RxOctetsBad",
+ "RxUnicastFramesOK",
+ "RxMulticastFramesOK",
+ "RxBroadcastFramesOK",
+ "RxPauseFrames",
+ "RxFCSErrors",
+ "RxAlignErrors",
+ "RxSymbolErrors",
+ "RxDataErrors",
+ "RxSequenceErrors",
+ "RxRuntErrors",
+ "RxJabberErrors",
+ "RxInternalMACRcvError",
+ "RxInRangeLengthErrors",
+ "RxOutOfRangeLengthField",
+ "RxFrameTooLongErrors",
+
+ /* Port stats */
+ "RxPackets",
"RxCsumGood",
+ "TxPackets",
"TxCsumOffload",
- "RxDrops"
-
+ "TxTso",
+ "RxVlan",
+ "TxVlan",
+
+ /* Interrupt stats */
+ "rx drops",
+ "pure_rsps",
+ "unhandled irqs",
"respQ_empty",
"respQ_overflow",
"freelistQ_empty",
@@ -392,11 +410,7 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
"pkt_mismatch",
"cmdQ_full0",
"cmdQ_full1",
- "tx_ipfrags",
- "tx_reg_pkts",
- "tx_lso_pkts",
- "tx_do_cksum",
-
+
"espi_DIP2ParityErr",
"espi_DIP4Err",
"espi_RxDrops",
@@ -404,7 +418,7 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
"espi_RxOvfl",
"espi_ParityErr"
};
-
+
#define T2_REGMAP_SIZE (3 * 1024)
static int get_regs_len(struct net_device *dev)
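
The string-table rework above only makes sense together with the get_stats() rework in the next hunk: ethtool pairs names and values strictly by position, so the two arrays must be edited in lockstep. A skeleton of that contract (the my_* names are placeholders):

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	struct my_priv {
		u64 rx_packets;
		u64 tx_packets;
	};

	static const char my_strings[][ETH_GSTRING_LEN] = {
		"RxPackets",
		"TxPackets",
	};

	static void my_get_strings(struct net_device *dev, u32 stringset, u8 *data)
	{
		if (stringset == ETH_SS_STATS)
			memcpy(data, my_strings, sizeof(my_strings));
	}

	static void my_get_ethtool_stats(struct net_device *dev,
					 struct ethtool_stats *stats, u64 *data)
	{
		struct my_priv *p = netdev_priv(dev);

		/* must follow the order of my_strings[] exactly */
		*data++ = p->rx_packets;
		*data++ = p->tx_packets;
	}
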
@@ -439,65 +453,77 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
struct adapter *adapter = dev->priv;
struct cmac *mac = adapter->port[dev->if_port].mac;
const struct cmac_statistics *s;
- const struct sge_port_stats *ss;
const struct sge_intr_counts *t;
+ struct sge_port_stats ss;
s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
- ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
- t = t1_sge_get_intr_counts(adapter->sge);
- *data++ = s->TxOctetsOK;
- *data++ = s->TxOctetsBad;
- *data++ = s->TxUnicastFramesOK;
- *data++ = s->TxMulticastFramesOK;
- *data++ = s->TxBroadcastFramesOK;
- *data++ = s->TxPauseFrames;
- *data++ = s->TxFramesWithDeferredXmissions;
- *data++ = s->TxLateCollisions;
- *data++ = s->TxTotalCollisions;
- *data++ = s->TxFramesAbortedDueToXSCollisions;
- *data++ = s->TxUnderrun;
- *data++ = s->TxLengthErrors;
- *data++ = s->TxInternalMACXmitError;
- *data++ = s->TxFramesWithExcessiveDeferral;
- *data++ = s->TxFCSErrors;
-
- *data++ = s->RxOctetsOK;
- *data++ = s->RxOctetsBad;
- *data++ = s->RxUnicastFramesOK;
- *data++ = s->RxMulticastFramesOK;
- *data++ = s->RxBroadcastFramesOK;
- *data++ = s->RxPauseFrames;
- *data++ = s->RxFCSErrors;
- *data++ = s->RxAlignErrors;
- *data++ = s->RxSymbolErrors;
- *data++ = s->RxDataErrors;
- *data++ = s->RxSequenceErrors;
- *data++ = s->RxRuntErrors;
- *data++ = s->RxJabberErrors;
- *data++ = s->RxInternalMACRcvError;
- *data++ = s->RxInRangeLengthErrors;
- *data++ = s->RxOutOfRangeLengthField;
- *data++ = s->RxFrameTooLongErrors;
-
- *data++ = ss->tso;
- *data++ = ss->vlan_xtract;
- *data++ = ss->vlan_insert;
- *data++ = ss->rx_cso_good;
- *data++ = ss->tx_cso;
- *data++ = ss->rx_drops;
-
- *data++ = (u64)t->respQ_empty;
- *data++ = (u64)t->respQ_overflow;
- *data++ = (u64)t->freelistQ_empty;
- *data++ = (u64)t->pkt_too_big;
- *data++ = (u64)t->pkt_mismatch;
- *data++ = (u64)t->cmdQ_full[0];
- *data++ = (u64)t->cmdQ_full[1];
- *data++ = (u64)t->tx_ipfrags;
- *data++ = (u64)t->tx_reg_pkts;
- *data++ = (u64)t->tx_lso_pkts;
- *data++ = (u64)t->tx_do_cksum;
+ *data++ = s->TxOctetsOK;
+ *data++ = s->TxOctetsBad;
+ *data++ = s->TxUnicastFramesOK;
+ *data++ = s->TxMulticastFramesOK;
+ *data++ = s->TxBroadcastFramesOK;
+ *data++ = s->TxPauseFrames;
+ *data++ = s->TxFramesWithDeferredXmissions;
+ *data++ = s->TxLateCollisions;
+ *data++ = s->TxTotalCollisions;
+ *data++ = s->TxFramesAbortedDueToXSCollisions;
+ *data++ = s->TxUnderrun;
+ *data++ = s->TxLengthErrors;
+ *data++ = s->TxInternalMACXmitError;
+ *data++ = s->TxFramesWithExcessiveDeferral;
+ *data++ = s->TxFCSErrors;
+
+ *data++ = s->RxOctetsOK;
+ *data++ = s->RxOctetsBad;
+ *data++ = s->RxUnicastFramesOK;
+ *data++ = s->RxMulticastFramesOK;
+ *data++ = s->RxBroadcastFramesOK;
+ *data++ = s->RxPauseFrames;
+ *data++ = s->RxFCSErrors;
+ *data++ = s->RxAlignErrors;
+ *data++ = s->RxSymbolErrors;
+ *data++ = s->RxDataErrors;
+ *data++ = s->RxSequenceErrors;
+ *data++ = s->RxRuntErrors;
+ *data++ = s->RxJabberErrors;
+ *data++ = s->RxInternalMACRcvError;
+ *data++ = s->RxInRangeLengthErrors;
+ *data++ = s->RxOutOfRangeLengthField;
+ *data++ = s->RxFrameTooLongErrors;
+
+ t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
+ *data++ = ss.rx_packets;
+ *data++ = ss.rx_cso_good;
+ *data++ = ss.tx_packets;
+ *data++ = ss.tx_cso;
+ *data++ = ss.tx_tso;
+ *data++ = ss.vlan_xtract;
+ *data++ = ss.vlan_insert;
+
+ t = t1_sge_get_intr_counts(adapter->sge);
+ *data++ = t->rx_drops;
+ *data++ = t->pure_rsps;
+ *data++ = t->unhandled_irqs;
+ *data++ = t->respQ_empty;
+ *data++ = t->respQ_overflow;
+ *data++ = t->freelistQ_empty;
+ *data++ = t->pkt_too_big;
+ *data++ = t->pkt_mismatch;
+ *data++ = t->cmdQ_full[0];
+ *data++ = t->cmdQ_full[1];
+
+ if (adapter->espi) {
+ const struct espi_intr_counts *e;
+
+ e = t1_espi_get_intr_counts(adapter->espi);
+ *data++ = e->DIP2_parity_err;
+ *data++ = e->DIP4_err;
+ *data++ = e->rx_drops;
+ *data++ = e->tx_drops;
+ *data++ = e->rx_ovflw;
+ *data++ = e->parity_err;
+ }
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
@@ -521,6 +547,15 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
memset(buf, 0, T2_REGMAP_SIZE);
reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
+ reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
+ reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
+ reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
+ reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
+ reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
+ reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
+ reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
+ reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
+ reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -539,12 +574,12 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->duplex = -1;
}
- cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = p->phy->addr;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = p->link_config.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->phy_address = p->phy->addr;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->autoneg = p->link_config.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
return 0;
}
@@ -715,7 +750,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
return -EINVAL;
if (adapter->flags & FULL_INIT_DONE)
- return -EBUSY;
+ return -EBUSY;
adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
@@ -759,7 +794,9 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
static int get_eeprom_len(struct net_device *dev)
{
- return EEPROM_SIZE;
+ struct adapter *adapter = dev->priv;
+
+ return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}
#define EEPROM_MAGIC(ap) \
@@ -809,47 +846,36 @@ static const struct ethtool_ops t1_ethtool_ops = {
.set_tso = set_tso,
};
-static void cxgb_proc_cleanup(struct adapter *adapter,
- struct proc_dir_entry *dir)
-{
- const char *name;
- name = adapter->name;
- remove_proc_entry(name, dir);
-}
-//#define chtoe_setup_toedev(adapter) NULL
-#define update_mtu_tab(adapter)
-#define write_smt_entry(adapter, idx)
-
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
- struct adapter *adapter = dev->priv;
- struct mii_ioctl_data *data = if_mii(req);
+ struct adapter *adapter = dev->priv;
+ struct mii_ioctl_data *data = if_mii(req);
switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = adapter->port[dev->if_port].phy->addr;
- /* FALLTHRU */
- case SIOCGMIIREG: {
+ case SIOCGMIIPHY:
+ data->phy_id = adapter->port[dev->if_port].phy->addr;
+ /* FALLTHRU */
+ case SIOCGMIIREG: {
struct cphy *phy = adapter->port[dev->if_port].phy;
u32 val;
if (!phy->mdio_read)
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
&val);
- data->val_out = val;
- break;
+ data->val_out = val;
+ break;
}
- case SIOCSMIIREG: {
+ case SIOCSMIIREG: {
struct cphy *phy = adapter->port[dev->if_port].phy;
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
if (!phy->mdio_write)
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
data->val_in);
- break;
+ break;
}
default:
@@ -865,9 +891,9 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
struct cmac *mac = adapter->port[dev->if_port].mac;
if (!mac->ops->set_mtu)
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
if (new_mtu < 68)
- return -EINVAL;
+ return -EINVAL;
if ((ret = mac->ops->set_mtu(mac, new_mtu)))
return ret;
dev->mtu = new_mtu;
@@ -918,7 +944,7 @@ static void t1_netpoll(struct net_device *dev)
struct adapter *adapter = dev->priv;
local_irq_save(flags);
- t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
+ t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
local_irq_restore(flags);
}
#endif
@@ -955,14 +981,14 @@ static void ext_intr_task(void *data)
{
struct adapter *adapter = data;
- elmer0_ext_intr_handler(adapter);
+ t1_elmer0_ext_intr_handler(adapter);
/* Now reenable external interrupts */
spin_lock_irq(&adapter->async_lock);
adapter->slow_intr_mask |= F_PL_INTR_EXT;
writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
- adapter->regs + A_PL_ENABLE);
+ adapter->regs + A_PL_ENABLE);
spin_unlock_irq(&adapter->async_lock);
}
@@ -978,7 +1004,7 @@ void t1_elmer0_ext_intr(struct adapter *adapter)
*/
adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
- adapter->regs + A_PL_ENABLE);
+ adapter->regs + A_PL_ENABLE);
schedule_work(&adapter->ext_intr_handler_task);
}
@@ -1011,7 +1037,7 @@ static int __devinit init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err)
- return err;
+ return err;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
CH_ERR("%s: cannot find PCI device memory base address\n",
@@ -1043,7 +1069,7 @@ static int __devinit init_one(struct pci_dev *pdev,
pci_set_master(pdev);
- mmio_start = pci_resource_start(pdev, 0);
+ mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
bi = t1_get_board_info(ent->driver_data);
@@ -1081,21 +1107,15 @@ static int __devinit init_one(struct pci_dev *pdev,
adapter->msg_enable = dflt_msg_enable;
adapter->mmio_len = mmio_len;
- init_MUTEX(&adapter->mib_mutex);
spin_lock_init(&adapter->tpi_lock);
spin_lock_init(&adapter->work_lock);
spin_lock_init(&adapter->async_lock);
+ spin_lock_init(&adapter->mac_lock);
INIT_WORK(&adapter->ext_intr_handler_task,
ext_intr_task, adapter);
INIT_WORK(&adapter->stats_update_task, mac_stats_task,
adapter);
-#ifdef work_struct
- init_timer(&adapter->stats_update_timer);
- adapter->stats_update_timer.function = mac_stats_timer;
- adapter->stats_update_timer.data =
- (unsigned long)adapter;
-#endif
pci_set_drvdata(pdev, netdev);
}
@@ -1122,16 +1142,19 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->vlan_rx_register = vlan_rx_register;
netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
#endif
- adapter->flags |= TSO_CAPABLE;
- netdev->features |= NETIF_F_TSO;
+
+ /* T204: disable TSO */
+ if (!(is_T2(adapter)) || bi->port_number != 4) {
+ adapter->flags |= TSO_CAPABLE;
+ netdev->features |= NETIF_F_TSO;
+ }
}
netdev->open = cxgb_open;
netdev->stop = cxgb_close;
netdev->hard_start_xmit = t1_start_xmit;
netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
- sizeof(struct cpl_tx_pkt_lso) :
- sizeof(struct cpl_tx_pkt);
+ sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
netdev->get_stats = t1_get_stats;
netdev->set_multicast_list = t1_set_rxmode;
netdev->do_ioctl = t1_ioctl;
@@ -1142,7 +1165,7 @@ static int __devinit init_one(struct pci_dev *pdev,
#endif
netdev->weight = 64;
- SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+ SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
}
if (t1_init_sw_modules(adapter, bi) < 0) {
@@ -1169,7 +1192,7 @@ static int __devinit init_one(struct pci_dev *pdev,
if (!adapter->registered_device_map)
adapter->name = adapter->port[i].dev->name;
- __set_bit(i, &adapter->registered_device_map);
+ __set_bit(i, &adapter->registered_device_map);
}
}
if (!adapter->registered_device_map) {
@@ -1182,18 +1205,28 @@ static int __devinit init_one(struct pci_dev *pdev,
bi->desc, adapter->params.chip_revision,
adapter->params.pci.is_pcix ? "PCIX" : "PCI",
adapter->params.pci.speed, adapter->params.pci.width);
+
+ /*
+ * Set the T1B ASIC and memory clocks.
+ */
+ if (t1powersave)
+ adapter->t1powersave = LCLOCK; /* HW default is powersave mode. */
+ else
+ adapter->t1powersave = HCLOCK;
+ if (t1_is_T1B(adapter))
+ t1_clock(adapter, t1powersave);
+
return 0;
out_release_adapter_res:
t1_free_sw_modules(adapter);
out_free_dev:
if (adapter) {
- if (adapter->regs) iounmap(adapter->regs);
+ if (adapter->regs)
+ iounmap(adapter->regs);
for (i = bi->port_number - 1; i >= 0; --i)
- if (adapter->port[i].dev) {
- cxgb_proc_cleanup(adapter, proc_root_driver);
- kfree(adapter->port[i].dev);
- }
+ if (adapter->port[i].dev)
+ free_netdev(adapter->port[i].dev);
}
pci_release_regions(pdev);
out_disable_pdev:
@@ -1202,6 +1235,155 @@ static int __devinit init_one(struct pci_dev *pdev,
return err;
}
+static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
+{
+ int data;
+ int i;
+ u32 val;
+
+ enum {
+ S_CLOCK = 1 << 3,
+ S_DATA = 1 << 4
+ };
+
+ for (i = (nbits - 1); i > -1; i--) {
+
+ udelay(50);
+
+ data = ((bitdata >> i) & 0x1);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+
+ if (data)
+ val |= S_DATA;
+ else
+ val &= ~S_DATA;
+
+ udelay(50);
+
+ /* Set SCLOCK low */
+ val &= ~S_CLOCK;
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ udelay(50);
+
+ /* Write SCLOCK high */
+ val |= S_CLOCK;
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ }
+}
+
+static int t1_clock(struct adapter *adapter, int mode)
+{
+ u32 val;
+ int M_CORE_VAL;
+ int M_MEM_VAL;
+
+ enum {
+ M_CORE_BITS = 9,
+ T_CORE_VAL = 0,
+ T_CORE_BITS = 2,
+ N_CORE_VAL = 0,
+ N_CORE_BITS = 2,
+ M_MEM_BITS = 9,
+ T_MEM_VAL = 0,
+ T_MEM_BITS = 2,
+ N_MEM_VAL = 0,
+ N_MEM_BITS = 2,
+ NP_LOAD = 1 << 17,
+ S_LOAD_MEM = 1 << 5,
+ S_LOAD_CORE = 1 << 6,
+ S_CLOCK = 1 << 3
+ };
+
+ if (!t1_is_T1B(adapter))
+ return -ENODEV; /* Can't re-clock this chip. */
+
+ if (mode & 2) {
+ return 0; /* show current mode. */
+ }
+
+ if ((adapter->t1powersave & 1) == (mode & 1))
+ return -EALREADY; /* ASIC already running in mode. */
+
+ if ((mode & 1) == HCLOCK) {
+ M_CORE_VAL = 0x14;
+ M_MEM_VAL = 0x18;
+ adapter->t1powersave = HCLOCK; /* overclock */
+ } else {
+ M_CORE_VAL = 0xe;
+ M_MEM_VAL = 0x10;
+ adapter->t1powersave = LCLOCK; /* underclock */
+ }
+
+ /* Don't interrupt this serial stream! */
+ spin_lock(&adapter->tpi_lock);
+
+ /* Initialize for ASIC core */
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= NP_LOAD;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~S_LOAD_CORE;
+ val &= ~S_CLOCK;
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+
+ /* Serial program the ASIC clock synthesizer */
+ bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
+ bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
+ bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
+ udelay(50);
+
+ /* Finish ASIC core */
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= S_LOAD_CORE;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~S_LOAD_CORE;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+
+ /* Initialize for memory */
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= NP_LOAD;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~S_LOAD_MEM;
+ val &= ~S_CLOCK;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+
+ /* Serial program the memory clock synthesizer */
+ bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
+ bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
+ bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
+ udelay(50);
+
+ /* Finish memory */
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= S_LOAD_MEM;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~S_LOAD_MEM;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ spin_unlock(&adapter->tpi_lock);
+
+ return 0;
+}
+
static inline void t1_sw_reset(struct pci_dev *pdev)
{
pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
@@ -1223,10 +1405,9 @@ static void __devexit remove_one(struct pci_dev *pdev)
t1_free_sw_modules(adapter);
iounmap(adapter->regs);
while (--i >= 0)
- if (adapter->port[i].dev) {
- cxgb_proc_cleanup(adapter, proc_root_driver);
- kfree(adapter->port[i].dev);
- }
+ if (adapter->port[i].dev)
+ free_netdev(adapter->port[i].dev);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
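The new bit_bang()/t1_clock() pair added above shifts each clock-synthesizer divider out MSB-first over two ELMER0 GPIO lines: the data bit is presented first, then the clock line is driven low and back high to latch it. A minimal standalone sketch of that MSB-first shift pattern, using hypothetical set_data()/pulse_clock() helpers in place of the driver's __t1_tpi_read()/__t1_tpi_write() sequence:

#include <stdio.h>

/* Stand-ins for the GPIO accesses; they only log what a real pin would do. */
static void set_data(int level)  { printf("data=%d ", level); }
static void pulse_clock(void)    { printf("clk\n"); }

/* Shift 'value' out MSB-first, one data/clock cycle per bit. */
static void shift_out_msb_first(unsigned value, int nbits)
{
	for (int i = nbits - 1; i >= 0; i--) {
		set_data((value >> i) & 1);   /* present the bit */
		pulse_clock();                /* latch it on the rising edge */
	}
}

int main(void)
{
	shift_out_msb_first(0x14, 9);   /* e.g. the 9-bit HCLOCK core divider */
	return 0;
}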
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
index 5590cb2dac19..9ebecaa97d31 100644
--- a/drivers/net/chelsio/elmer0.h
+++ b/drivers/net/chelsio/elmer0.h
@@ -39,6 +39,12 @@
#ifndef _CXGB_ELMER0_H_
#define _CXGB_ELMER0_H_
+/* ELMER0 flavors */
+enum {
+ ELMER0_XC2S300E_6FT256_C,
+ ELMER0_XC2S100E_6TQ144_C
+};
+
/* ELMER0 registers */
#define A_ELMER0_VERSION 0x100000
#define A_ELMER0_PHY_CFG 0x100004
@@ -149,3 +155,4 @@
#define MI1_OP_INDIRECT_READ 3
#endif /* _CXGB_ELMER0_H_ */
+
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
index 542e5e065c6f..4192f0f5b3ee 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/chelsio/espi.c
@@ -81,46 +81,36 @@ static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
return busy;
}
-/* 1. Deassert rx_reset_core. */
-/* 2. Program TRICN_CNFG registers. */
-/* 3. Deassert rx_reset_link */
static int tricn_init(adapter_t *adapter)
{
- int i = 0;
- int stat = 0;
- int timeout = 0;
- int is_ready = 0;
+ int i, sme = 1;
- /* 1 */
- timeout=1000;
- do {
- stat = readl(adapter->regs + A_ESPI_RX_RESET);
- is_ready = (stat & 0x4);
- timeout--;
- udelay(5);
- } while (!is_ready || (timeout==0));
- writel(0x2, adapter->regs + A_ESPI_RX_RESET);
- if (timeout==0)
- {
- CH_ERR("ESPI : ERROR : Timeout tricn_init() \n");
- t1_fatal_err(adapter);
+ if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) {
+ CH_ERR("%s: ESPI clock not ready\n", adapter->name);
+ return -1;
}
- /* 2 */
- tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
- tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
- tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
- for (i=1; i<= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
- for (i=1; i<= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
- for (i=1; i<= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
- for (i=4; i<= 4; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
- for (i=5; i<= 5; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
- for (i=6; i<= 6; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
- for (i=7; i<= 7; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0x80);
- for (i=8; i<= 8; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
-
- /* 3 */
- writel(0x3, adapter->regs + A_ESPI_RX_RESET);
+ writel(F_ESPI_RX_CORE_RST, adapter->regs + A_ESPI_RX_RESET);
+
+ if (sme) {
+ tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
+ tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
+ tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
+ }
+ for (i = 1; i <= 8; i++)
+ tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
+ for (i = 1; i <= 2; i++)
+ tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
+ for (i = 1; i <= 3; i++)
+ tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
+ tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1);
+ tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1);
+ tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1);
+ tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80);
+ tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1);
+
+ writel(F_ESPI_RX_CORE_RST | F_ESPI_RX_LNK_RST,
+ adapter->regs + A_ESPI_RX_RESET);
return 0;
}
@@ -143,6 +133,7 @@ void t1_espi_intr_enable(struct peespi *espi)
void t1_espi_intr_clear(struct peespi *espi)
{
+ readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
}
@@ -157,7 +148,6 @@ void t1_espi_intr_disable(struct peespi *espi)
int t1_espi_intr_handler(struct peespi *espi)
{
- u32 cnt;
u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
if (status & F_DIP4ERR)
@@ -177,7 +167,7 @@ int t1_espi_intr_handler(struct peespi *espi)
* Must read the error count to clear the interrupt
* that it causes.
*/
- cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
+ readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
}
/*
@@ -192,7 +182,7 @@ int t1_espi_intr_handler(struct peespi *espi)
const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
{
- return &espi->intr_cnt;
+ return &espi->intr_cnt;
}
static void espi_setup_for_pm3393(adapter_t *adapter)
@@ -210,17 +200,45 @@ static void espi_setup_for_pm3393(adapter_t *adapter)
writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
}
-/* T2 Init part -- */
-/* 1. Set T_ESPI_MISCCTRL_ADDR */
-/* 2. Init ESPI registers. */
-/* 3. Init TriCN Hard Macro */
-int t1_espi_init(struct peespi *espi, int mac_type, int nports)
+static void espi_setup_for_vsc7321(adapter_t *adapter)
+{
+ writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
+ writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1);
+ writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
+ writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+ writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+ writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+ writel(V_RX_NPORTS(4) | V_TX_NPORTS(4), adapter->regs + A_PORT_CONFIG);
+
+ writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
+}
+
+/*
+ * Note that T1B requires at least 2 ports for IXF1010 due to a HW bug.
+ */
+static void espi_setup_for_ixf1010(adapter_t *adapter, int nports)
{
- u32 cnt;
+ writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+ if (nports == 4) {
+ if (is_T2(adapter)) {
+ writel(0xf00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+ writel(0x3c0, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+ } else {
+ writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+ writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+ }
+ } else {
+ writel(0x1fff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+ writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+ }
+ writel(V_RX_NPORTS(nports) | V_TX_NPORTS(nports), adapter->regs + A_PORT_CONFIG);
+}
+
+int t1_espi_init(struct peespi *espi, int mac_type, int nports)
+{
u32 status_enable_extra = 0;
adapter_t *adapter = espi->adapter;
- u32 status, burstval = 0x800100;
/* Disable ESPI training. MACs that can handle it enable it below. */
writel(0, adapter->regs + A_ESPI_TRAIN);
@@ -229,38 +247,20 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports)
writel(V_OUT_OF_SYNC_COUNT(4) |
V_DIP2_PARITY_ERR_THRES(3) |
V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
- if (nports == 4) {
- /* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
- burstval = 0x200040;
- }
- }
- writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
+ writel(nports == 4 ? 0x200040 : 0x1000080,
+ adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
+ } else
+ writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
- switch (mac_type) {
- case CHBT_MAC_PM3393:
+ if (mac_type == CHBT_MAC_PM3393)
espi_setup_for_pm3393(adapter);
- break;
- default:
+ else if (mac_type == CHBT_MAC_VSC7321)
+ espi_setup_for_vsc7321(adapter);
+ else if (mac_type == CHBT_MAC_IXF1010) {
+ status_enable_extra = F_INTEL1010MODE;
+ espi_setup_for_ixf1010(adapter, nports);
+ } else
return -1;
- }
-
- /*
- * Make sure any pending interrupts from the SPI are
- * Cleared before enabling the interrupt.
- */
- writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE);
- status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
- if (status & F_DIP2PARITYERR) {
- cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
- }
-
- /*
- * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
- * write the status as is.
- */
- if (status && t1_is_T1B(espi->adapter))
- status = 1;
- writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
writel(status_enable_extra | F_RXSTATUSENABLE,
adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
@@ -271,9 +271,11 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports)
* Always position the control at the 1st port egress IN
* (sop,eop) counter to reduce PIOs for T/N210 workaround.
*/
- espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL)
- & ~MON_MASK) | (F_MONITORED_DIRECTION
- | F_MONITORED_INTERFACE);
+ espi->misc_ctrl = readl(adapter->regs + A_ESPI_MISC_CONTROL);
+ espi->misc_ctrl &= ~MON_MASK;
+ espi->misc_ctrl |= F_MONITORED_DIRECTION;
+ if (adapter->params.nports == 1)
+ espi->misc_ctrl |= F_MONITORED_INTERFACE;
writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
spin_lock_init(&espi->lock);
}
@@ -299,8 +301,7 @@ void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
{
struct peespi *espi = adapter->espi;
- if (!is_T2(adapter))
- return;
+ if (!is_T2(adapter)) return;
spin_lock(&espi->lock);
espi->misc_ctrl = (val & ~MON_MASK) |
(espi->misc_ctrl & MON_MASK);
@@ -310,27 +311,61 @@ void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
{
- u32 sel;
-
struct peespi *espi = adapter->espi;
+ u32 sel;
if (!is_T2(adapter))
return 0;
+
sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
if (!wait) {
if (!spin_trylock(&espi->lock))
return 0;
- }
- else
+ } else
spin_lock(&espi->lock);
+
if ((sel != (espi->misc_ctrl & MON_MASK))) {
writel(((espi->misc_ctrl & ~MON_MASK) | sel),
adapter->regs + A_ESPI_MISC_CONTROL);
sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
- }
- else
+ } else
sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
spin_unlock(&espi->lock);
return sel;
}
+
+/*
+ * This function is for T204 only.
+ * Compared with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in
+ * one shot, since there is no per-port counter on the out side.
+ */
+int
+t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
+{
+ struct peespi *espi = adapter->espi;
+ u8 i, nport = (u8)adapter->params.nports;
+
+ if (!wait) {
+ if (!spin_trylock(&espi->lock))
+ return -1;
+ } else
+ spin_lock(&espi->lock);
+
+ if ( (espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION ) {
+ espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) |
+ F_MONITORED_DIRECTION;
+ writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+ }
+ for (i = 0 ; i < nport; i++, valp++) {
+ if (i) {
+ writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i),
+ adapter->regs + A_ESPI_MISC_CONTROL);
+ }
+ *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+ }
+
+ writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+ spin_unlock(&espi->lock);
+ return 0;
+}
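t1_espi_get_mon_t204() above reads one per-port SOP counter at a time by rewriting the monitored-port field of A_ESPI_MISC_CONTROL and then reading A_ESPI_SCH_TOKEN3, i.e. a select-then-read window. A self-contained sketch of that access pattern against a fake register pair (write_select()/read_window() are illustrative names, not driver calls):

#include <stdint.h>
#include <stdio.h>

/* Fake hardware: one select register steering a single read-only window. */
static uint32_t counters[4] = { 10, 20, 30, 40 };
static uint32_t sel_reg;

static void write_select(uint32_t port) { sel_reg = port; }
static uint32_t read_window(void)       { return counters[sel_reg & 3]; }

/* Read every per-port counter through the single window register. */
static void read_all_ports(uint32_t *out, int nports)
{
	for (int i = 0; i < nports; i++) {
		write_select(i);        /* point the window at port i */
		out[i] = read_window();
	}
}

int main(void)
{
	uint32_t vals[4];

	read_all_ports(vals, 4);
	for (int i = 0; i < 4; i++)
		printf("port %d: %u\n", i, vals[i]);
	return 0;
}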
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h
index c90e37f8457c..84f2c98bc4cc 100644
--- a/drivers/net/chelsio/espi.h
+++ b/drivers/net/chelsio/espi.h
@@ -64,5 +64,6 @@ const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
+int t1_espi_get_mon_t204(adapter_t *, u32 *, u8);
#endif /* _CXGB_ESPI_H_ */
diff --git a/drivers/net/chelsio/fpga_defs.h b/drivers/net/chelsio/fpga_defs.h
new file mode 100644
index 000000000000..17a3c2ba36a3
--- /dev/null
+++ b/drivers/net/chelsio/fpga_defs.h
@@ -0,0 +1,232 @@
+/* $Date: 2005/03/07 23:59:05 $ $RCSfile: fpga_defs.h,v $ $Revision: 1.4 $ */
+
+/*
+ * FPGA specific definitions
+ */
+
+#ifndef __CHELSIO_FPGA_DEFS_H__
+#define __CHELSIO_FPGA_DEFS_H__
+
+#define FPGA_PCIX_ADDR_VERSION 0xA08
+#define FPGA_PCIX_ADDR_STAT 0xA0C
+
+/* FPGA master interrupt Cause/Enable bits */
+#define FPGA_PCIX_INTERRUPT_SGE_ERROR 0x1
+#define FPGA_PCIX_INTERRUPT_SGE_DATA 0x2
+#define FPGA_PCIX_INTERRUPT_TP 0x4
+#define FPGA_PCIX_INTERRUPT_MC3 0x8
+#define FPGA_PCIX_INTERRUPT_GMAC 0x10
+#define FPGA_PCIX_INTERRUPT_PCIX 0x20
+
+/* TP interrupt register addresses */
+#define FPGA_TP_ADDR_INTERRUPT_ENABLE 0xA10
+#define FPGA_TP_ADDR_INTERRUPT_CAUSE 0xA14
+#define FPGA_TP_ADDR_VERSION 0xA18
+
+/* TP interrupt Cause/Enable bits */
+#define FPGA_TP_INTERRUPT_MC4 0x1
+#define FPGA_TP_INTERRUPT_MC5 0x2
+
+/*
+ * PM interrupt register addresses
+ */
+#define FPGA_MC3_REG_INTRENABLE 0xA20
+#define FPGA_MC3_REG_INTRCAUSE 0xA24
+#define FPGA_MC3_REG_VERSION 0xA28
+
+/*
+ * GMAC interrupt register addresses
+ */
+#define FPGA_GMAC_ADDR_INTERRUPT_ENABLE 0xA30
+#define FPGA_GMAC_ADDR_INTERRUPT_CAUSE 0xA34
+#define FPGA_GMAC_ADDR_VERSION 0xA38
+
+/* GMAC Cause/Enable bits */
+#define FPGA_GMAC_INTERRUPT_PORT0 0x1
+#define FPGA_GMAC_INTERRUPT_PORT1 0x2
+#define FPGA_GMAC_INTERRUPT_PORT2 0x4
+#define FPGA_GMAC_INTERRUPT_PORT3 0x8
+
+/* MI0 registers */
+#define A_MI0_CLK 0xb00
+
+#define S_MI0_CLK_DIV 0
+#define M_MI0_CLK_DIV 0xff
+#define V_MI0_CLK_DIV(x) ((x) << S_MI0_CLK_DIV)
+#define G_MI0_CLK_DIV(x) (((x) >> S_MI0_CLK_DIV) & M_MI0_CLK_DIV)
+
+#define S_MI0_CLK_CNT 8
+#define M_MI0_CLK_CNT 0xff
+#define V_MI0_CLK_CNT(x) ((x) << S_MI0_CLK_CNT)
+#define G_MI0_CLK_CNT(x) (((x) >> S_MI0_CLK_CNT) & M_MI0_CLK_CNT)
+
+#define A_MI0_CSR 0xb04
+
+#define S_MI0_CSR_POLL 0
+#define V_MI0_CSR_POLL(x) ((x) << S_MI0_CSR_POLL)
+#define F_MI0_CSR_POLL V_MI0_CSR_POLL(1U)
+
+#define S_MI0_PREAMBLE 1
+#define V_MI0_PREAMBLE(x) ((x) << S_MI0_PREAMBLE)
+#define F_MI0_PREAMBLE V_MI0_PREAMBLE(1U)
+
+#define S_MI0_INTR_ENABLE 2
+#define V_MI0_INTR_ENABLE(x) ((x) << S_MI0_INTR_ENABLE)
+#define F_MI0_INTR_ENABLE V_MI0_INTR_ENABLE(1U)
+
+#define S_MI0_BUSY 3
+#define V_MI0_BUSY(x) ((x) << S_MI0_BUSY)
+#define F_MI0_BUSY V_MI0_BUSY(1U)
+
+#define S_MI0_MDIO 4
+#define V_MI0_MDIO(x) ((x) << S_MI0_MDIO)
+#define F_MI0_MDIO V_MI0_MDIO(1U)
+
+#define A_MI0_ADDR 0xb08
+
+#define S_MI0_PHY_REG_ADDR 0
+#define M_MI0_PHY_REG_ADDR 0x1f
+#define V_MI0_PHY_REG_ADDR(x) ((x) << S_MI0_PHY_REG_ADDR)
+#define G_MI0_PHY_REG_ADDR(x) (((x) >> S_MI0_PHY_REG_ADDR) & M_MI0_PHY_REG_ADDR)
+
+#define S_MI0_PHY_ADDR 5
+#define M_MI0_PHY_ADDR 0x1f
+#define V_MI0_PHY_ADDR(x) ((x) << S_MI0_PHY_ADDR)
+#define G_MI0_PHY_ADDR(x) (((x) >> S_MI0_PHY_ADDR) & M_MI0_PHY_ADDR)
+
+#define A_MI0_DATA_EXT 0xb0c
+#define A_MI0_DATA_INT 0xb10
+
+/* GMAC registers */
+#define A_GMAC_MACID_LO 0x28
+#define A_GMAC_MACID_HI 0x2c
+#define A_GMAC_CSR 0x30
+
+#define S_INTERFACE 0
+#define M_INTERFACE 0x3
+#define V_INTERFACE(x) ((x) << S_INTERFACE)
+#define G_INTERFACE(x) (((x) >> S_INTERFACE) & M_INTERFACE)
+
+#define S_MAC_TX_ENABLE 2
+#define V_MAC_TX_ENABLE(x) ((x) << S_MAC_TX_ENABLE)
+#define F_MAC_TX_ENABLE V_MAC_TX_ENABLE(1U)
+
+#define S_MAC_RX_ENABLE 3
+#define V_MAC_RX_ENABLE(x) ((x) << S_MAC_RX_ENABLE)
+#define F_MAC_RX_ENABLE V_MAC_RX_ENABLE(1U)
+
+#define S_MAC_LB_ENABLE 4
+#define V_MAC_LB_ENABLE(x) ((x) << S_MAC_LB_ENABLE)
+#define F_MAC_LB_ENABLE V_MAC_LB_ENABLE(1U)
+
+#define S_MAC_SPEED 5
+#define M_MAC_SPEED 0x3
+#define V_MAC_SPEED(x) ((x) << S_MAC_SPEED)
+#define G_MAC_SPEED(x) (((x) >> S_MAC_SPEED) & M_MAC_SPEED)
+
+#define S_MAC_HD_FC_ENABLE 7
+#define V_MAC_HD_FC_ENABLE(x) ((x) << S_MAC_HD_FC_ENABLE)
+#define F_MAC_HD_FC_ENABLE V_MAC_HD_FC_ENABLE(1U)
+
+#define S_MAC_HALF_DUPLEX 8
+#define V_MAC_HALF_DUPLEX(x) ((x) << S_MAC_HALF_DUPLEX)
+#define F_MAC_HALF_DUPLEX V_MAC_HALF_DUPLEX(1U)
+
+#define S_MAC_PROMISC 9
+#define V_MAC_PROMISC(x) ((x) << S_MAC_PROMISC)
+#define F_MAC_PROMISC V_MAC_PROMISC(1U)
+
+#define S_MAC_MC_ENABLE 10
+#define V_MAC_MC_ENABLE(x) ((x) << S_MAC_MC_ENABLE)
+#define F_MAC_MC_ENABLE V_MAC_MC_ENABLE(1U)
+
+#define S_MAC_RESET 11
+#define V_MAC_RESET(x) ((x) << S_MAC_RESET)
+#define F_MAC_RESET V_MAC_RESET(1U)
+
+#define S_MAC_RX_PAUSE_ENABLE 12
+#define V_MAC_RX_PAUSE_ENABLE(x) ((x) << S_MAC_RX_PAUSE_ENABLE)
+#define F_MAC_RX_PAUSE_ENABLE V_MAC_RX_PAUSE_ENABLE(1U)
+
+#define S_MAC_TX_PAUSE_ENABLE 13
+#define V_MAC_TX_PAUSE_ENABLE(x) ((x) << S_MAC_TX_PAUSE_ENABLE)
+#define F_MAC_TX_PAUSE_ENABLE V_MAC_TX_PAUSE_ENABLE(1U)
+
+#define S_MAC_LWM_ENABLE 14
+#define V_MAC_LWM_ENABLE(x) ((x) << S_MAC_LWM_ENABLE)
+#define F_MAC_LWM_ENABLE V_MAC_LWM_ENABLE(1U)
+
+#define S_MAC_MAGIC_PKT_ENABLE 15
+#define V_MAC_MAGIC_PKT_ENABLE(x) ((x) << S_MAC_MAGIC_PKT_ENABLE)
+#define F_MAC_MAGIC_PKT_ENABLE V_MAC_MAGIC_PKT_ENABLE(1U)
+
+#define S_MAC_ISL_ENABLE 16
+#define V_MAC_ISL_ENABLE(x) ((x) << S_MAC_ISL_ENABLE)
+#define F_MAC_ISL_ENABLE V_MAC_ISL_ENABLE(1U)
+
+#define S_MAC_JUMBO_ENABLE 17
+#define V_MAC_JUMBO_ENABLE(x) ((x) << S_MAC_JUMBO_ENABLE)
+#define F_MAC_JUMBO_ENABLE V_MAC_JUMBO_ENABLE(1U)
+
+#define S_MAC_RX_PAD_ENABLE 18
+#define V_MAC_RX_PAD_ENABLE(x) ((x) << S_MAC_RX_PAD_ENABLE)
+#define F_MAC_RX_PAD_ENABLE V_MAC_RX_PAD_ENABLE(1U)
+
+#define S_MAC_RX_CRC_ENABLE 19
+#define V_MAC_RX_CRC_ENABLE(x) ((x) << S_MAC_RX_CRC_ENABLE)
+#define F_MAC_RX_CRC_ENABLE V_MAC_RX_CRC_ENABLE(1U)
+
+#define A_GMAC_IFS 0x34
+
+#define S_MAC_IFS2 0
+#define M_MAC_IFS2 0x3f
+#define V_MAC_IFS2(x) ((x) << S_MAC_IFS2)
+#define G_MAC_IFS2(x) (((x) >> S_MAC_IFS2) & M_MAC_IFS2)
+
+#define S_MAC_IFS1 8
+#define M_MAC_IFS1 0x7f
+#define V_MAC_IFS1(x) ((x) << S_MAC_IFS1)
+#define G_MAC_IFS1(x) (((x) >> S_MAC_IFS1) & M_MAC_IFS1)
+
+#define A_GMAC_JUMBO_FRAME_LEN 0x38
+#define A_GMAC_LNK_DLY 0x3c
+#define A_GMAC_PAUSETIME 0x40
+#define A_GMAC_MCAST_LO 0x44
+#define A_GMAC_MCAST_HI 0x48
+#define A_GMAC_MCAST_MASK_LO 0x4c
+#define A_GMAC_MCAST_MASK_HI 0x50
+#define A_GMAC_RMT_CNT 0x54
+#define A_GMAC_RMT_DATA 0x58
+#define A_GMAC_BACKOFF_SEED 0x5c
+#define A_GMAC_TXF_THRES 0x60
+
+#define S_TXF_READ_THRESHOLD 0
+#define M_TXF_READ_THRESHOLD 0xff
+#define V_TXF_READ_THRESHOLD(x) ((x) << S_TXF_READ_THRESHOLD)
+#define G_TXF_READ_THRESHOLD(x) (((x) >> S_TXF_READ_THRESHOLD) & M_TXF_READ_THRESHOLD)
+
+#define S_TXF_WRITE_THRESHOLD 16
+#define M_TXF_WRITE_THRESHOLD 0xff
+#define V_TXF_WRITE_THRESHOLD(x) ((x) << S_TXF_WRITE_THRESHOLD)
+#define G_TXF_WRITE_THRESHOLD(x) (((x) >> S_TXF_WRITE_THRESHOLD) & M_TXF_WRITE_THRESHOLD)
+
+#define MAC_REG_BASE 0x600
+#define MAC_REG_ADDR(idx, reg) (MAC_REG_BASE + (idx) * 128 + (reg))
+
+#define MAC_REG_IDLO(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_LO)
+#define MAC_REG_IDHI(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_HI)
+#define MAC_REG_CSR(idx) MAC_REG_ADDR(idx, A_GMAC_CSR)
+#define MAC_REG_IFS(idx) MAC_REG_ADDR(idx, A_GMAC_IFS)
+#define MAC_REG_LARGEFRAMELENGTH(idx) MAC_REG_ADDR(idx, A_GMAC_JUMBO_FRAME_LEN)
+#define MAC_REG_LINKDLY(idx) MAC_REG_ADDR(idx, A_GMAC_LNK_DLY)
+#define MAC_REG_PAUSETIME(idx) MAC_REG_ADDR(idx, A_GMAC_PAUSETIME)
+#define MAC_REG_CASTLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_LO)
+#define MAC_REG_MCASTHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_HI)
+#define MAC_REG_CASTMASKLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_LO)
+#define MAC_REG_MCASTMASKHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_HI)
+#define MAC_REG_RMCNT(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_CNT)
+#define MAC_REG_RMDATA(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_DATA)
+#define MAC_REG_GMRANDBACKOFFSEED(idx) MAC_REG_ADDR(idx, A_GMAC_BACKOFF_SEED)
+#define MAC_REG_TXFTHRESHOLDS(idx) MAC_REG_ADDR(idx, A_GMAC_TXF_THRES)
+
+#endif
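The new fpga_defs.h follows the driver's register-field naming scheme: S_* is the field's bit offset, M_* its mask, V_*(x) positions a value in the field, G_*(x) extracts it, and F_* is the single-bit convenience form. A short self-contained example of the same macro shape, with made-up DEMO field names:

#include <stdint.h>
#include <stdio.h>

/* Illustrative 2-bit "speed" field at bit 5, same shape as the driver macros. */
#define S_DEMO_SPEED    5
#define M_DEMO_SPEED    0x3
#define V_DEMO_SPEED(x) ((x) << S_DEMO_SPEED)
#define G_DEMO_SPEED(x) (((x) >> S_DEMO_SPEED) & M_DEMO_SPEED)

/* Illustrative single-bit enable at bit 3. */
#define S_DEMO_ENABLE    3
#define V_DEMO_ENABLE(x) ((x) << S_DEMO_ENABLE)
#define F_DEMO_ENABLE    V_DEMO_ENABLE(1U)

int main(void)
{
	uint32_t csr = 0;

	csr &= ~V_DEMO_SPEED(M_DEMO_SPEED);        /* clear the field */
	csr |= V_DEMO_SPEED(2) | F_DEMO_ENABLE;    /* set speed=2, enable=1 */
	printf("csr=0x%x speed=%u\n", csr, G_DEMO_SPEED(csr));
	return 0;
}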
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
index 746b0eeea964..a2b8ad9b5535 100644
--- a/drivers/net/chelsio/gmac.h
+++ b/drivers/net/chelsio/gmac.h
@@ -62,6 +62,8 @@ struct cmac_statistics {
u64 TxInternalMACXmitError;
u64 TxFramesWithExcessiveDeferral;
u64 TxFCSErrors;
+ u64 TxJumboFramesOK;
+ u64 TxJumboOctetsOK;
/* Receive */
u64 RxOctetsOK;
@@ -81,6 +83,8 @@ struct cmac_statistics {
u64 RxInRangeLengthErrors;
u64 RxOutOfRangeLengthField;
u64 RxFrameTooLongErrors;
+ u64 RxJumboFramesOK;
+ u64 RxJumboOctetsOK;
};
struct cmac_ops {
@@ -128,6 +132,7 @@ struct gmac {
extern struct gmac t1_pm3393_ops;
extern struct gmac t1_chelsio_mac_ops;
extern struct gmac t1_vsc7321_ops;
+extern struct gmac t1_vsc7326_ops;
extern struct gmac t1_ixf1010_ops;
extern struct gmac t1_dummy_mac_ops;
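gmac.h now also exports t1_vsc7326_ops; each struct gmac / struct cmac_ops entry is an ops table of function pointers, so the core drives any MAC flavor through the same indirect calls. A generic sketch of that ops-table idiom, with invented names:

#include <stdio.h>

/* Generic ops table: callers only ever see these function pointers. */
struct demo_mac_ops {
	int (*enable)(void *priv);
	int (*set_mtu)(void *priv, int mtu);
};

static int fake_enable(void *priv)           { (void)priv; puts("enable"); return 0; }
static int fake_set_mtu(void *priv, int mtu) { (void)priv; printf("mtu=%d\n", mtu); return 0; }

static const struct demo_mac_ops fake_mac_ops = {
	.enable  = fake_enable,
	.set_mtu = fake_set_mtu,
};

int main(void)
{
	const struct demo_mac_ops *ops = &fake_mac_ops;  /* selected at probe time */

	ops->enable(NULL);
	ops->set_mtu(NULL, 1500);
	return 0;
}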
diff --git a/drivers/net/chelsio/ixf1010.c b/drivers/net/chelsio/ixf1010.c
new file mode 100644
index 000000000000..5b8f144e83d4
--- /dev/null
+++ b/drivers/net/chelsio/ixf1010.c
@@ -0,0 +1,485 @@
+/* $Date: 2005/11/12 02:13:49 $ $RCSfile: ixf1010.c,v $ $Revision: 1.36 $ */
+#include "gmac.h"
+#include "elmer0.h"
+
+/* Update fast changing statistics every 15 seconds */
+#define STATS_TICK_SECS 15
+/* 30 minutes for full statistics update */
+#define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS)
+
+/*
+ * The IXF1010 can handle frames up to 16383 bytes but it's optimized for
+ * frames up to 9831 (0x2667) bytes, so we limit jumbo frame size to this.
+ * This length includes ethernet header and FCS.
+ */
+#define MAX_FRAME_SIZE 0x2667
+
+/* MAC registers */
+enum {
+ /* Per-port registers */
+ REG_MACADDR_LOW = 0,
+ REG_MACADDR_HIGH = 0x4,
+ REG_FDFC_TYPE = 0xC,
+ REG_FC_TX_TIMER_VALUE = 0x1c,
+ REG_IPG_RX_TIME1 = 0x28,
+ REG_IPG_RX_TIME2 = 0x2c,
+ REG_IPG_TX_TIME = 0x30,
+ REG_PAUSE_THRES = 0x38,
+ REG_MAX_FRAME_SIZE = 0x3c,
+ REG_RGMII_SPEED = 0x40,
+ REG_FC_ENABLE = 0x48,
+ REG_DISCARD_CTRL_FRAMES = 0x54,
+ REG_DIVERSE_CONFIG = 0x60,
+ REG_RX_FILTER = 0x64,
+ REG_MC_ADDR_LOW = 0x68,
+ REG_MC_ADDR_HIGH = 0x6c,
+
+ REG_RX_OCTETS_OK = 0x80,
+ REG_RX_OCTETS_BAD = 0x84,
+ REG_RX_UC_PKTS = 0x88,
+ REG_RX_MC_PKTS = 0x8c,
+ REG_RX_BC_PKTS = 0x90,
+ REG_RX_FCS_ERR = 0xb0,
+ REG_RX_TAGGED = 0xb4,
+ REG_RX_DATA_ERR = 0xb8,
+ REG_RX_ALIGN_ERR = 0xbc,
+ REG_RX_LONG_ERR = 0xc0,
+ REG_RX_JABBER_ERR = 0xc4,
+ REG_RX_PAUSE_FRAMES = 0xc8,
+ REG_RX_UNKNOWN_CTRL_FRAMES = 0xcc,
+ REG_RX_VERY_LONG_ERR = 0xd0,
+ REG_RX_RUNT_ERR = 0xd4,
+ REG_RX_SHORT_ERR = 0xd8,
+ REG_RX_SYMBOL_ERR = 0xe4,
+
+ REG_TX_OCTETS_OK = 0x100,
+ REG_TX_OCTETS_BAD = 0x104,
+ REG_TX_UC_PKTS = 0x108,
+ REG_TX_MC_PKTS = 0x10c,
+ REG_TX_BC_PKTS = 0x110,
+ REG_TX_EXCESSIVE_LEN_DROP = 0x14c,
+ REG_TX_UNDERRUN = 0x150,
+ REG_TX_TAGGED = 0x154,
+ REG_TX_PAUSE_FRAMES = 0x15C,
+
+ /* Global registers */
+ REG_PORT_ENABLE = 0x1400,
+
+ REG_JTAG_ID = 0x1430,
+
+ RX_FIFO_HIGH_WATERMARK_BASE = 0x1600,
+ RX_FIFO_LOW_WATERMARK_BASE = 0x1628,
+ RX_FIFO_FRAMES_REMOVED_BASE = 0x1650,
+
+ REG_RX_ERR_DROP = 0x167c,
+ REG_RX_FIFO_OVERFLOW_EVENT = 0x1680,
+
+ TX_FIFO_HIGH_WATERMARK_BASE = 0x1800,
+ TX_FIFO_LOW_WATERMARK_BASE = 0x1828,
+ TX_FIFO_XFER_THRES_BASE = 0x1850,
+
+ REG_TX_FIFO_OVERFLOW_EVENT = 0x1878,
+ REG_TX_FIFO_OOS_EVENT = 0x1884,
+
+ TX_FIFO_FRAMES_REMOVED_BASE = 0x1888,
+
+ REG_SPI_RX_BURST = 0x1c00,
+ REG_SPI_RX_TRAINING = 0x1c04,
+ REG_SPI_RX_CALENDAR = 0x1c08,
+ REG_SPI_TX_SYNC = 0x1c0c
+};
+
+enum { /* RMON registers */
+ REG_RxOctetsTotalOK = 0x80,
+ REG_RxOctetsBad = 0x84,
+ REG_RxUCPkts = 0x88,
+ REG_RxMCPkts = 0x8c,
+ REG_RxBCPkts = 0x90,
+ REG_RxJumboPkts = 0xac,
+ REG_RxFCSErrors = 0xb0,
+ REG_RxDataErrors = 0xb8,
+ REG_RxAlignErrors = 0xbc,
+ REG_RxLongErrors = 0xc0,
+ REG_RxJabberErrors = 0xc4,
+ REG_RxPauseMacControlCounter = 0xc8,
+ REG_RxVeryLongErrors = 0xd0,
+ REG_RxRuntErrors = 0xd4,
+ REG_RxShortErrors = 0xd8,
+ REG_RxSequenceErrors = 0xe0,
+ REG_RxSymbolErrors = 0xe4,
+
+ REG_TxOctetsTotalOK = 0x100,
+ REG_TxOctetsBad = 0x104,
+ REG_TxUCPkts = 0x108,
+ REG_TxMCPkts = 0x10c,
+ REG_TxBCPkts = 0x110,
+ REG_TxJumboPkts = 0x12C,
+ REG_TxTotalCollisions = 0x134,
+ REG_TxExcessiveLengthDrop = 0x14c,
+ REG_TxUnderrun = 0x150,
+ REG_TxCRCErrors = 0x158,
+ REG_TxPauseFrames = 0x15c
+};
+
+enum {
+ DIVERSE_CONFIG_PAD_ENABLE = 0x80,
+ DIVERSE_CONFIG_CRC_ADD = 0x40
+};
+
+#define MACREG_BASE 0
+#define MACREG(mac, mac_reg) ((mac)->instance->mac_base + (mac_reg))
+
+struct _cmac_instance {
+ u32 mac_base;
+ u32 index;
+ u32 version;
+ u32 ticks;
+};
+
+static void disable_port(struct cmac *mac)
+{
+ u32 val;
+
+ t1_tpi_read(mac->adapter, REG_PORT_ENABLE, &val);
+ val &= ~(1 << mac->instance->index);
+ t1_tpi_write(mac->adapter, REG_PORT_ENABLE, val);
+}
+
+#define RMON_UPDATE(mac, name, stat_name) \
+ t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
+ (mac)->stats.stat_name += val;
+
+/*
+ * Read the current values of the RMON counters and add them to the cumulative
+ * port statistics. The HW RMON counters are cleared by this operation.
+ */
+static void port_stats_update(struct cmac *mac)
+{
+ u32 val;
+
+ /* Rx stats */
+ RMON_UPDATE(mac, RxOctetsTotalOK, RxOctetsOK);
+ RMON_UPDATE(mac, RxOctetsBad, RxOctetsBad);
+ RMON_UPDATE(mac, RxUCPkts, RxUnicastFramesOK);
+ RMON_UPDATE(mac, RxMCPkts, RxMulticastFramesOK);
+ RMON_UPDATE(mac, RxBCPkts, RxBroadcastFramesOK);
+ RMON_UPDATE(mac, RxJumboPkts, RxJumboFramesOK);
+ RMON_UPDATE(mac, RxFCSErrors, RxFCSErrors);
+ RMON_UPDATE(mac, RxAlignErrors, RxAlignErrors);
+ RMON_UPDATE(mac, RxLongErrors, RxFrameTooLongErrors);
+ RMON_UPDATE(mac, RxVeryLongErrors, RxFrameTooLongErrors);
+ RMON_UPDATE(mac, RxPauseMacControlCounter, RxPauseFrames);
+ RMON_UPDATE(mac, RxDataErrors, RxDataErrors);
+ RMON_UPDATE(mac, RxJabberErrors, RxJabberErrors);
+ RMON_UPDATE(mac, RxRuntErrors, RxRuntErrors);
+ RMON_UPDATE(mac, RxShortErrors, RxRuntErrors);
+ RMON_UPDATE(mac, RxSequenceErrors, RxSequenceErrors);
+ RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
+
+ /* Tx stats (skip collision stats as we are full-duplex only) */
+ RMON_UPDATE(mac, TxOctetsTotalOK, TxOctetsOK);
+ RMON_UPDATE(mac, TxOctetsBad, TxOctetsBad);
+ RMON_UPDATE(mac, TxUCPkts, TxUnicastFramesOK);
+ RMON_UPDATE(mac, TxMCPkts, TxMulticastFramesOK);
+ RMON_UPDATE(mac, TxBCPkts, TxBroadcastFramesOK);
+ RMON_UPDATE(mac, TxJumboPkts, TxJumboFramesOK);
+ RMON_UPDATE(mac, TxPauseFrames, TxPauseFrames);
+ RMON_UPDATE(mac, TxExcessiveLengthDrop, TxLengthErrors);
+ RMON_UPDATE(mac, TxUnderrun, TxUnderrun);
+ RMON_UPDATE(mac, TxCRCErrors, TxFCSErrors);
+}
+
+/* No-op interrupt operation as this MAC does not support interrupts */
+static int mac_intr_op(struct cmac *mac)
+{
+ return 0;
+}
+
+/* Expect MAC address to be in network byte order. */
+static int mac_set_address(struct cmac *mac, u8 addr[6])
+{
+ u32 addr_lo, addr_hi;
+
+ addr_lo = addr[2];
+ addr_lo = (addr_lo << 8) | addr[3];
+ addr_lo = (addr_lo << 8) | addr[4];
+ addr_lo = (addr_lo << 8) | addr[5];
+
+ addr_hi = addr[0];
+ addr_hi = (addr_hi << 8) | addr[1];
+
+ t1_tpi_write(mac->adapter, MACREG(mac, REG_MACADDR_LOW), addr_lo);
+ t1_tpi_write(mac->adapter, MACREG(mac, REG_MACADDR_HIGH), addr_hi);
+ return 0;
+}
+
+static int mac_get_address(struct cmac *mac, u8 addr[6])
+{
+ u32 addr_lo, addr_hi;
+
+ t1_tpi_read(mac->adapter, MACREG(mac, REG_MACADDR_LOW), &addr_lo);
+ t1_tpi_read(mac->adapter, MACREG(mac, REG_MACADDR_HIGH), &addr_hi);
+
+ addr[0] = (u8) (addr_hi >> 8);
+ addr[1] = (u8) addr_hi;
+ addr[2] = (u8) (addr_lo >> 24);
+ addr[3] = (u8) (addr_lo >> 16);
+ addr[4] = (u8) (addr_lo >> 8);
+ addr[5] = (u8) addr_lo;
+ return 0;
+}
+
+/* This is intended to reset a port, not the whole MAC */
+static int mac_reset(struct cmac *mac)
+{
+ return 0;
+}
+
+static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
+{
+ u32 val, new_mode;
+ adapter_t *adapter = mac->adapter;
+ u32 addr_lo, addr_hi;
+ u8 *addr;
+
+ t1_tpi_read(adapter, MACREG(mac, REG_RX_FILTER), &val);
+ new_mode = val & ~7;
+ if (!t1_rx_mode_promisc(rm) && mac->instance->version > 0)
+ new_mode |= 1; /* only set if version > 0 due to erratum */
+ if (!t1_rx_mode_promisc(rm) && !t1_rx_mode_allmulti(rm)
+ && t1_rx_mode_mc_cnt(rm) <= 1)
+ new_mode |= 2;
+ if (new_mode != val)
+ t1_tpi_write(adapter, MACREG(mac, REG_RX_FILTER), new_mode);
+ switch (t1_rx_mode_mc_cnt(rm)) {
+ case 0:
+ t1_tpi_write(adapter, MACREG(mac, REG_MC_ADDR_LOW), 0);
+ t1_tpi_write(adapter, MACREG(mac, REG_MC_ADDR_HIGH), 0);
+ break;
+ case 1:
+ addr = t1_get_next_mcaddr(rm);
+ addr_lo = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
+ addr[5];
+ addr_hi = (addr[0] << 8) | addr[1];
+ t1_tpi_write(adapter, MACREG(mac, REG_MC_ADDR_LOW), addr_lo);
+ t1_tpi_write(adapter, MACREG(mac, REG_MC_ADDR_HIGH), addr_hi);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int mac_set_mtu(struct cmac *mac, int mtu)
+{
+ /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
+ if (mtu > (MAX_FRAME_SIZE - 14 - 4)) return -EINVAL;
+ t1_tpi_write(mac->adapter, MACREG(mac, REG_MAX_FRAME_SIZE),
+ mtu + 14 + 4);
+ return 0;
+}
+
+static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex,
+ int fc)
+{
+ u32 val;
+
+ if (speed >= 0 && speed != SPEED_100 && speed != SPEED_1000)
+ return -1;
+ if (duplex >= 0 && duplex != DUPLEX_FULL)
+ return -1;
+
+ if (speed >= 0) {
+ val = speed == SPEED_100 ? 1 : 2;
+ t1_tpi_write(mac->adapter, MACREG(mac, REG_RGMII_SPEED), val);
+ }
+
+ t1_tpi_read(mac->adapter, MACREG(mac, REG_FC_ENABLE), &val);
+ val &= ~3;
+ if (fc & PAUSE_RX)
+ val |= 1;
+ if (fc & PAUSE_TX)
+ val |= 2;
+ t1_tpi_write(mac->adapter, MACREG(mac, REG_FC_ENABLE), val);
+ return 0;
+}
+
+static int mac_get_speed_duplex_fc(struct cmac *mac, int *speed, int *duplex,
+ int *fc)
+{
+ u32 val;
+
+ if (duplex)
+ *duplex = DUPLEX_FULL;
+ if (speed) {
+ t1_tpi_read(mac->adapter, MACREG(mac, REG_RGMII_SPEED),
+ &val);
+ *speed = (val & 2) ? SPEED_1000 : SPEED_100;
+ }
+ if (fc) {
+ t1_tpi_read(mac->adapter, MACREG(mac, REG_FC_ENABLE), &val);
+ *fc = 0;
+ if (val & 1)
+ *fc |= PAUSE_RX;
+ if (val & 2)
+ *fc |= PAUSE_TX;
+ }
+ return 0;
+}
+
+static void enable_port(struct cmac *mac)
+{
+ u32 val;
+ u32 index = mac->instance->index;
+ adapter_t *adapter = mac->adapter;
+
+ t1_tpi_read(adapter, MACREG(mac, REG_DIVERSE_CONFIG), &val);
+ val |= DIVERSE_CONFIG_CRC_ADD | DIVERSE_CONFIG_PAD_ENABLE;
+ t1_tpi_write(adapter, MACREG(mac, REG_DIVERSE_CONFIG), val);
+ if (mac->instance->version > 0)
+ t1_tpi_write(adapter, MACREG(mac, REG_RX_FILTER), 3);
+ else /* Don't enable unicast address filtering due to IXF1010 bug */
+ t1_tpi_write(adapter, MACREG(mac, REG_RX_FILTER), 2);
+
+ t1_tpi_read(adapter, REG_RX_ERR_DROP, &val);
+ val |= (1 << index);
+ t1_tpi_write(adapter, REG_RX_ERR_DROP, val);
+
+ /*
+ * Clear the port RMON registers by adding their current values to the
+ * cumulative port stats and then clearing the stats. Really.
+ */
+ port_stats_update(mac);
+ memset(&mac->stats, 0, sizeof(struct cmac_statistics));
+ mac->instance->ticks = 0;
+
+ t1_tpi_read(adapter, REG_PORT_ENABLE, &val);
+ val |= (1 << index);
+ t1_tpi_write(adapter, REG_PORT_ENABLE, val);
+
+ index <<= 2;
+ if (is_T2(adapter)) {
+ /* T204: set the Fifo water level & threshold */
+ t1_tpi_write(adapter, RX_FIFO_HIGH_WATERMARK_BASE + index, 0x740);
+ t1_tpi_write(adapter, RX_FIFO_LOW_WATERMARK_BASE + index, 0x730);
+ t1_tpi_write(adapter, TX_FIFO_HIGH_WATERMARK_BASE + index, 0x600);
+ t1_tpi_write(adapter, TX_FIFO_LOW_WATERMARK_BASE + index, 0x1d0);
+ t1_tpi_write(adapter, TX_FIFO_XFER_THRES_BASE + index, 0x1100);
+ } else {
+ /*
+ * Set the TX Fifo Threshold to 0x400 instead of 0x100 to work around
+ * Underrun problem. Intel has blessed this solution.
+ */
+ t1_tpi_write(adapter, TX_FIFO_XFER_THRES_BASE + index, 0x400);
+ }
+}
+
+/* IXF1010 ports do not have separate enables for TX and RX */
+static int mac_enable(struct cmac *mac, int which)
+{
+ if (which & (MAC_DIRECTION_RX | MAC_DIRECTION_TX))
+ enable_port(mac);
+ return 0;
+}
+
+static int mac_disable(struct cmac *mac, int which)
+{
+ if (which & (MAC_DIRECTION_RX | MAC_DIRECTION_TX))
+ disable_port(mac);
+ return 0;
+}
+
+/*
+ * This function is called periodically to accumulate the current values of the
+ * RMON counters into the port statistics. Since the counters are only 32 bits
+ * some of them can overflow in less than a minute at GigE speeds, so this
+ * function should be called every 30 seconds or so.
+ *
+ * To cut down on reading costs we update only the octet counters at each tick
+ * and do a full update at major ticks, which can be every 30 minutes or more.
+ */
+static const struct cmac_statistics *mac_update_statistics(struct cmac *mac,
+ int flag)
+{
+ if (flag == MAC_STATS_UPDATE_FULL ||
+ MAJOR_UPDATE_TICKS <= mac->instance->ticks) {
+ port_stats_update(mac);
+ mac->instance->ticks = 0;
+ } else {
+ u32 val;
+
+ RMON_UPDATE(mac, RxOctetsTotalOK, RxOctetsOK);
+ RMON_UPDATE(mac, TxOctetsTotalOK, TxOctetsOK);
+ mac->instance->ticks++;
+ }
+ return &mac->stats;
+}
+
+static void mac_destroy(struct cmac *mac)
+{
+ kfree(mac);
+}
+
+static struct cmac_ops ixf1010_ops = {
+ .destroy = mac_destroy,
+ .reset = mac_reset,
+ .interrupt_enable = mac_intr_op,
+ .interrupt_disable = mac_intr_op,
+ .interrupt_clear = mac_intr_op,
+ .enable = mac_enable,
+ .disable = mac_disable,
+ .set_mtu = mac_set_mtu,
+ .set_rx_mode = mac_set_rx_mode,
+ .set_speed_duplex_fc = mac_set_speed_duplex_fc,
+ .get_speed_duplex_fc = mac_get_speed_duplex_fc,
+ .statistics_update = mac_update_statistics,
+ .macaddress_get = mac_get_address,
+ .macaddress_set = mac_set_address,
+};
+
+static int ixf1010_mac_reset(adapter_t *adapter)
+{
+ u32 val;
+
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ if ((val & 1) != 0) {
+ val &= ~1;
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(2);
+ }
+ val |= 1;
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(2);
+
+ t1_tpi_write(adapter, REG_PORT_ENABLE, 0);
+ return 0;
+}
+
+static struct cmac *ixf1010_mac_create(adapter_t *adapter, int index)
+{
+ struct cmac *mac;
+ u32 val;
+
+ if (index > 9) return NULL;
+
+ mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
+ if (!mac) return NULL;
+
+ mac->ops = &ixf1010_ops;
+ mac->instance = (cmac_instance *)(mac + 1);
+
+ mac->instance->mac_base = MACREG_BASE + (index * 0x200);
+ mac->instance->index = index;
+ mac->adapter = adapter;
+ mac->instance->ticks = 0;
+
+ t1_tpi_read(adapter, REG_JTAG_ID, &val);
+ mac->instance->version = val >> 28;
+ return mac;
+}
+
+struct gmac t1_ixf1010_ops = {
+ STATS_TICK_SECS,
+ ixf1010_mac_create,
+ ixf1010_mac_reset
+};
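The ixf1010 statistics code accumulates clear-on-read 32-bit RMON counters into 64-bit software totals, folding in the fast-moving octet counters on every tick and everything else only on a full update, since a 32-bit octet counter can wrap in well under a minute at gigabit rates. A small sketch of that accumulation scheme (read_hw_counter() is a stand-in, not a driver call):

#include <stdint.h>
#include <stdio.h>

#define MAJOR_UPDATE_TICKS 120   /* full update every 120 minor ticks */

/* Pretend clear-on-read hardware counter. */
static uint32_t read_hw_counter(int reg) { (void)reg; return 1000; }

struct demo_stats {
	uint64_t rx_octets;
	uint64_t rx_frames;
	int ticks;
};

static void stats_tick(struct demo_stats *s, int full)
{
	/* Octets move fastest, so fold them in on every tick. */
	s->rx_octets += read_hw_counter(0);

	if (full || ++s->ticks >= MAJOR_UPDATE_TICKS) {
		s->rx_frames += read_hw_counter(1);  /* slower counters */
		s->ticks = 0;
	}
}

int main(void)
{
	struct demo_stats s = { 0 };

	stats_tick(&s, 0);   /* minor tick */
	stats_tick(&s, 1);   /* full update */
	printf("octets=%llu frames=%llu\n",
	       (unsigned long long)s.rx_octets,
	       (unsigned long long)s.rx_frames);
	return 0;
}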
diff --git a/drivers/net/chelsio/mac.c b/drivers/net/chelsio/mac.c
new file mode 100644
index 000000000000..6af39dc70459
--- /dev/null
+++ b/drivers/net/chelsio/mac.c
@@ -0,0 +1,368 @@
+/* $Date: 2005/10/22 00:42:59 $ $RCSfile: mac.c,v $ $Revision: 1.32 $ */
+#include "gmac.h"
+#include "regs.h"
+#include "fpga_defs.h"
+
+#define MAC_CSR_INTERFACE_GMII 0x0
+#define MAC_CSR_INTERFACE_TBI 0x1
+#define MAC_CSR_INTERFACE_MII 0x2
+#define MAC_CSR_INTERFACE_RMII 0x3
+
+/* Chelsio's MAC statistics. */
+struct mac_statistics {
+
+ /* Transmit */
+ u32 TxFramesTransmittedOK;
+ u32 TxReserved1;
+ u32 TxReserved2;
+ u32 TxOctetsTransmittedOK;
+ u32 TxFramesWithDeferredXmissions;
+ u32 TxLateCollisions;
+ u32 TxFramesAbortedDueToXSCollisions;
+ u32 TxFramesLostDueToIntMACXmitError;
+ u32 TxReserved3;
+ u32 TxMulticastFrameXmittedOK;
+ u32 TxBroadcastFramesXmittedOK;
+ u32 TxFramesWithExcessiveDeferral;
+ u32 TxPAUSEMACCtrlFramesTransmitted;
+
+ /* Receive */
+ u32 RxFramesReceivedOK;
+ u32 RxFrameCheckSequenceErrors;
+ u32 RxAlignmentErrors;
+ u32 RxOctetsReceivedOK;
+ u32 RxFramesLostDueToIntMACRcvError;
+ u32 RxMulticastFramesReceivedOK;
+ u32 RxBroadcastFramesReceivedOK;
+ u32 RxInRangeLengthErrors;
+ u32 RxTxOutOfRangeLengthField;
+ u32 RxFrameTooLongErrors;
+ u32 RxPAUSEMACCtrlFramesReceived;
+};
+
+static int static_aPorts[] = {
+ FPGA_GMAC_INTERRUPT_PORT0,
+ FPGA_GMAC_INTERRUPT_PORT1,
+ FPGA_GMAC_INTERRUPT_PORT2,
+ FPGA_GMAC_INTERRUPT_PORT3
+};
+
+struct _cmac_instance {
+ u32 index;
+};
+
+static int mac_intr_enable(struct cmac *mac)
+{
+ u32 mac_intr;
+
+ if (t1_is_asic(mac->adapter)) {
+ /* ASIC */
+
+ /* We don't use the on chip MAC for ASIC products. */
+ } else {
+ /* FPGA */
+
+ /* Set parent gmac interrupt. */
+ mac_intr = readl(mac->adapter->regs + A_PL_ENABLE);
+ mac_intr |= FPGA_PCIX_INTERRUPT_GMAC;
+ writel(mac_intr, mac->adapter->regs + A_PL_ENABLE);
+
+ mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE);
+ mac_intr |= static_aPorts[mac->instance->index];
+ writel(mac_intr,
+ mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE);
+ }
+
+ return 0;
+}
+
+static int mac_intr_disable(struct cmac *mac)
+{
+ u32 mac_intr;
+
+ if (t1_is_asic(mac->adapter)) {
+ /* ASIC */
+
+ /* We don't use the on chip MAC for ASIC products. */
+ } else {
+ /* FPGA */
+
+ /* Set parent gmac interrupt. */
+ mac_intr = readl(mac->adapter->regs + A_PL_ENABLE);
+ mac_intr &= ~FPGA_PCIX_INTERRUPT_GMAC;
+ writel(mac_intr, mac->adapter->regs + A_PL_ENABLE);
+
+ mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE);
+ mac_intr &= ~(static_aPorts[mac->instance->index]);
+ writel(mac_intr,
+ mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE);
+ }
+
+ return 0;
+}
+
+static int mac_intr_clear(struct cmac *mac)
+{
+ u32 mac_intr;
+
+ if (t1_is_asic(mac->adapter)) {
+ /* ASIC */
+
+ /* We don't use the on chip MAC for ASIC products. */
+ } else {
+ /* FPGA */
+
+ /* Set parent gmac interrupt. */
+ writel(FPGA_PCIX_INTERRUPT_GMAC,
+ mac->adapter->regs + A_PL_CAUSE);
+ mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE);
+ mac_intr |= (static_aPorts[mac->instance->index]);
+ writel(mac_intr,
+ mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE);
+ }
+
+ return 0;
+}
+
+static int mac_get_address(struct cmac *mac, u8 addr[6])
+{
+ u32 data32_lo, data32_hi;
+
+ data32_lo = readl(mac->adapter->regs
+ + MAC_REG_IDLO(mac->instance->index));
+ data32_hi = readl(mac->adapter->regs
+ + MAC_REG_IDHI(mac->instance->index));
+
+ addr[0] = (u8) ((data32_hi >> 8) & 0xFF);
+ addr[1] = (u8) ((data32_hi) & 0xFF);
+ addr[2] = (u8) ((data32_lo >> 24) & 0xFF);
+ addr[3] = (u8) ((data32_lo >> 16) & 0xFF);
+ addr[4] = (u8) ((data32_lo >> 8) & 0xFF);
+ addr[5] = (u8) ((data32_lo) & 0xFF);
+ return 0;
+}
+
+static int mac_reset(struct cmac *mac)
+{
+ u32 data32;
+ int mac_in_reset, time_out = 100;
+ int idx = mac->instance->index;
+
+ data32 = readl(mac->adapter->regs + MAC_REG_CSR(idx));
+ writel(data32 | F_MAC_RESET,
+ mac->adapter->regs + MAC_REG_CSR(idx));
+
+ do {
+ data32 = readl(mac->adapter->regs + MAC_REG_CSR(idx));
+
+ mac_in_reset = data32 & F_MAC_RESET;
+ if (mac_in_reset)
+ udelay(1);
+ } while (mac_in_reset && --time_out);
+
+ if (mac_in_reset) {
+ CH_ERR("%s: MAC %d reset timed out\n",
+ mac->adapter->name, idx);
+ return 2;
+ }
+
+ return 0;
+}
+
+static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
+{
+ u32 val;
+
+ val = readl(mac->adapter->regs
+ + MAC_REG_CSR(mac->instance->index));
+ val &= ~(F_MAC_PROMISC | F_MAC_MC_ENABLE);
+ val |= V_MAC_PROMISC(t1_rx_mode_promisc(rm) != 0);
+ val |= V_MAC_MC_ENABLE(t1_rx_mode_allmulti(rm) != 0);
+ writel(val,
+ mac->adapter->regs + MAC_REG_CSR(mac->instance->index));
+
+ return 0;
+}
+
+static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex,
+ int fc)
+{
+ u32 data32;
+
+ data32 = readl(mac->adapter->regs
+ + MAC_REG_CSR(mac->instance->index));
+ data32 &= ~(F_MAC_HALF_DUPLEX | V_MAC_SPEED(M_MAC_SPEED) |
+ V_INTERFACE(M_INTERFACE) | F_MAC_TX_PAUSE_ENABLE |
+ F_MAC_RX_PAUSE_ENABLE);
+
+ switch (speed) {
+ case SPEED_10:
+ case SPEED_100:
+ data32 |= V_INTERFACE(MAC_CSR_INTERFACE_MII);
+ data32 |= V_MAC_SPEED(speed == SPEED_10 ? 0 : 1);
+ break;
+ case SPEED_1000:
+ data32 |= V_INTERFACE(MAC_CSR_INTERFACE_GMII);
+ data32 |= V_MAC_SPEED(2);
+ break;
+ }
+
+ if (duplex >= 0)
+ data32 |= V_MAC_HALF_DUPLEX(duplex == DUPLEX_HALF);
+
+ if (fc >= 0) {
+ data32 |= V_MAC_RX_PAUSE_ENABLE((fc & PAUSE_RX) != 0);
+ data32 |= V_MAC_TX_PAUSE_ENABLE((fc & PAUSE_TX) != 0);
+ }
+
+ writel(data32,
+ mac->adapter->regs + MAC_REG_CSR(mac->instance->index));
+ return 0;
+}
+
+static int mac_enable(struct cmac *mac, int which)
+{
+ u32 val;
+
+ val = readl(mac->adapter->regs
+ + MAC_REG_CSR(mac->instance->index));
+ if (which & MAC_DIRECTION_RX)
+ val |= F_MAC_RX_ENABLE;
+ if (which & MAC_DIRECTION_TX)
+ val |= F_MAC_TX_ENABLE;
+ writel(val,
+ mac->adapter->regs + MAC_REG_CSR(mac->instance->index));
+ return 0;
+}
+
+static int mac_disable(struct cmac *mac, int which)
+{
+ u32 val;
+
+ val = readl(mac->adapter->regs
+ + MAC_REG_CSR(mac->instance->index));
+ if (which & MAC_DIRECTION_RX)
+ val &= ~F_MAC_RX_ENABLE;
+ if (which & MAC_DIRECTION_TX)
+ val &= ~F_MAC_TX_ENABLE;
+ writel(val,
+ mac->adapter->regs + MAC_REG_CSR(mac->instance->index));
+ return 0;
+}
+
+#if 0
+static int mac_set_ifs(struct cmac *mac, u32 mode)
+{
+ t1_write_reg_4(mac->adapter,
+ MAC_REG_IFS(mac->instance->index),
+ mode);
+ return 0;
+}
+
+static int mac_enable_isl(struct cmac *mac)
+{
+ u32 data32 = readl(mac->adapter->regs
+ + MAC_REG_CSR(mac->instance->index));
+ data32 |= F_MAC_RX_ENABLE | F_MAC_TX_ENABLE;
+ t1_write_reg_4(mac->adapter,
+ MAC_REG_CSR(mac->instance->index),
+ data32);
+ return 0;
+}
+#endif
+
+static int mac_set_mtu(struct cmac *mac, int mtu)
+{
+ if (mtu > 9600)
+ return -EINVAL;
+ writel(mtu + ETH_HLEN + VLAN_HLEN,
+ mac->adapter->regs + MAC_REG_LARGEFRAMELENGTH(mac->instance->index));
+
+ return 0;
+}
+
+static const struct cmac_statistics *mac_update_statistics(struct cmac *mac,
+ int flag)
+{
+ struct mac_statistics st;
+ u32 *p = (u32 *) & st, i;
+
+ writel(0,
+ mac->adapter->regs + MAC_REG_RMCNT(mac->instance->index));
+
+ for (i = 0; i < sizeof(st) / sizeof(u32); i++)
+ *p++ = readl(mac->adapter->regs
+ + MAC_REG_RMDATA(mac->instance->index));
+
+ /* XXX convert stats */
+ return &mac->stats;
+}
+
+static void mac_destroy(struct cmac *mac)
+{
+ kfree(mac);
+}
+
+static struct cmac_ops chelsio_mac_ops = {
+ .destroy = mac_destroy,
+ .reset = mac_reset,
+ .interrupt_enable = mac_intr_enable,
+ .interrupt_disable = mac_intr_disable,
+ .interrupt_clear = mac_intr_clear,
+ .enable = mac_enable,
+ .disable = mac_disable,
+ .set_mtu = mac_set_mtu,
+ .set_rx_mode = mac_set_rx_mode,
+ .set_speed_duplex_fc = mac_set_speed_duplex_fc,
+ .macaddress_get = mac_get_address,
+ .statistics_update = mac_update_statistics,
+};
+
+static struct cmac *mac_create(adapter_t *adapter, int index)
+{
+ struct cmac *mac;
+ u32 data32;
+
+ if (index >= 4)
+ return NULL;
+
+ mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ mac->ops = &chelsio_mac_ops;
+ mac->instance = (cmac_instance *) (mac + 1);
+
+ mac->instance->index = index;
+ mac->adapter = adapter;
+
+ data32 = readl(adapter->regs + MAC_REG_CSR(mac->instance->index));
+ data32 &= ~(F_MAC_RESET | F_MAC_PROMISC | F_MAC_LB_ENABLE |
+ F_MAC_RX_ENABLE | F_MAC_TX_ENABLE);
+ data32 |= F_MAC_JUMBO_ENABLE;
+ writel(data32, adapter->regs + MAC_REG_CSR(mac->instance->index));
+
+ /* Initialize the random backoff seed. */
+ data32 = 0x55aa + (3 * index);
+ writel(data32,
+ adapter->regs + MAC_REG_GMRANDBACKOFFSEED(mac->instance->index));
+
+ /* Check to see if the mac address needs to be set manually. */
+ data32 = readl(adapter->regs + MAC_REG_IDLO(mac->instance->index));
+ if (data32 == 0 || data32 == 0xffffffff) {
+ /*
+ * Add a default MAC address if we can't read one.
+ */
+ writel(0x43FFFFFF - index,
+ adapter->regs + MAC_REG_IDLO(mac->instance->index));
+ writel(0x0007,
+ adapter->regs + MAC_REG_IDHI(mac->instance->index));
+ }
+
+ (void) mac_set_mtu(mac, 1500);
+ return mac;
+}
+
+struct gmac t1_chelsio_mac_ops = {
+ .create = mac_create
+};
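
The file above hides the ASIC MAC behind an ops table: mac_create() allocates the state, points mac->ops at chelsio_mac_ops, and the rest of the driver only ever calls through those function pointers, with t1_chelsio_mac_ops.create as the single public entry point. A minimal standalone sketch of that pattern follows; the toy_* names are made up for illustration and are not part of the driver.

    /* Toy illustration of the ops-table pattern used by mac.c above. */
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_mac;

    struct toy_mac_ops {
            int  (*enable)(struct toy_mac *mac);
            int  (*set_mtu)(struct toy_mac *mac, int mtu);
            void (*destroy)(struct toy_mac *mac);
    };

    struct toy_mac {
            const struct toy_mac_ops *ops;
            int index;
            int mtu;
    };

    static int toy_enable(struct toy_mac *mac)
    {
            printf("MAC %d enabled\n", mac->index);
            return 0;
    }

    static int toy_set_mtu(struct toy_mac *mac, int mtu)
    {
            if (mtu > 9600)         /* same limit as mac_set_mtu() above */
                    return -1;
            mac->mtu = mtu;
            return 0;
    }

    static void toy_destroy(struct toy_mac *mac)
    {
            free(mac);
    }

    static const struct toy_mac_ops toy_ops = {
            .enable  = toy_enable,
            .set_mtu = toy_set_mtu,
            .destroy = toy_destroy,
    };

    /* analogue of mac_create(): allocate, attach the ops table, preset MTU */
    static struct toy_mac *toy_create(int index)
    {
            struct toy_mac *mac = calloc(1, sizeof(*mac));

            if (!mac)
                    return NULL;
            mac->ops = &toy_ops;
            mac->index = index;
            mac->ops->set_mtu(mac, 1500);
            return mac;
    }

    int main(void)
    {
            struct toy_mac *mac = toy_create(0);

            if (!mac)
                    return 1;
            mac->ops->enable(mac);
            mac->ops->destroy(mac);
            return 0;
    }

The indirection is what lets the common code drive the different MACs in this patch (mac.c, pm3393.c, vsc7326.c) identically; only the create routine differs per board.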
diff --git a/drivers/net/chelsio/mv88e1xxx.c b/drivers/net/chelsio/mv88e1xxx.c
new file mode 100644
index 000000000000..28ac93ff7c4f
--- /dev/null
+++ b/drivers/net/chelsio/mv88e1xxx.c
@@ -0,0 +1,397 @@
+/* $Date: 2005/10/24 23:18:13 $ $RCSfile: mv88e1xxx.c,v $ $Revision: 1.49 $ */
+#include "common.h"
+#include "mv88e1xxx.h"
+#include "cphy.h"
+#include "elmer0.h"
+
+/* MV88E1XXX MDI crossover register values */
+#define CROSSOVER_MDI 0
+#define CROSSOVER_MDIX 1
+#define CROSSOVER_AUTO 3
+
+#define INTR_ENABLE_MASK 0x6CA0
+
+/*
+ * Set the bits given by 'bitval' in PHY register 'reg'.
+ */
+static void mdio_set_bit(struct cphy *cphy, int reg, u32 bitval)
+{
+ u32 val;
+
+ (void) simple_mdio_read(cphy, reg, &val);
+ (void) simple_mdio_write(cphy, reg, val | bitval);
+}
+
+/*
+ * Clear the bits given by 'bitval' in PHY register 'reg'.
+ */
+static void mdio_clear_bit(struct cphy *cphy, int reg, u32 bitval)
+{
+ u32 val;
+
+ (void) simple_mdio_read(cphy, reg, &val);
+ (void) simple_mdio_write(cphy, reg, val & ~bitval);
+}
+
+/*
+ * NAME: phy_reset
+ *
+ * DESC: Reset the given PHY's port. NOTE: This is not a global
+ * chip reset.
+ *
+ * PARAMS: cphy - Pointer to PHY instance data.
+ *
+ * RETURN: 0 - Successful reset.
+ * -1 - Timeout.
+ */
+static int mv88e1xxx_reset(struct cphy *cphy, int wait)
+{
+ u32 ctl;
+ int time_out = 1000;
+
+ mdio_set_bit(cphy, MII_BMCR, BMCR_RESET);
+
+ do {
+ (void) simple_mdio_read(cphy, MII_BMCR, &ctl);
+ ctl &= BMCR_RESET;
+ if (ctl)
+ udelay(1);
+ } while (ctl && --time_out);
+
+ return ctl ? -1 : 0;
+}
+
+static int mv88e1xxx_interrupt_enable(struct cphy *cphy)
+{
+ /* Enable PHY interrupts. */
+ (void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER,
+ INTR_ENABLE_MASK);
+
+ /* Enable Marvell interrupts through Elmer0. */
+ if (t1_is_asic(cphy->adapter)) {
+ u32 elmer;
+
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+ elmer |= ELMER0_GP_BIT1;
+ if (is_T2(cphy->adapter)) {
+ elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
+ }
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+ }
+ return 0;
+}
+
+static int mv88e1xxx_interrupt_disable(struct cphy *cphy)
+{
+ /* Disable all phy interrupts. */
+ (void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, 0);
+
+ /* Disable Marvell interrupts through Elmer0. */
+ if (t1_is_asic(cphy->adapter)) {
+ u32 elmer;
+
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+ elmer &= ~ELMER0_GP_BIT1;
+ if (is_T2(cphy->adapter)) {
+ elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
+ }
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+ }
+ return 0;
+}
+
+static int mv88e1xxx_interrupt_clear(struct cphy *cphy)
+{
+ u32 elmer;
+
+ /* Clear PHY interrupts by reading the register. */
+ (void) simple_mdio_read(cphy,
+ MV88E1XXX_INTERRUPT_STATUS_REGISTER, &elmer);
+
+ /* Clear Marvell interrupts through Elmer0. */
+ if (t1_is_asic(cphy->adapter)) {
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+ elmer |= ELMER0_GP_BIT1;
+ if (is_T2(cphy->adapter)) {
+ elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
+ }
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+ }
+ return 0;
+}
+
+/*
+ * Set the PHY speed and duplex. This also disables auto-negotiation, except
+ * for 1Gb/s, where auto-negotiation is mandatory.
+ */
+static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ u32 ctl;
+
+ (void) simple_mdio_read(phy, MII_BMCR, &ctl);
+ if (speed >= 0) {
+ ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+ if (speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+ else if (speed == SPEED_1000)
+ ctl |= BMCR_SPEED1000;
+ }
+ if (duplex >= 0) {
+ ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
+ if (duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ }
+ if (ctl & BMCR_SPEED1000) /* auto-negotiation required for 1Gb/s */
+ ctl |= BMCR_ANENABLE;
+ (void) simple_mdio_write(phy, MII_BMCR, ctl);
+ return 0;
+}
+
+static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover)
+{
+ u32 data32;
+
+ (void) simple_mdio_read(cphy,
+ MV88E1XXX_SPECIFIC_CNTRL_REGISTER, &data32);
+ data32 &= ~V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE);
+ data32 |= V_PSCR_MDI_XOVER_MODE(crossover);
+ (void) simple_mdio_write(cphy,
+ MV88E1XXX_SPECIFIC_CNTRL_REGISTER, data32);
+ return 0;
+}
+
+static int mv88e1xxx_autoneg_enable(struct cphy *cphy)
+{
+ u32 ctl;
+
+ (void) mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO);
+
+ (void) simple_mdio_read(cphy, MII_BMCR, &ctl);
+ /* restart autoneg for change to take effect */
+ ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
+ (void) simple_mdio_write(cphy, MII_BMCR, ctl);
+ return 0;
+}
+
+static int mv88e1xxx_autoneg_disable(struct cphy *cphy)
+{
+ u32 ctl;
+
+ /*
+ * Crossover *must* be set to manual in order to disable auto-neg.
+ * The Alaska FAQs document highlights this point.
+ */
+ (void) mv88e1xxx_crossover_set(cphy, CROSSOVER_MDI);
+
+ /*
+ * Must include autoneg reset when disabling auto-neg. This
+ * is described in the Alaska FAQ document.
+ */
+ (void) simple_mdio_read(cphy, MII_BMCR, &ctl);
+ ctl &= ~BMCR_ANENABLE;
+ (void) simple_mdio_write(cphy, MII_BMCR, ctl | BMCR_ANRESTART);
+ return 0;
+}
+
+static int mv88e1xxx_autoneg_restart(struct cphy *cphy)
+{
+ mdio_set_bit(cphy, MII_BMCR, BMCR_ANRESTART);
+ return 0;
+}
+
+static int mv88e1xxx_advertise(struct cphy *phy, unsigned int advertise_map)
+{
+ u32 val = 0;
+
+ if (advertise_map &
+ (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
+ (void) simple_mdio_read(phy, MII_GBCR, &val);
+ val &= ~(GBCR_ADV_1000HALF | GBCR_ADV_1000FULL);
+ if (advertise_map & ADVERTISED_1000baseT_Half)
+ val |= GBCR_ADV_1000HALF;
+ if (advertise_map & ADVERTISED_1000baseT_Full)
+ val |= GBCR_ADV_1000FULL;
+ }
+ (void) simple_mdio_write(phy, MII_GBCR, val);
+
+ val = 1;
+ if (advertise_map & ADVERTISED_10baseT_Half)
+ val |= ADVERTISE_10HALF;
+ if (advertise_map & ADVERTISED_10baseT_Full)
+ val |= ADVERTISE_10FULL;
+ if (advertise_map & ADVERTISED_100baseT_Half)
+ val |= ADVERTISE_100HALF;
+ if (advertise_map & ADVERTISED_100baseT_Full)
+ val |= ADVERTISE_100FULL;
+ if (advertise_map & ADVERTISED_PAUSE)
+ val |= ADVERTISE_PAUSE;
+ if (advertise_map & ADVERTISED_ASYM_PAUSE)
+ val |= ADVERTISE_PAUSE_ASYM;
+ (void) simple_mdio_write(phy, MII_ADVERTISE, val);
+ return 0;
+}
+
+static int mv88e1xxx_set_loopback(struct cphy *cphy, int on)
+{
+ if (on)
+ mdio_set_bit(cphy, MII_BMCR, BMCR_LOOPBACK);
+ else
+ mdio_clear_bit(cphy, MII_BMCR, BMCR_LOOPBACK);
+ return 0;
+}
+
+static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ u32 status;
+ int sp = -1, dplx = -1, pause = 0;
+
+ (void) simple_mdio_read(cphy,
+ MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
+ if ((status & V_PSSR_STATUS_RESOLVED) != 0) {
+ if (status & V_PSSR_RX_PAUSE)
+ pause |= PAUSE_RX;
+ if (status & V_PSSR_TX_PAUSE)
+ pause |= PAUSE_TX;
+ dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+ sp = G_PSSR_SPEED(status);
+ if (sp == 0)
+ sp = SPEED_10;
+ else if (sp == 1)
+ sp = SPEED_100;
+ else
+ sp = SPEED_1000;
+ }
+ if (link_ok)
+ *link_ok = (status & V_PSSR_LINK) != 0;
+ if (speed)
+ *speed = sp;
+ if (duplex)
+ *duplex = dplx;
+ if (fc)
+ *fc = pause;
+ return 0;
+}
+
+static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable)
+{
+ u32 val;
+
+ (void) simple_mdio_read(cphy,
+ MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, &val);
+
+ /*
+ * Set the downshift counter to 2 so we try to establish Gb link
+ * twice before downshifting.
+ */
+ val &= ~(V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT));
+
+ if (downshift_enable)
+ val |= V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2);
+ (void) simple_mdio_write(cphy,
+ MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, val);
+ return 0;
+}
+
+static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
+{
+ int cphy_cause = 0;
+ u32 status;
+
+ /*
+ * Loop until cause reads zero. Need to handle bouncing interrupts.
+ */
+ while (1) {
+ u32 cause;
+
+ (void) simple_mdio_read(cphy,
+ MV88E1XXX_INTERRUPT_STATUS_REGISTER,
+ &cause);
+ cause &= INTR_ENABLE_MASK;
+ if (!cause) break;
+
+ if (cause & MV88E1XXX_INTR_LINK_CHNG) {
+ (void) simple_mdio_read(cphy,
+ MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
+
+ if (status & MV88E1XXX_INTR_LINK_CHNG) {
+ cphy->state |= PHY_LINK_UP;
+ } else {
+ cphy->state &= ~PHY_LINK_UP;
+ if (cphy->state & PHY_AUTONEG_EN)
+ cphy->state &= ~PHY_AUTONEG_RDY;
+ cphy_cause |= cphy_cause_link_change;
+ }
+ }
+
+ if (cause & MV88E1XXX_INTR_AUTONEG_DONE)
+ cphy->state |= PHY_AUTONEG_RDY;
+
+ if ((cphy->state & (PHY_LINK_UP | PHY_AUTONEG_RDY)) ==
+ (PHY_LINK_UP | PHY_AUTONEG_RDY))
+ cphy_cause |= cphy_cause_link_change;
+ }
+ return cphy_cause;
+}
+
+static void mv88e1xxx_destroy(struct cphy *cphy)
+{
+ kfree(cphy);
+}
+
+static struct cphy_ops mv88e1xxx_ops = {
+ .destroy = mv88e1xxx_destroy,
+ .reset = mv88e1xxx_reset,
+ .interrupt_enable = mv88e1xxx_interrupt_enable,
+ .interrupt_disable = mv88e1xxx_interrupt_disable,
+ .interrupt_clear = mv88e1xxx_interrupt_clear,
+ .interrupt_handler = mv88e1xxx_interrupt_handler,
+ .autoneg_enable = mv88e1xxx_autoneg_enable,
+ .autoneg_disable = mv88e1xxx_autoneg_disable,
+ .autoneg_restart = mv88e1xxx_autoneg_restart,
+ .advertise = mv88e1xxx_advertise,
+ .set_loopback = mv88e1xxx_set_loopback,
+ .set_speed_duplex = mv88e1xxx_set_speed_duplex,
+ .get_link_status = mv88e1xxx_get_link_status,
+};
+
+static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
+ struct mdio_ops *mdio_ops)
+{
+ struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
+
+ if (!cphy) return NULL;
+
+ cphy_init(cphy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops);
+
+ /* Configure particular PHY's to run in a different mode. */
+ if ((board_info(adapter)->caps & SUPPORTED_TP) &&
+ board_info(adapter)->chip_phy == CHBT_PHY_88E1111) {
+ /*
+ * Configure the PHY transmitter as class A to reduce EMI.
+ */
+ (void) simple_mdio_write(cphy,
+ MV88E1XXX_EXTENDED_ADDR_REGISTER, 0xB);
+ (void) simple_mdio_write(cphy,
+ MV88E1XXX_EXTENDED_REGISTER, 0x8004);
+ }
+ (void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */
+
+ /* LED */
+ if (is_T2(adapter)) {
+ (void) simple_mdio_write(cphy,
+ MV88E1XXX_LED_CONTROL_REGISTER, 0x1);
+ }
+
+ return cphy;
+}
+
+static int mv88e1xxx_phy_reset(adapter_t* adapter)
+{
+ return 0;
+}
+
+struct gphy t1_mv88e1xxx_ops = {
+ mv88e1xxx_phy_create,
+ mv88e1xxx_phy_reset
+};
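
For reference, a small standalone sketch of the BMCR speed/duplex encoding that mv88e1xxx_set_speed_duplex() above writes. The constants mirror the standard <linux/mii.h> values; forcing 1000 Mb/s keeps auto-negotiation enabled because 1000BASE-T requires it, which is the same choice the driver makes. bmcr_for() is a made-up helper, illustration only.

    #include <stdio.h>

    #define BMCR_SPEED1000  0x0040  /* MSB of speed select (1000 Mb/s) */
    #define BMCR_FULLDPLX   0x0100  /* full duplex */
    #define BMCR_ANENABLE   0x1000  /* enable auto-negotiation */
    #define BMCR_SPEED100   0x2000  /* select 100 Mb/s */

    static unsigned int bmcr_for(int speed, int full_duplex)
    {
            unsigned int ctl = 0;

            if (speed == 100)
                    ctl |= BMCR_SPEED100;
            else if (speed == 1000)
                    ctl |= BMCR_SPEED1000 | BMCR_ANENABLE;  /* 1G needs autoneg */
            if (full_duplex)
                    ctl |= BMCR_FULLDPLX;
            return ctl;
    }

    int main(void)
    {
            printf("10/half   -> 0x%04x\n", bmcr_for(10, 0));
            printf("100/full  -> 0x%04x\n", bmcr_for(100, 1));
            printf("1000/full -> 0x%04x\n", bmcr_for(1000, 1));
            return 0;
    }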
diff --git a/drivers/net/chelsio/mv88e1xxx.h b/drivers/net/chelsio/mv88e1xxx.h
new file mode 100644
index 000000000000..967cc4286359
--- /dev/null
+++ b/drivers/net/chelsio/mv88e1xxx.h
@@ -0,0 +1,127 @@
+/* $Date: 2005/03/07 23:59:05 $ $RCSfile: mv88e1xxx.h,v $ $Revision: 1.13 $ */
+#ifndef CHELSIO_MV88E1XXX_H
+#define CHELSIO_MV88E1XXX_H
+
+#ifndef BMCR_SPEED1000
+# define BMCR_SPEED1000 0x40
+#endif
+
+#ifndef ADVERTISE_PAUSE
+# define ADVERTISE_PAUSE 0x400
+#endif
+#ifndef ADVERTISE_PAUSE_ASYM
+# define ADVERTISE_PAUSE_ASYM 0x800
+#endif
+
+/* Gigabit MII registers */
+#define MII_GBCR 9 /* 1000Base-T control register */
+#define MII_GBSR 10 /* 1000Base-T status register */
+
+/* 1000Base-T control register fields */
+#define GBCR_ADV_1000HALF 0x100
+#define GBCR_ADV_1000FULL 0x200
+#define GBCR_PREFER_MASTER 0x400
+#define GBCR_MANUAL_AS_MASTER 0x800
+#define GBCR_MANUAL_CONFIG_ENABLE 0x1000
+
+/* 1000Base-T status register fields */
+#define GBSR_LP_1000HALF 0x400
+#define GBSR_LP_1000FULL 0x800
+#define GBSR_REMOTE_OK 0x1000
+#define GBSR_LOCAL_OK 0x2000
+#define GBSR_LOCAL_MASTER 0x4000
+#define GBSR_MASTER_FAULT 0x8000
+
+/* Marvell PHY interrupt status bits. */
+#define MV88E1XXX_INTR_JABBER 0x0001
+#define MV88E1XXX_INTR_POLARITY_CHNG 0x0002
+#define MV88E1XXX_INTR_ENG_DETECT_CHNG 0x0010
+#define MV88E1XXX_INTR_DOWNSHIFT 0x0020
+#define MV88E1XXX_INTR_MDI_XOVER_CHNG 0x0040
+#define MV88E1XXX_INTR_FIFO_OVER_UNDER 0x0080
+#define MV88E1XXX_INTR_FALSE_CARRIER 0x0100
+#define MV88E1XXX_INTR_SYMBOL_ERROR 0x0200
+#define MV88E1XXX_INTR_LINK_CHNG 0x0400
+#define MV88E1XXX_INTR_AUTONEG_DONE 0x0800
+#define MV88E1XXX_INTR_PAGE_RECV 0x1000
+#define MV88E1XXX_INTR_DUPLEX_CHNG 0x2000
+#define MV88E1XXX_INTR_SPEED_CHNG 0x4000
+#define MV88E1XXX_INTR_AUTONEG_ERR 0x8000
+
+/* Marvell PHY specific registers. */
+#define MV88E1XXX_SPECIFIC_CNTRL_REGISTER 16
+#define MV88E1XXX_SPECIFIC_STATUS_REGISTER 17
+#define MV88E1XXX_INTERRUPT_ENABLE_REGISTER 18
+#define MV88E1XXX_INTERRUPT_STATUS_REGISTER 19
+#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER 20
+#define MV88E1XXX_RECV_ERR_CNTR_REGISTER 21
+#define MV88E1XXX_RES_REGISTER 22
+#define MV88E1XXX_GLOBAL_STATUS_REGISTER 23
+#define MV88E1XXX_LED_CONTROL_REGISTER 24
+#define MV88E1XXX_MANUAL_LED_OVERRIDE_REGISTER 25
+#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER 26
+#define MV88E1XXX_EXT_PHY_SPECIFIC_STATUS_REGISTER 27
+#define MV88E1XXX_VIRTUAL_CABLE_TESTER_REGISTER 28
+#define MV88E1XXX_EXTENDED_ADDR_REGISTER 29
+#define MV88E1XXX_EXTENDED_REGISTER 30
+
+/* PHY specific control register fields */
+#define S_PSCR_MDI_XOVER_MODE 5
+#define M_PSCR_MDI_XOVER_MODE 0x3
+#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE)
+#define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE)
+
+/* Extended PHY specific control register fields */
+#define S_DOWNSHIFT_ENABLE 8
+#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE)
+
+#define S_DOWNSHIFT_CNT 9
+#define M_DOWNSHIFT_CNT 0x7
+#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT)
+#define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT)
+
+/* PHY specific status register fields */
+#define S_PSSR_JABBER 0
+#define V_PSSR_JABBER (1 << S_PSSR_JABBER)
+
+#define S_PSSR_POLARITY 1
+#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY)
+
+#define S_PSSR_RX_PAUSE 2
+#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE)
+
+#define S_PSSR_TX_PAUSE 3
+#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE)
+
+#define S_PSSR_ENERGY_DETECT 4
+#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT)
+
+#define S_PSSR_DOWNSHIFT_STATUS 5
+#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS)
+
+#define S_PSSR_MDI 6
+#define V_PSSR_MDI (1 << S_PSSR_MDI)
+
+#define S_PSSR_CABLE_LEN 7
+#define M_PSSR_CABLE_LEN 0x7
+#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN)
+#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN)
+
+#define S_PSSR_LINK 10
+#define V_PSSR_LINK (1 << S_PSSR_LINK)
+
+#define S_PSSR_STATUS_RESOLVED 11
+#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED)
+
+#define S_PSSR_PAGE_RECEIVED 12
+#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED)
+
+#define S_PSSR_DUPLEX 13
+#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX)
+
+#define S_PSSR_SPEED 14
+#define M_PSSR_SPEED 0x3
+#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED)
+#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED)
+
+#endif
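
The S_/M_/V_/F_/G_ macros above follow the driver-wide convention: S_* is a field's bit offset, M_* its mask, V_*(x) positions a value into the field (single-bit fields are plain V_* constants), and G_*(x) extracts the field from a register word. A minimal userspace sketch of decoding the PHY specific status register this way; it assumes it is compiled next to mv88e1xxx.h, and the register value is made up for illustration.

    /* Speed code 2 corresponds to 1000 Mb/s in mv88e1xxx_get_link_status(). */
    #include <stdio.h>
    #include "mv88e1xxx.h"

    int main(void)
    {
            unsigned int pssr = V_PSSR_STATUS_RESOLVED | V_PSSR_LINK |
                                V_PSSR_DUPLEX | V_PSSR_SPEED(2);

            if (pssr & V_PSSR_STATUS_RESOLVED)
                    printf("link %s, %s duplex, speed code %u\n",
                           (pssr & V_PSSR_LINK) ? "up" : "down",
                           (pssr & V_PSSR_DUPLEX) ? "full" : "half",
                           G_PSSR_SPEED(pssr));
            return 0;
    }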
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c
index db5034282782..c8e89480d906 100644
--- a/drivers/net/chelsio/mv88x201x.c
+++ b/drivers/net/chelsio/mv88x201x.c
@@ -85,29 +85,33 @@ static int mv88x201x_reset(struct cphy *cphy, int wait)
static int mv88x201x_interrupt_enable(struct cphy *cphy)
{
- u32 elmer;
-
/* Enable PHY LASI interrupts. */
mdio_write(cphy, 0x1, 0x9002, 0x1);
/* Enable Marvell interrupts through Elmer0. */
- t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
- elmer |= ELMER0_GP_BIT6;
- t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+ if (t1_is_asic(cphy->adapter)) {
+ u32 elmer;
+
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+ elmer |= ELMER0_GP_BIT6;
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+ }
return 0;
}
static int mv88x201x_interrupt_disable(struct cphy *cphy)
{
- u32 elmer;
-
/* Disable PHY LASI interrupts. */
mdio_write(cphy, 0x1, 0x9002, 0x0);
/* Disable Marvell interrupts through Elmer0. */
- t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
- elmer &= ~ELMER0_GP_BIT6;
- t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+ if (t1_is_asic(cphy->adapter)) {
+ u32 elmer;
+
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+ elmer &= ~ELMER0_GP_BIT6;
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+ }
return 0;
}
@@ -140,9 +144,11 @@ static int mv88x201x_interrupt_clear(struct cphy *cphy)
#endif
/* Clear Marvell interrupts through Elmer0. */
- t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
- elmer |= ELMER0_GP_BIT6;
- t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+ if (t1_is_asic(cphy->adapter)) {
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+ elmer |= ELMER0_GP_BIT6;
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+ }
return 0;
}
@@ -205,11 +211,11 @@ static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr,
struct mdio_ops *mdio_ops)
{
u32 val;
- struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);
+ struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
if (!cphy)
return NULL;
- memset(cphy, 0, sizeof(*cphy));
+
cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops);
/* Commands the PHY to enable XFP's clock. */
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
new file mode 100644
index 000000000000..0b90014d5b3e
--- /dev/null
+++ b/drivers/net/chelsio/my3126.c
@@ -0,0 +1,204 @@
+/* $Date: 2005/11/12 02:13:49 $ $RCSfile: my3126.c,v $ $Revision: 1.15 $ */
+#include "cphy.h"
+#include "elmer0.h"
+#include "suni1x10gexp_regs.h"
+
+/* Port Reset */
+static int my3126_reset(struct cphy *cphy, int wait)
+{
+ /*
+ * This can be done through registers. It is not required since
+ * a full chip reset is used.
+ */
+ return (0);
+}
+
+static int my3126_interrupt_enable(struct cphy *cphy)
+{
+ schedule_delayed_work(&cphy->phy_update, HZ/30);
+ t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo);
+ return (0);
+}
+
+static int my3126_interrupt_disable(struct cphy *cphy)
+{
+ cancel_rearming_delayed_work(&cphy->phy_update);
+ return (0);
+}
+
+static int my3126_interrupt_clear(struct cphy *cphy)
+{
+ return (0);
+}
+
+#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
+
+static int my3126_interrupt_handler(struct cphy *cphy)
+{
+ u32 val;
+ u16 val16;
+ u16 status;
+ u32 act_count;
+ adapter_t *adapter;
+ adapter = cphy->adapter;
+
+ if (cphy->count == 50) {
+ mdio_read(cphy, 0x1, 0x1, &val);
+ val16 = (u16) val;
+ status = cphy->bmsr ^ val16;
+
+ if (status & BMSR_LSTATUS)
+ t1_link_changed(adapter, 0);
+ cphy->bmsr = val16;
+
+ /* We have only enabled link change interrupts, so it
+ * must be that the link status has changed.
+ */
+ cphy->count = 0;
+ }
+
+ t1_tpi_write(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_CONTROL),
+ SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
+ t1_tpi_read(adapter,
+ OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW), &act_count);
+ t1_tpi_read(adapter,
+ OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW), &val);
+ act_count += val;
+
+ /* Populate elmer_gpo with the register value */
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ cphy->elmer_gpo = val;
+
+ if ((val & (1 << 8)) || (val & (1 << 19)) ||
+ (cphy->act_count == act_count) || cphy->act_on) {
+ if (is_T2(adapter))
+ val |= (1 << 9);
+ else if (t1_is_T1B(adapter))
+ val |= (1 << 20);
+ cphy->act_on = 0;
+ } else {
+ if (is_T2(adapter))
+ val &= ~(1 << 9);
+ else if (t1_is_T1B(adapter))
+ val &= ~(1 << 20);
+ cphy->act_on = 1;
+ }
+
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ cphy->elmer_gpo = val;
+ cphy->act_count = act_count;
+ cphy->count++;
+
+ return cphy_cause_link_change;
+}
+
+static void my3126_poll(void *arg)
+{
+ my3126_interrupt_handler(arg);
+}
+
+static int my3126_set_loopback(struct cphy *cphy, int on)
+{
+ return (0);
+}
+
+/* Check link status and drive the activity LED. */
+static int my3126_get_link_status(struct cphy *cphy,
+ int *link_ok, int *speed, int *duplex, int *fc)
+{
+ u32 val;
+ u16 val16;
+ adapter_t *adapter;
+
+ adapter = cphy->adapter;
+ mdio_read(cphy, 0x1, 0x1, &val);
+ val16 = (u16) val;
+
+ /* Populate elmer_gpo with the register value */
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ cphy->elmer_gpo = val;
+
+ *link_ok = (val16 & BMSR_LSTATUS);
+
+ if (*link_ok) {
+ /* Turn on the LED. */
+ if (is_T2(adapter))
+ val &= ~(1 << 8);
+ else if (t1_is_T1B(adapter))
+ val &= ~(1 << 19);
+ } else {
+ /* Turn off the LED. */
+ if (is_T2(adapter))
+ val |= (1 << 8);
+ else if (t1_is_T1B(adapter))
+ val |= (1 << 19);
+ }
+
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ cphy->elmer_gpo = val;
+ *speed = SPEED_10000;
+ *duplex = DUPLEX_FULL;
+
+ /* need to add flow control */
+ if (fc)
+ *fc = PAUSE_RX | PAUSE_TX;
+
+ return (0);
+}
+
+static void my3126_destroy(struct cphy *cphy)
+{
+ kfree(cphy);
+}
+
+static struct cphy_ops my3126_ops = {
+ .destroy = my3126_destroy,
+ .reset = my3126_reset,
+ .interrupt_enable = my3126_interrupt_enable,
+ .interrupt_disable = my3126_interrupt_disable,
+ .interrupt_clear = my3126_interrupt_clear,
+ .interrupt_handler = my3126_interrupt_handler,
+ .get_link_status = my3126_get_link_status,
+ .set_loopback = my3126_set_loopback,
+};
+
+static struct cphy *my3126_phy_create(adapter_t *adapter,
+ int phy_addr, struct mdio_ops *mdio_ops)
+{
+ struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL);
+
+ if (!cphy)
+ return NULL;
+ cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
+ INIT_WORK(&cphy->phy_update, my3126_poll, cphy);
+ cphy->bmsr = 0;
+
+ return (cphy);
+}
+
+/* Chip Reset */
+static int my3126_phy_reset(adapter_t * adapter)
+{
+ u32 val;
+
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~4;
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ msleep(100);
+
+ t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
+ msleep(1000);
+
+ /* Now let's enable the laser. Delay 100us. */
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= 0x8000;
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(100);
+ return (0);
+}
+
+struct gphy t1_my3126_ops = {
+ my3126_phy_create,
+ my3126_phy_reset
+};
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 04a1404fc65e..63cabeb98afe 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -43,21 +43,7 @@
#include "elmer0.h"
#include "suni1x10gexp_regs.h"
-/* 802.3ae 10Gb/s MDIO Manageable Device(MMD)
- */
-enum {
- MMD_RESERVED,
- MMD_PMAPMD,
- MMD_WIS,
- MMD_PCS,
- MMD_PHY_XGXS, /* XGMII Extender Sublayer */
- MMD_DTE_XGXS,
-};
-
-enum {
- PHY_XGXS_CTRL_1,
- PHY_XGXS_STATUS_1
-};
+#include <linux/crc32.h>
#define OFFSET(REG_ADDR) (REG_ADDR << 2)
@@ -88,6 +74,8 @@ enum { /* RMON registers */
RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
+ RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW,
+ RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW,
TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
@@ -95,7 +83,9 @@ enum { /* RMON registers */
TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
- TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW
+ TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW,
+ TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW,
+ TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW
};
struct _cmac_instance {
@@ -124,12 +114,12 @@ static int pm3393_reset(struct cmac *cmac)
/*
* Enable interrupts for the PM3393
-
- 1. Enable PM3393 BLOCK interrupts.
- 2. Enable PM3393 Master Interrupt bit(INTE)
- 3. Enable ELMER's PM3393 bit.
- 4. Enable Terminator external interrupt.
-*/
+ *
+ * 1. Enable PM3393 BLOCK interrupts.
+ * 2. Enable PM3393 Master Interrupt bit(INTE)
+ * 3. Enable ELMER's PM3393 bit.
+ * 4. Enable Terminator external interrupt.
+ */
static int pm3393_interrupt_enable(struct cmac *cmac)
{
u32 pl_intr;
@@ -257,14 +247,12 @@ static int pm3393_interrupt_clear(struct cmac *cmac)
static int pm3393_interrupt_handler(struct cmac *cmac)
{
u32 master_intr_status;
-/*
- 1. Read master interrupt register.
- 2. Read BLOCK's interrupt status registers.
- 3. Handle BLOCK interrupts.
-*/
+
/* Read the master interrupt status register. */
pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
&master_intr_status);
+ CH_DBG(cmac->adapter, INTR, "PM3393 intr cause 0x%x\n",
+ master_intr_status);
/* TBD XXX Lets just clear everything for now */
pm3393_interrupt_clear(cmac);
@@ -307,11 +295,7 @@ static int pm3393_enable_port(struct cmac *cmac, int which)
* The PHY doesn't give us link status indication on its own so have
* the link management code query it instead.
*/
- {
- extern void link_changed(adapter_t *adapter, int port_id);
-
- link_changed(cmac->adapter, 0);
- }
+ t1_link_changed(cmac->adapter, 0);
return 0;
}
@@ -363,33 +347,6 @@ static int pm3393_set_mtu(struct cmac *cmac, int mtu)
return 0;
}
-static u32 calc_crc(u8 *b, int len)
-{
- int i;
- u32 crc = (u32)~0;
-
- /* calculate crc one bit at a time */
- while (len--) {
- crc ^= *b++;
- for (i = 0; i < 8; i++) {
- if (crc & 0x1)
- crc = (crc >> 1) ^ 0xedb88320;
- else
- crc = (crc >> 1);
- }
- }
-
- /* reverse bits */
- crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
- crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
- crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
- /* swap bytes */
- crc = (crc >> 16) | (crc << 16);
- crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00);
-
- return crc;
-}
-
static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
{
int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
@@ -423,7 +380,7 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
u16 mc_filter[4] = { 0, };
while ((addr = t1_get_next_mcaddr(rm))) {
- bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f; /* bit[23:28] */
+ bit = (ether_crc(ETH_ALEN, addr) >> 23) & 0x3f; /* bit[23:28] */
mc_filter[bit >> 4] |= 1 << (bit & 0xf);
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
@@ -471,20 +428,29 @@ static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
return 0;
}
+static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val,
+ int over)
+{
+ u32 val0, val1, val2;
+
+ t1_tpi_read(adapter, offs, &val0);
+ t1_tpi_read(adapter, offs + 4, &val1);
+ t1_tpi_read(adapter, offs + 8, &val2);
+
+ *val &= ~0ull << 40;
+ *val |= val0 & 0xffff;
+ *val |= (val1 & 0xffff) << 16;
+ *val |= (u64)(val2 & 0xff) << 32;
+
+ if (over)
+ *val += 1ull << 40;
+}
+
#define RMON_UPDATE(mac, name, stat_name) \
- { \
- t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
- t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \
- t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \
- (mac)->stats.stat_name = ((u64)val0 & 0xffff) | \
- (((u64)val1 & 0xffff) << 16) | \
- (((u64)val2 & 0xff) << 32) | \
- ((mac)->stats.stat_name & \
- (~(u64)0 << 40)); \
- if (ro & \
- ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)) \
- (mac)->stats.stat_name += ((u64)1 << 40); \
- }
+ pm3393_rmon_update((mac)->adapter, OFFSET(name), \
+ &(mac)->stats.stat_name, \
+ (ro & ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)))
+
static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
int flag)
@@ -519,6 +485,8 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
RMON_UPDATE(mac, RxFragments, RxRuntErrors);
RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
+ RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
+ RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
/* Tx stats */
RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
@@ -529,6 +497,8 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
+ RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
+ RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
return &mac->stats;
}
@@ -631,10 +601,9 @@ static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
{
struct cmac *cmac;
- cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
+ cmac = kzalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
if (!cmac)
return NULL;
- memset(cmac, 0, sizeof(*cmac));
cmac->ops = &pm3393_ops;
cmac->instance = (cmac_instance *) (cmac + 1);
@@ -815,6 +784,12 @@ static int pm3393_mac_reset(adapter_t * adapter)
successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
&& is_xaui_mabc_pll_locked);
+
+ CH_DBG(adapter, HW,
+ "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
+ "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
+ i, is_pl4_reset_finished, val, is_pl4_outof_lock,
+ is_xaui_mabc_pll_locked);
}
return successful_reset ? 0 : 1;
}
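
pm3393_rmon_update() above replaces the open-coded RMON_UPDATE body: each MSTAT counter is read as three TPI words (16 + 16 + 8 bits), merged into the low 40 bits of the driver's u64 counter, and 1 << 40 is carried in when the caller's rollover test in RMON_UPDATE says the hardware counter wrapped. A standalone sketch of just that accumulation, with made-up inputs:

    #include <stdio.h>
    #include <stdint.h>

    static void rmon_update(uint64_t *val, uint32_t lo, uint32_t mid,
                            uint32_t hi, int overflowed)
    {
            *val &= ~0ull << 40;                    /* keep software-extended upper bits */
            *val |= lo & 0xffff;                    /* bits  0..15 */
            *val |= (uint64_t)(mid & 0xffff) << 16; /* bits 16..31 */
            *val |= (uint64_t)(hi & 0xff) << 32;    /* bits 32..39 */
            if (overflowed)
                    *val += 1ull << 40;             /* hardware counter wrapped */
    }

    int main(void)
    {
            uint64_t frames = 0;

            rmon_update(&frames, 0x1234, 0x0002, 0x01, 0);
            printf("frames = 0x%llx\n", (unsigned long long)frames);
            rmon_update(&frames, 0x0010, 0x0000, 0x00, 1);  /* counter wrapped */
            printf("frames = 0x%llx\n", (unsigned long long)frames);
            return 0;
    }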
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h
index b90e11f40d1f..c80bf4d6d0a6 100644
--- a/drivers/net/chelsio/regs.h
+++ b/drivers/net/chelsio/regs.h
@@ -71,6 +71,10 @@
#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
+#define S_DISABLE_CMDQ0_GTS 8
+#define V_DISABLE_CMDQ0_GTS(x) ((x) << S_DISABLE_CMDQ0_GTS)
+#define F_DISABLE_CMDQ0_GTS V_DISABLE_CMDQ0_GTS(1U)
+
#define S_DISABLE_CMDQ1_GTS 9
#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U)
@@ -87,12 +91,18 @@
#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U)
+#define S_FL_SELECTION_CRITERIA 13
+#define V_FL_SELECTION_CRITERIA(x) ((x) << S_FL_SELECTION_CRITERIA)
+#define F_FL_SELECTION_CRITERIA V_FL_SELECTION_CRITERIA(1U)
+
#define S_ISCSI_COALESCE 14
#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
#define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U)
#define S_RX_PKT_OFFSET 15
+#define M_RX_PKT_OFFSET 0x7
#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
+#define G_RX_PKT_OFFSET(x) (((x) >> S_RX_PKT_OFFSET) & M_RX_PKT_OFFSET)
#define S_VLAN_XTRACT 18
#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
@@ -108,16 +118,114 @@
#define A_SG_FL1BASELWR 0x20
#define A_SG_FL1BASEUPR 0x24
#define A_SG_CMD0SIZE 0x28
+
+#define S_CMDQ0_SIZE 0
+#define M_CMDQ0_SIZE 0x1ffff
+#define V_CMDQ0_SIZE(x) ((x) << S_CMDQ0_SIZE)
+#define G_CMDQ0_SIZE(x) (((x) >> S_CMDQ0_SIZE) & M_CMDQ0_SIZE)
+
#define A_SG_FL0SIZE 0x2c
+
+#define S_FL0_SIZE 0
+#define M_FL0_SIZE 0x1ffff
+#define V_FL0_SIZE(x) ((x) << S_FL0_SIZE)
+#define G_FL0_SIZE(x) (((x) >> S_FL0_SIZE) & M_FL0_SIZE)
+
#define A_SG_RSPSIZE 0x30
+
+#define S_RESPQ_SIZE 0
+#define M_RESPQ_SIZE 0x1ffff
+#define V_RESPQ_SIZE(x) ((x) << S_RESPQ_SIZE)
+#define G_RESPQ_SIZE(x) (((x) >> S_RESPQ_SIZE) & M_RESPQ_SIZE)
+
#define A_SG_RSPBASELWR 0x34
#define A_SG_RSPBASEUPR 0x38
#define A_SG_FLTHRESHOLD 0x3c
+
+#define S_FL_THRESHOLD 0
+#define M_FL_THRESHOLD 0xffff
+#define V_FL_THRESHOLD(x) ((x) << S_FL_THRESHOLD)
+#define G_FL_THRESHOLD(x) (((x) >> S_FL_THRESHOLD) & M_FL_THRESHOLD)
+
#define A_SG_RSPQUEUECREDIT 0x40
+
+#define S_RESPQ_CREDIT 0
+#define M_RESPQ_CREDIT 0x1ffff
+#define V_RESPQ_CREDIT(x) ((x) << S_RESPQ_CREDIT)
+#define G_RESPQ_CREDIT(x) (((x) >> S_RESPQ_CREDIT) & M_RESPQ_CREDIT)
+
#define A_SG_SLEEPING 0x48
+
+#define S_SLEEPING 0
+#define M_SLEEPING 0xffff
+#define V_SLEEPING(x) ((x) << S_SLEEPING)
+#define G_SLEEPING(x) (((x) >> S_SLEEPING) & M_SLEEPING)
+
#define A_SG_INTRTIMER 0x4c
+
+#define S_INTERRUPT_TIMER_COUNT 0
+#define M_INTERRUPT_TIMER_COUNT 0xffffff
+#define V_INTERRUPT_TIMER_COUNT(x) ((x) << S_INTERRUPT_TIMER_COUNT)
+#define G_INTERRUPT_TIMER_COUNT(x) (((x) >> S_INTERRUPT_TIMER_COUNT) & M_INTERRUPT_TIMER_COUNT)
+
+#define A_SG_CMD0PTR 0x50
+
+#define S_CMDQ0_POINTER 0
+#define M_CMDQ0_POINTER 0xffff
+#define V_CMDQ0_POINTER(x) ((x) << S_CMDQ0_POINTER)
+#define G_CMDQ0_POINTER(x) (((x) >> S_CMDQ0_POINTER) & M_CMDQ0_POINTER)
+
+#define S_CURRENT_GENERATION_BIT 16
+#define V_CURRENT_GENERATION_BIT(x) ((x) << S_CURRENT_GENERATION_BIT)
+#define F_CURRENT_GENERATION_BIT V_CURRENT_GENERATION_BIT(1U)
+
+#define A_SG_CMD1PTR 0x54
+
+#define S_CMDQ1_POINTER 0
+#define M_CMDQ1_POINTER 0xffff
+#define V_CMDQ1_POINTER(x) ((x) << S_CMDQ1_POINTER)
+#define G_CMDQ1_POINTER(x) (((x) >> S_CMDQ1_POINTER) & M_CMDQ1_POINTER)
+
+#define A_SG_FL0PTR 0x58
+
+#define S_FL0_POINTER 0
+#define M_FL0_POINTER 0xffff
+#define V_FL0_POINTER(x) ((x) << S_FL0_POINTER)
+#define G_FL0_POINTER(x) (((x) >> S_FL0_POINTER) & M_FL0_POINTER)
+
+#define A_SG_FL1PTR 0x5c
+
+#define S_FL1_POINTER 0
+#define M_FL1_POINTER 0xffff
+#define V_FL1_POINTER(x) ((x) << S_FL1_POINTER)
+#define G_FL1_POINTER(x) (((x) >> S_FL1_POINTER) & M_FL1_POINTER)
+
+#define A_SG_VERSION 0x6c
+
+#define S_DAY 0
+#define M_DAY 0x1f
+#define V_DAY(x) ((x) << S_DAY)
+#define G_DAY(x) (((x) >> S_DAY) & M_DAY)
+
+#define S_MONTH 5
+#define M_MONTH 0xf
+#define V_MONTH(x) ((x) << S_MONTH)
+#define G_MONTH(x) (((x) >> S_MONTH) & M_MONTH)
+
#define A_SG_CMD1SIZE 0xb0
+
+#define S_CMDQ1_SIZE 0
+#define M_CMDQ1_SIZE 0x1ffff
+#define V_CMDQ1_SIZE(x) ((x) << S_CMDQ1_SIZE)
+#define G_CMDQ1_SIZE(x) (((x) >> S_CMDQ1_SIZE) & M_CMDQ1_SIZE)
+
#define A_SG_FL1SIZE 0xb4
+
+#define S_FL1_SIZE 0
+#define M_FL1_SIZE 0x1ffff
+#define V_FL1_SIZE(x) ((x) << S_FL1_SIZE)
+#define G_FL1_SIZE(x) (((x) >> S_FL1_SIZE) & M_FL1_SIZE)
+
#define A_SG_INT_ENABLE 0xb8
#define S_RESPQ_EXHAUSTED 0
@@ -144,21 +252,369 @@
#define A_SG_RESPACCUTIMER 0xc0
/* MC3 registers */
+#define A_MC3_CFG 0x100
+
+#define S_CLK_ENABLE 0
+#define V_CLK_ENABLE(x) ((x) << S_CLK_ENABLE)
+#define F_CLK_ENABLE V_CLK_ENABLE(1U)
#define S_READY 1
#define V_READY(x) ((x) << S_READY)
#define F_READY V_READY(1U)
-/* MC4 registers */
+#define S_READ_TO_WRITE_DELAY 2
+#define M_READ_TO_WRITE_DELAY 0x7
+#define V_READ_TO_WRITE_DELAY(x) ((x) << S_READ_TO_WRITE_DELAY)
+#define G_READ_TO_WRITE_DELAY(x) (((x) >> S_READ_TO_WRITE_DELAY) & M_READ_TO_WRITE_DELAY)
+
+#define S_WRITE_TO_READ_DELAY 5
+#define M_WRITE_TO_READ_DELAY 0x7
+#define V_WRITE_TO_READ_DELAY(x) ((x) << S_WRITE_TO_READ_DELAY)
+#define G_WRITE_TO_READ_DELAY(x) (((x) >> S_WRITE_TO_READ_DELAY) & M_WRITE_TO_READ_DELAY)
+#define S_MC3_BANK_CYCLE 8
+#define M_MC3_BANK_CYCLE 0xf
+#define V_MC3_BANK_CYCLE(x) ((x) << S_MC3_BANK_CYCLE)
+#define G_MC3_BANK_CYCLE(x) (((x) >> S_MC3_BANK_CYCLE) & M_MC3_BANK_CYCLE)
+
+#define S_REFRESH_CYCLE 12
+#define M_REFRESH_CYCLE 0xf
+#define V_REFRESH_CYCLE(x) ((x) << S_REFRESH_CYCLE)
+#define G_REFRESH_CYCLE(x) (((x) >> S_REFRESH_CYCLE) & M_REFRESH_CYCLE)
+
+#define S_PRECHARGE_CYCLE 16
+#define M_PRECHARGE_CYCLE 0x3
+#define V_PRECHARGE_CYCLE(x) ((x) << S_PRECHARGE_CYCLE)
+#define G_PRECHARGE_CYCLE(x) (((x) >> S_PRECHARGE_CYCLE) & M_PRECHARGE_CYCLE)
+
+#define S_ACTIVE_TO_READ_WRITE_DELAY 18
+#define V_ACTIVE_TO_READ_WRITE_DELAY(x) ((x) << S_ACTIVE_TO_READ_WRITE_DELAY)
+#define F_ACTIVE_TO_READ_WRITE_DELAY V_ACTIVE_TO_READ_WRITE_DELAY(1U)
+
+#define S_ACTIVE_TO_PRECHARGE_DELAY 19
+#define M_ACTIVE_TO_PRECHARGE_DELAY 0x7
+#define V_ACTIVE_TO_PRECHARGE_DELAY(x) ((x) << S_ACTIVE_TO_PRECHARGE_DELAY)
+#define G_ACTIVE_TO_PRECHARGE_DELAY(x) (((x) >> S_ACTIVE_TO_PRECHARGE_DELAY) & M_ACTIVE_TO_PRECHARGE_DELAY)
+
+#define S_WRITE_RECOVERY_DELAY 22
+#define M_WRITE_RECOVERY_DELAY 0x3
+#define V_WRITE_RECOVERY_DELAY(x) ((x) << S_WRITE_RECOVERY_DELAY)
+#define G_WRITE_RECOVERY_DELAY(x) (((x) >> S_WRITE_RECOVERY_DELAY) & M_WRITE_RECOVERY_DELAY)
+
+#define S_DENSITY 24
+#define M_DENSITY 0x3
+#define V_DENSITY(x) ((x) << S_DENSITY)
+#define G_DENSITY(x) (((x) >> S_DENSITY) & M_DENSITY)
+
+#define S_ORGANIZATION 26
+#define V_ORGANIZATION(x) ((x) << S_ORGANIZATION)
+#define F_ORGANIZATION V_ORGANIZATION(1U)
+
+#define S_BANKS 27
+#define V_BANKS(x) ((x) << S_BANKS)
+#define F_BANKS V_BANKS(1U)
+
+#define S_UNREGISTERED 28
+#define V_UNREGISTERED(x) ((x) << S_UNREGISTERED)
+#define F_UNREGISTERED V_UNREGISTERED(1U)
+
+#define S_MC3_WIDTH 29
+#define M_MC3_WIDTH 0x3
+#define V_MC3_WIDTH(x) ((x) << S_MC3_WIDTH)
+#define G_MC3_WIDTH(x) (((x) >> S_MC3_WIDTH) & M_MC3_WIDTH)
+
+#define S_MC3_SLOW 31
+#define V_MC3_SLOW(x) ((x) << S_MC3_SLOW)
+#define F_MC3_SLOW V_MC3_SLOW(1U)
+
+#define A_MC3_MODE 0x104
+
+#define S_MC3_MODE 0
+#define M_MC3_MODE 0x3fff
+#define V_MC3_MODE(x) ((x) << S_MC3_MODE)
+#define G_MC3_MODE(x) (((x) >> S_MC3_MODE) & M_MC3_MODE)
+
+#define S_BUSY 31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY V_BUSY(1U)
+
+#define A_MC3_EXT_MODE 0x108
+
+#define S_MC3_EXTENDED_MODE 0
+#define M_MC3_EXTENDED_MODE 0x3fff
+#define V_MC3_EXTENDED_MODE(x) ((x) << S_MC3_EXTENDED_MODE)
+#define G_MC3_EXTENDED_MODE(x) (((x) >> S_MC3_EXTENDED_MODE) & M_MC3_EXTENDED_MODE)
+
+#define A_MC3_PRECHARG 0x10c
+#define A_MC3_REFRESH 0x110
+
+#define S_REFRESH_ENABLE 0
+#define V_REFRESH_ENABLE(x) ((x) << S_REFRESH_ENABLE)
+#define F_REFRESH_ENABLE V_REFRESH_ENABLE(1U)
+
+#define S_REFRESH_DIVISOR 1
+#define M_REFRESH_DIVISOR 0x3fff
+#define V_REFRESH_DIVISOR(x) ((x) << S_REFRESH_DIVISOR)
+#define G_REFRESH_DIVISOR(x) (((x) >> S_REFRESH_DIVISOR) & M_REFRESH_DIVISOR)
+
+#define A_MC3_STROBE 0x114
+
+#define S_MASTER_DLL_RESET 0
+#define V_MASTER_DLL_RESET(x) ((x) << S_MASTER_DLL_RESET)
+#define F_MASTER_DLL_RESET V_MASTER_DLL_RESET(1U)
+
+#define S_MASTER_DLL_TAP_COUNT 1
+#define M_MASTER_DLL_TAP_COUNT 0xff
+#define V_MASTER_DLL_TAP_COUNT(x) ((x) << S_MASTER_DLL_TAP_COUNT)
+#define G_MASTER_DLL_TAP_COUNT(x) (((x) >> S_MASTER_DLL_TAP_COUNT) & M_MASTER_DLL_TAP_COUNT)
+
+#define S_MASTER_DLL_LOCKED 9
+#define V_MASTER_DLL_LOCKED(x) ((x) << S_MASTER_DLL_LOCKED)
+#define F_MASTER_DLL_LOCKED V_MASTER_DLL_LOCKED(1U)
+
+#define S_MASTER_DLL_MAX_TAP_COUNT 10
+#define V_MASTER_DLL_MAX_TAP_COUNT(x) ((x) << S_MASTER_DLL_MAX_TAP_COUNT)
+#define F_MASTER_DLL_MAX_TAP_COUNT V_MASTER_DLL_MAX_TAP_COUNT(1U)
+
+#define S_MASTER_DLL_TAP_COUNT_OFFSET 11
+#define M_MASTER_DLL_TAP_COUNT_OFFSET 0x3f
+#define V_MASTER_DLL_TAP_COUNT_OFFSET(x) ((x) << S_MASTER_DLL_TAP_COUNT_OFFSET)
+#define G_MASTER_DLL_TAP_COUNT_OFFSET(x) (((x) >> S_MASTER_DLL_TAP_COUNT_OFFSET) & M_MASTER_DLL_TAP_COUNT_OFFSET)
+
+#define S_SLAVE_DLL_RESET 11
+#define V_SLAVE_DLL_RESET(x) ((x) << S_SLAVE_DLL_RESET)
+#define F_SLAVE_DLL_RESET V_SLAVE_DLL_RESET(1U)
+
+#define S_SLAVE_DLL_DELTA 12
+#define M_SLAVE_DLL_DELTA 0xf
+#define V_SLAVE_DLL_DELTA(x) ((x) << S_SLAVE_DLL_DELTA)
+#define G_SLAVE_DLL_DELTA(x) (((x) >> S_SLAVE_DLL_DELTA) & M_SLAVE_DLL_DELTA)
+
+#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT 17
+#define M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT 0x3f
+#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT)
+#define G_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) & M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT)
+
+#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE 23
+#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE)
+#define F_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(1U)
+
+#define S_SLAVE_DELAY_LINE_TAP_COUNT 24
+#define M_SLAVE_DELAY_LINE_TAP_COUNT 0x3f
+#define V_SLAVE_DELAY_LINE_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_TAP_COUNT)
+#define G_SLAVE_DELAY_LINE_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_TAP_COUNT) & M_SLAVE_DELAY_LINE_TAP_COUNT)
+
+#define A_MC3_ECC_CNTL 0x118
+
+#define S_ECC_GENERATION_ENABLE 0
+#define V_ECC_GENERATION_ENABLE(x) ((x) << S_ECC_GENERATION_ENABLE)
+#define F_ECC_GENERATION_ENABLE V_ECC_GENERATION_ENABLE(1U)
+
+#define S_ECC_CHECK_ENABLE 1
+#define V_ECC_CHECK_ENABLE(x) ((x) << S_ECC_CHECK_ENABLE)
+#define F_ECC_CHECK_ENABLE V_ECC_CHECK_ENABLE(1U)
+
+#define S_CORRECTABLE_ERROR_COUNT 2
+#define M_CORRECTABLE_ERROR_COUNT 0xff
+#define V_CORRECTABLE_ERROR_COUNT(x) ((x) << S_CORRECTABLE_ERROR_COUNT)
+#define G_CORRECTABLE_ERROR_COUNT(x) (((x) >> S_CORRECTABLE_ERROR_COUNT) & M_CORRECTABLE_ERROR_COUNT)
+
+#define S_UNCORRECTABLE_ERROR_COUNT 10
+#define M_UNCORRECTABLE_ERROR_COUNT 0xff
+#define V_UNCORRECTABLE_ERROR_COUNT(x) ((x) << S_UNCORRECTABLE_ERROR_COUNT)
+#define G_UNCORRECTABLE_ERROR_COUNT(x) (((x) >> S_UNCORRECTABLE_ERROR_COUNT) & M_UNCORRECTABLE_ERROR_COUNT)
+
+#define A_MC3_CE_ADDR 0x11c
+
+#define S_MC3_CE_ADDR 4
+#define M_MC3_CE_ADDR 0xfffffff
+#define V_MC3_CE_ADDR(x) ((x) << S_MC3_CE_ADDR)
+#define G_MC3_CE_ADDR(x) (((x) >> S_MC3_CE_ADDR) & M_MC3_CE_ADDR)
+
+#define A_MC3_CE_DATA0 0x120
+#define A_MC3_CE_DATA1 0x124
+#define A_MC3_CE_DATA2 0x128
+#define A_MC3_CE_DATA3 0x12c
+#define A_MC3_CE_DATA4 0x130
+#define A_MC3_UE_ADDR 0x134
+
+#define S_MC3_UE_ADDR 4
+#define M_MC3_UE_ADDR 0xfffffff
+#define V_MC3_UE_ADDR(x) ((x) << S_MC3_UE_ADDR)
+#define G_MC3_UE_ADDR(x) (((x) >> S_MC3_UE_ADDR) & M_MC3_UE_ADDR)
+
+#define A_MC3_UE_DATA0 0x138
+#define A_MC3_UE_DATA1 0x13c
+#define A_MC3_UE_DATA2 0x140
+#define A_MC3_UE_DATA3 0x144
+#define A_MC3_UE_DATA4 0x148
+#define A_MC3_BD_ADDR 0x14c
+#define A_MC3_BD_DATA0 0x150
+#define A_MC3_BD_DATA1 0x154
+#define A_MC3_BD_DATA2 0x158
+#define A_MC3_BD_DATA3 0x15c
+#define A_MC3_BD_DATA4 0x160
+#define A_MC3_BD_OP 0x164
+
+#define S_BACK_DOOR_OPERATION 0
+#define V_BACK_DOOR_OPERATION(x) ((x) << S_BACK_DOOR_OPERATION)
+#define F_BACK_DOOR_OPERATION V_BACK_DOOR_OPERATION(1U)
+
+#define A_MC3_BIST_ADDR_BEG 0x168
+#define A_MC3_BIST_ADDR_END 0x16c
+#define A_MC3_BIST_DATA 0x170
+#define A_MC3_BIST_OP 0x174
+
+#define S_OP 0
+#define V_OP(x) ((x) << S_OP)
+#define F_OP V_OP(1U)
+
+#define S_DATA_PATTERN 1
+#define M_DATA_PATTERN 0x3
+#define V_DATA_PATTERN(x) ((x) << S_DATA_PATTERN)
+#define G_DATA_PATTERN(x) (((x) >> S_DATA_PATTERN) & M_DATA_PATTERN)
+
+#define S_CONTINUOUS 3
+#define V_CONTINUOUS(x) ((x) << S_CONTINUOUS)
+#define F_CONTINUOUS V_CONTINUOUS(1U)
+
+#define A_MC3_INT_ENABLE 0x178
+
+#define S_MC3_CORR_ERR 0
+#define V_MC3_CORR_ERR(x) ((x) << S_MC3_CORR_ERR)
+#define F_MC3_CORR_ERR V_MC3_CORR_ERR(1U)
+
+#define S_MC3_UNCORR_ERR 1
+#define V_MC3_UNCORR_ERR(x) ((x) << S_MC3_UNCORR_ERR)
+#define F_MC3_UNCORR_ERR V_MC3_UNCORR_ERR(1U)
+
+#define S_MC3_PARITY_ERR 2
+#define M_MC3_PARITY_ERR 0xff
+#define V_MC3_PARITY_ERR(x) ((x) << S_MC3_PARITY_ERR)
+#define G_MC3_PARITY_ERR(x) (((x) >> S_MC3_PARITY_ERR) & M_MC3_PARITY_ERR)
+
+#define S_MC3_ADDR_ERR 10
+#define V_MC3_ADDR_ERR(x) ((x) << S_MC3_ADDR_ERR)
+#define F_MC3_ADDR_ERR V_MC3_ADDR_ERR(1U)
+
+#define A_MC3_INT_CAUSE 0x17c
+
+/* MC4 registers */
#define A_MC4_CFG 0x180
+
+#define S_POWER_UP 0
+#define V_POWER_UP(x) ((x) << S_POWER_UP)
+#define F_POWER_UP V_POWER_UP(1U)
+
+#define S_MC4_BANK_CYCLE 8
+#define M_MC4_BANK_CYCLE 0x7
+#define V_MC4_BANK_CYCLE(x) ((x) << S_MC4_BANK_CYCLE)
+#define G_MC4_BANK_CYCLE(x) (((x) >> S_MC4_BANK_CYCLE) & M_MC4_BANK_CYCLE)
+
+#define S_MC4_NARROW 24
+#define V_MC4_NARROW(x) ((x) << S_MC4_NARROW)
+#define F_MC4_NARROW V_MC4_NARROW(1U)
+
#define S_MC4_SLOW 25
#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
#define F_MC4_SLOW V_MC4_SLOW(1U)
-/* TPI registers */
+#define S_MC4A_WIDTH 24
+#define M_MC4A_WIDTH 0x3
+#define V_MC4A_WIDTH(x) ((x) << S_MC4A_WIDTH)
+#define G_MC4A_WIDTH(x) (((x) >> S_MC4A_WIDTH) & M_MC4A_WIDTH)
+
+#define S_MC4A_SLOW 26
+#define V_MC4A_SLOW(x) ((x) << S_MC4A_SLOW)
+#define F_MC4A_SLOW V_MC4A_SLOW(1U)
+
+#define A_MC4_MODE 0x184
+
+#define S_MC4_MODE 0
+#define M_MC4_MODE 0x7fff
+#define V_MC4_MODE(x) ((x) << S_MC4_MODE)
+#define G_MC4_MODE(x) (((x) >> S_MC4_MODE) & M_MC4_MODE)
+
+#define A_MC4_EXT_MODE 0x188
+
+#define S_MC4_EXTENDED_MODE 0
+#define M_MC4_EXTENDED_MODE 0x7fff
+#define V_MC4_EXTENDED_MODE(x) ((x) << S_MC4_EXTENDED_MODE)
+#define G_MC4_EXTENDED_MODE(x) (((x) >> S_MC4_EXTENDED_MODE) & M_MC4_EXTENDED_MODE)
+
+#define A_MC4_REFRESH 0x190
+#define A_MC4_STROBE 0x194
+#define A_MC4_ECC_CNTL 0x198
+#define A_MC4_CE_ADDR 0x19c
+
+#define S_MC4_CE_ADDR 4
+#define M_MC4_CE_ADDR 0xffffff
+#define V_MC4_CE_ADDR(x) ((x) << S_MC4_CE_ADDR)
+#define G_MC4_CE_ADDR(x) (((x) >> S_MC4_CE_ADDR) & M_MC4_CE_ADDR)
+
+#define A_MC4_CE_DATA0 0x1a0
+#define A_MC4_CE_DATA1 0x1a4
+#define A_MC4_CE_DATA2 0x1a8
+#define A_MC4_CE_DATA3 0x1ac
+#define A_MC4_CE_DATA4 0x1b0
+#define A_MC4_UE_ADDR 0x1b4
+
+#define S_MC4_UE_ADDR 4
+#define M_MC4_UE_ADDR 0xffffff
+#define V_MC4_UE_ADDR(x) ((x) << S_MC4_UE_ADDR)
+#define G_MC4_UE_ADDR(x) (((x) >> S_MC4_UE_ADDR) & M_MC4_UE_ADDR)
+
+#define A_MC4_UE_DATA0 0x1b8
+#define A_MC4_UE_DATA1 0x1bc
+#define A_MC4_UE_DATA2 0x1c0
+#define A_MC4_UE_DATA3 0x1c4
+#define A_MC4_UE_DATA4 0x1c8
+#define A_MC4_BD_ADDR 0x1cc
+
+#define S_MC4_BACK_DOOR_ADDR 0
+#define M_MC4_BACK_DOOR_ADDR 0xfffffff
+#define V_MC4_BACK_DOOR_ADDR(x) ((x) << S_MC4_BACK_DOOR_ADDR)
+#define G_MC4_BACK_DOOR_ADDR(x) (((x) >> S_MC4_BACK_DOOR_ADDR) & M_MC4_BACK_DOOR_ADDR)
+
+#define A_MC4_BD_DATA0 0x1d0
+#define A_MC4_BD_DATA1 0x1d4
+#define A_MC4_BD_DATA2 0x1d8
+#define A_MC4_BD_DATA3 0x1dc
+#define A_MC4_BD_DATA4 0x1e0
+#define A_MC4_BD_OP 0x1e4
+
+#define S_OPERATION 0
+#define V_OPERATION(x) ((x) << S_OPERATION)
+#define F_OPERATION V_OPERATION(1U)
+
+#define A_MC4_BIST_ADDR_BEG 0x1e8
+#define A_MC4_BIST_ADDR_END 0x1ec
+#define A_MC4_BIST_DATA 0x1f0
+#define A_MC4_BIST_OP 0x1f4
+#define A_MC4_INT_ENABLE 0x1f8
+
+#define S_MC4_CORR_ERR 0
+#define V_MC4_CORR_ERR(x) ((x) << S_MC4_CORR_ERR)
+#define F_MC4_CORR_ERR V_MC4_CORR_ERR(1U)
+
+#define S_MC4_UNCORR_ERR 1
+#define V_MC4_UNCORR_ERR(x) ((x) << S_MC4_UNCORR_ERR)
+#define F_MC4_UNCORR_ERR V_MC4_UNCORR_ERR(1U)
+
+#define S_MC4_ADDR_ERR 2
+#define V_MC4_ADDR_ERR(x) ((x) << S_MC4_ADDR_ERR)
+#define F_MC4_ADDR_ERR V_MC4_ADDR_ERR(1U)
+
+#define A_MC4_INT_CAUSE 0x1fc
+/* TPI registers */
#define A_TPI_ADDR 0x280
+
+#define S_TPI_ADDRESS 0
+#define M_TPI_ADDRESS 0xffffff
+#define V_TPI_ADDRESS(x) ((x) << S_TPI_ADDRESS)
+#define G_TPI_ADDRESS(x) (((x) >> S_TPI_ADDRESS) & M_TPI_ADDRESS)
+
#define A_TPI_WR_DATA 0x284
#define A_TPI_RD_DATA 0x288
#define A_TPI_CSR 0x28c
@@ -171,6 +627,10 @@
#define V_TPIRDY(x) ((x) << S_TPIRDY)
#define F_TPIRDY V_TPIRDY(1U)
+#define S_INT_DIR 31
+#define V_INT_DIR(x) ((x) << S_INT_DIR)
+#define F_INT_DIR V_INT_DIR(1U)
+
#define A_TPI_PAR 0x29c
#define S_TPIPAR 0
@@ -178,14 +638,26 @@
#define V_TPIPAR(x) ((x) << S_TPIPAR)
#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
-/* TP registers */
+/* TP registers */
#define A_TP_IN_CONFIG 0x300
+#define S_TP_IN_CSPI_TUNNEL 0
+#define V_TP_IN_CSPI_TUNNEL(x) ((x) << S_TP_IN_CSPI_TUNNEL)
+#define F_TP_IN_CSPI_TUNNEL V_TP_IN_CSPI_TUNNEL(1U)
+
+#define S_TP_IN_CSPI_ETHERNET 1
+#define V_TP_IN_CSPI_ETHERNET(x) ((x) << S_TP_IN_CSPI_ETHERNET)
+#define F_TP_IN_CSPI_ETHERNET V_TP_IN_CSPI_ETHERNET(1U)
+
#define S_TP_IN_CSPI_CPL 3
#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
#define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U)
+#define S_TP_IN_CSPI_POS 4
+#define V_TP_IN_CSPI_POS(x) ((x) << S_TP_IN_CSPI_POS)
+#define F_TP_IN_CSPI_POS V_TP_IN_CSPI_POS(1U)
+
#define S_TP_IN_CSPI_CHECK_IP_CSUM 5
#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
#define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
@@ -194,10 +666,22 @@
#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
#define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
+#define S_TP_IN_ESPI_TUNNEL 7
+#define V_TP_IN_ESPI_TUNNEL(x) ((x) << S_TP_IN_ESPI_TUNNEL)
+#define F_TP_IN_ESPI_TUNNEL V_TP_IN_ESPI_TUNNEL(1U)
+
#define S_TP_IN_ESPI_ETHERNET 8
#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
#define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U)
+#define S_TP_IN_ESPI_CPL 10
+#define V_TP_IN_ESPI_CPL(x) ((x) << S_TP_IN_ESPI_CPL)
+#define F_TP_IN_ESPI_CPL V_TP_IN_ESPI_CPL(1U)
+
+#define S_TP_IN_ESPI_POS 11
+#define V_TP_IN_ESPI_POS(x) ((x) << S_TP_IN_ESPI_POS)
+#define F_TP_IN_ESPI_POS V_TP_IN_ESPI_POS(1U)
+
#define S_TP_IN_ESPI_CHECK_IP_CSUM 12
#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
#define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
@@ -212,14 +696,42 @@
#define A_TP_OUT_CONFIG 0x304
+#define S_TP_OUT_C_ETH 0
+#define V_TP_OUT_C_ETH(x) ((x) << S_TP_OUT_C_ETH)
+#define F_TP_OUT_C_ETH V_TP_OUT_C_ETH(1U)
+
#define S_TP_OUT_CSPI_CPL 2
#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
#define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U)
+#define S_TP_OUT_CSPI_POS 3
+#define V_TP_OUT_CSPI_POS(x) ((x) << S_TP_OUT_CSPI_POS)
+#define F_TP_OUT_CSPI_POS V_TP_OUT_CSPI_POS(1U)
+
+#define S_TP_OUT_CSPI_GENERATE_IP_CSUM 4
+#define V_TP_OUT_CSPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_IP_CSUM)
+#define F_TP_OUT_CSPI_GENERATE_IP_CSUM V_TP_OUT_CSPI_GENERATE_IP_CSUM(1U)
+
+#define S_TP_OUT_CSPI_GENERATE_TCP_CSUM 5
+#define V_TP_OUT_CSPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_TCP_CSUM)
+#define F_TP_OUT_CSPI_GENERATE_TCP_CSUM V_TP_OUT_CSPI_GENERATE_TCP_CSUM(1U)
+
#define S_TP_OUT_ESPI_ETHERNET 6
#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
#define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U)
+#define S_TP_OUT_ESPI_TAG_ETHERNET 7
+#define V_TP_OUT_ESPI_TAG_ETHERNET(x) ((x) << S_TP_OUT_ESPI_TAG_ETHERNET)
+#define F_TP_OUT_ESPI_TAG_ETHERNET V_TP_OUT_ESPI_TAG_ETHERNET(1U)
+
+#define S_TP_OUT_ESPI_CPL 8
+#define V_TP_OUT_ESPI_CPL(x) ((x) << S_TP_OUT_ESPI_CPL)
+#define F_TP_OUT_ESPI_CPL V_TP_OUT_ESPI_CPL(1U)
+
+#define S_TP_OUT_ESPI_POS 9
+#define V_TP_OUT_ESPI_POS(x) ((x) << S_TP_OUT_ESPI_POS)
+#define F_TP_OUT_ESPI_POS V_TP_OUT_ESPI_POS(1U)
+
#define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10
#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
#define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
@@ -233,6 +745,16 @@
#define S_IP_TTL 0
#define M_IP_TTL 0xff
#define V_IP_TTL(x) ((x) << S_IP_TTL)
+#define G_IP_TTL(x) (((x) >> S_IP_TTL) & M_IP_TTL)
+
+#define S_TCAM_SERVER_REGION_USAGE 8
+#define M_TCAM_SERVER_REGION_USAGE 0x3
+#define V_TCAM_SERVER_REGION_USAGE(x) ((x) << S_TCAM_SERVER_REGION_USAGE)
+#define G_TCAM_SERVER_REGION_USAGE(x) (((x) >> S_TCAM_SERVER_REGION_USAGE) & M_TCAM_SERVER_REGION_USAGE)
+
+#define S_QOS_MAPPING 10
+#define V_QOS_MAPPING(x) ((x) << S_QOS_MAPPING)
+#define F_QOS_MAPPING V_QOS_MAPPING(1U)
#define S_TCP_CSUM 11
#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
@@ -246,31 +768,476 @@
#define V_IP_CSUM(x) ((x) << S_IP_CSUM)
#define F_IP_CSUM V_IP_CSUM(1U)
+#define S_IP_ID_SPLIT 14
+#define V_IP_ID_SPLIT(x) ((x) << S_IP_ID_SPLIT)
+#define F_IP_ID_SPLIT V_IP_ID_SPLIT(1U)
+
#define S_PATH_MTU 15
#define V_PATH_MTU(x) ((x) << S_PATH_MTU)
#define F_PATH_MTU V_PATH_MTU(1U)
#define S_5TUPLE_LOOKUP 17
+#define M_5TUPLE_LOOKUP 0x3
#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
+#define G_5TUPLE_LOOKUP(x) (((x) >> S_5TUPLE_LOOKUP) & M_5TUPLE_LOOKUP)
+
+#define S_IP_FRAGMENT_DROP 19
+#define V_IP_FRAGMENT_DROP(x) ((x) << S_IP_FRAGMENT_DROP)
+#define F_IP_FRAGMENT_DROP V_IP_FRAGMENT_DROP(1U)
+
+#define S_PING_DROP 20
+#define V_PING_DROP(x) ((x) << S_PING_DROP)
+#define F_PING_DROP V_PING_DROP(1U)
+
+#define S_PROTECT_MODE 21
+#define V_PROTECT_MODE(x) ((x) << S_PROTECT_MODE)
+#define F_PROTECT_MODE V_PROTECT_MODE(1U)
+
+#define S_SYN_COOKIE_ALGORITHM 22
+#define V_SYN_COOKIE_ALGORITHM(x) ((x) << S_SYN_COOKIE_ALGORITHM)
+#define F_SYN_COOKIE_ALGORITHM V_SYN_COOKIE_ALGORITHM(1U)
+
+#define S_ATTACK_FILTER 23
+#define V_ATTACK_FILTER(x) ((x) << S_ATTACK_FILTER)
+#define F_ATTACK_FILTER V_ATTACK_FILTER(1U)
+
+#define S_INTERFACE_TYPE 24
+#define V_INTERFACE_TYPE(x) ((x) << S_INTERFACE_TYPE)
+#define F_INTERFACE_TYPE V_INTERFACE_TYPE(1U)
+
+#define S_DISABLE_RX_FLOW_CONTROL 25
+#define V_DISABLE_RX_FLOW_CONTROL(x) ((x) << S_DISABLE_RX_FLOW_CONTROL)
+#define F_DISABLE_RX_FLOW_CONTROL V_DISABLE_RX_FLOW_CONTROL(1U)
#define S_SYN_COOKIE_PARAMETER 26
+#define M_SYN_COOKIE_PARAMETER 0x3f
#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
+#define G_SYN_COOKIE_PARAMETER(x) (((x) >> S_SYN_COOKIE_PARAMETER) & M_SYN_COOKIE_PARAMETER)
+
+#define A_TP_GLOBAL_RX_CREDITS 0x30c
+#define A_TP_CM_SIZE 0x310
+#define A_TP_CM_MM_BASE 0x314
+
+#define S_CM_MEMMGR_BASE 0
+#define M_CM_MEMMGR_BASE 0xfffffff
+#define V_CM_MEMMGR_BASE(x) ((x) << S_CM_MEMMGR_BASE)
+#define G_CM_MEMMGR_BASE(x) (((x) >> S_CM_MEMMGR_BASE) & M_CM_MEMMGR_BASE)
+
+#define A_TP_CM_TIMER_BASE 0x318
+
+#define S_CM_TIMER_BASE 0
+#define M_CM_TIMER_BASE 0xfffffff
+#define V_CM_TIMER_BASE(x) ((x) << S_CM_TIMER_BASE)
+#define G_CM_TIMER_BASE(x) (((x) >> S_CM_TIMER_BASE) & M_CM_TIMER_BASE)
+
+#define A_TP_PM_SIZE 0x31c
+#define A_TP_PM_TX_BASE 0x320
+#define A_TP_PM_DEFRAG_BASE 0x324
+#define A_TP_PM_RX_BASE 0x328
+#define A_TP_PM_RX_PG_SIZE 0x32c
+#define A_TP_PM_RX_MAX_PGS 0x330
+#define A_TP_PM_TX_PG_SIZE 0x334
+#define A_TP_PM_TX_MAX_PGS 0x338
+#define A_TP_TCP_OPTIONS 0x340
+
+#define S_TIMESTAMP 0
+#define M_TIMESTAMP 0x3
+#define V_TIMESTAMP(x) ((x) << S_TIMESTAMP)
+#define G_TIMESTAMP(x) (((x) >> S_TIMESTAMP) & M_TIMESTAMP)
+
+#define S_WINDOW_SCALE 2
+#define M_WINDOW_SCALE 0x3
+#define V_WINDOW_SCALE(x) ((x) << S_WINDOW_SCALE)
+#define G_WINDOW_SCALE(x) (((x) >> S_WINDOW_SCALE) & M_WINDOW_SCALE)
+
+#define S_SACK 4
+#define M_SACK 0x3
+#define V_SACK(x) ((x) << S_SACK)
+#define G_SACK(x) (((x) >> S_SACK) & M_SACK)
+
+#define S_ECN 6
+#define M_ECN 0x3
+#define V_ECN(x) ((x) << S_ECN)
+#define G_ECN(x) (((x) >> S_ECN) & M_ECN)
+
+#define S_SACK_ALGORITHM 8
+#define M_SACK_ALGORITHM 0x3
+#define V_SACK_ALGORITHM(x) ((x) << S_SACK_ALGORITHM)
+#define G_SACK_ALGORITHM(x) (((x) >> S_SACK_ALGORITHM) & M_SACK_ALGORITHM)
+
+#define S_MSS 10
+#define V_MSS(x) ((x) << S_MSS)
+#define F_MSS V_MSS(1U)
+
+#define S_DEFAULT_PEER_MSS 16
+#define M_DEFAULT_PEER_MSS 0xffff
+#define V_DEFAULT_PEER_MSS(x) ((x) << S_DEFAULT_PEER_MSS)
+#define G_DEFAULT_PEER_MSS(x) (((x) >> S_DEFAULT_PEER_MSS) & M_DEFAULT_PEER_MSS)
+
+#define A_TP_DACK_CONFIG 0x344
+
+#define S_DACK_MODE 0
+#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
+#define F_DACK_MODE V_DACK_MODE(1U)
+
+#define S_DACK_AUTO_MGMT 1
+#define V_DACK_AUTO_MGMT(x) ((x) << S_DACK_AUTO_MGMT)
+#define F_DACK_AUTO_MGMT V_DACK_AUTO_MGMT(1U)
+
+#define S_DACK_AUTO_CAREFUL 2
+#define V_DACK_AUTO_CAREFUL(x) ((x) << S_DACK_AUTO_CAREFUL)
+#define F_DACK_AUTO_CAREFUL V_DACK_AUTO_CAREFUL(1U)
+
+#define S_DACK_MSS_SELECTOR 3
+#define M_DACK_MSS_SELECTOR 0x3
+#define V_DACK_MSS_SELECTOR(x) ((x) << S_DACK_MSS_SELECTOR)
+#define G_DACK_MSS_SELECTOR(x) (((x) >> S_DACK_MSS_SELECTOR) & M_DACK_MSS_SELECTOR)
+
+#define S_DACK_BYTE_THRESHOLD 5
+#define M_DACK_BYTE_THRESHOLD 0xfffff
+#define V_DACK_BYTE_THRESHOLD(x) ((x) << S_DACK_BYTE_THRESHOLD)
+#define G_DACK_BYTE_THRESHOLD(x) (((x) >> S_DACK_BYTE_THRESHOLD) & M_DACK_BYTE_THRESHOLD)
#define A_TP_PC_CONFIG 0x348
+
+#define S_TP_ACCESS_LATENCY 0
+#define M_TP_ACCESS_LATENCY 0xf
+#define V_TP_ACCESS_LATENCY(x) ((x) << S_TP_ACCESS_LATENCY)
+#define G_TP_ACCESS_LATENCY(x) (((x) >> S_TP_ACCESS_LATENCY) & M_TP_ACCESS_LATENCY)
+
+#define S_HELD_FIN_DISABLE 4
+#define V_HELD_FIN_DISABLE(x) ((x) << S_HELD_FIN_DISABLE)
+#define F_HELD_FIN_DISABLE V_HELD_FIN_DISABLE(1U)
+
+#define S_DDP_FC_ENABLE 5
+#define V_DDP_FC_ENABLE(x) ((x) << S_DDP_FC_ENABLE)
+#define F_DDP_FC_ENABLE V_DDP_FC_ENABLE(1U)
+
+#define S_RDMA_ERR_ENABLE 6
+#define V_RDMA_ERR_ENABLE(x) ((x) << S_RDMA_ERR_ENABLE)
+#define F_RDMA_ERR_ENABLE V_RDMA_ERR_ENABLE(1U)
+
+#define S_FAST_PDU_DELIVERY 7
+#define V_FAST_PDU_DELIVERY(x) ((x) << S_FAST_PDU_DELIVERY)
+#define F_FAST_PDU_DELIVERY V_FAST_PDU_DELIVERY(1U)
+
+#define S_CLEAR_FIN 8
+#define V_CLEAR_FIN(x) ((x) << S_CLEAR_FIN)
+#define F_CLEAR_FIN V_CLEAR_FIN(1U)
+
#define S_DIS_TX_FILL_WIN_PUSH 12
#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
#define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U)
#define S_TP_PC_REV 30
#define M_TP_PC_REV 0x3
+#define V_TP_PC_REV(x) ((x) << S_TP_PC_REV)
#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
+
+#define A_TP_BACKOFF0 0x350
+
+#define S_ELEMENT0 0
+#define M_ELEMENT0 0xff
+#define V_ELEMENT0(x) ((x) << S_ELEMENT0)
+#define G_ELEMENT0(x) (((x) >> S_ELEMENT0) & M_ELEMENT0)
+
+#define S_ELEMENT1 8
+#define M_ELEMENT1 0xff
+#define V_ELEMENT1(x) ((x) << S_ELEMENT1)
+#define G_ELEMENT1(x) (((x) >> S_ELEMENT1) & M_ELEMENT1)
+
+#define S_ELEMENT2 16
+#define M_ELEMENT2 0xff
+#define V_ELEMENT2(x) ((x) << S_ELEMENT2)
+#define G_ELEMENT2(x) (((x) >> S_ELEMENT2) & M_ELEMENT2)
+
+#define S_ELEMENT3 24
+#define M_ELEMENT3 0xff
+#define V_ELEMENT3(x) ((x) << S_ELEMENT3)
+#define G_ELEMENT3(x) (((x) >> S_ELEMENT3) & M_ELEMENT3)
+
+#define A_TP_BACKOFF1 0x354
+#define A_TP_BACKOFF2 0x358
+#define A_TP_BACKOFF3 0x35c
+#define A_TP_PARA_REG0 0x360
+
+#define S_VAR_MULT 0
+#define M_VAR_MULT 0xf
+#define V_VAR_MULT(x) ((x) << S_VAR_MULT)
+#define G_VAR_MULT(x) (((x) >> S_VAR_MULT) & M_VAR_MULT)
+
+#define S_VAR_GAIN 4
+#define M_VAR_GAIN 0xf
+#define V_VAR_GAIN(x) ((x) << S_VAR_GAIN)
+#define G_VAR_GAIN(x) (((x) >> S_VAR_GAIN) & M_VAR_GAIN)
+
+#define S_SRTT_GAIN 8
+#define M_SRTT_GAIN 0xf
+#define V_SRTT_GAIN(x) ((x) << S_SRTT_GAIN)
+#define G_SRTT_GAIN(x) (((x) >> S_SRTT_GAIN) & M_SRTT_GAIN)
+
+#define S_RTTVAR_INIT 12
+#define M_RTTVAR_INIT 0xf
+#define V_RTTVAR_INIT(x) ((x) << S_RTTVAR_INIT)
+#define G_RTTVAR_INIT(x) (((x) >> S_RTTVAR_INIT) & M_RTTVAR_INIT)
+
+#define S_DUP_THRESH 20
+#define M_DUP_THRESH 0xf
+#define V_DUP_THRESH(x) ((x) << S_DUP_THRESH)
+#define G_DUP_THRESH(x) (((x) >> S_DUP_THRESH) & M_DUP_THRESH)
+
+#define S_INIT_CONG_WIN 24
+#define M_INIT_CONG_WIN 0x7
+#define V_INIT_CONG_WIN(x) ((x) << S_INIT_CONG_WIN)
+#define G_INIT_CONG_WIN(x) (((x) >> S_INIT_CONG_WIN) & M_INIT_CONG_WIN)
+
+#define A_TP_PARA_REG1 0x364
+
+#define S_INITIAL_SLOW_START_THRESHOLD 0
+#define M_INITIAL_SLOW_START_THRESHOLD 0xffff
+#define V_INITIAL_SLOW_START_THRESHOLD(x) ((x) << S_INITIAL_SLOW_START_THRESHOLD)
+#define G_INITIAL_SLOW_START_THRESHOLD(x) (((x) >> S_INITIAL_SLOW_START_THRESHOLD) & M_INITIAL_SLOW_START_THRESHOLD)
+
+#define S_RECEIVE_BUFFER_SIZE 16
+#define M_RECEIVE_BUFFER_SIZE 0xffff
+#define V_RECEIVE_BUFFER_SIZE(x) ((x) << S_RECEIVE_BUFFER_SIZE)
+#define G_RECEIVE_BUFFER_SIZE(x) (((x) >> S_RECEIVE_BUFFER_SIZE) & M_RECEIVE_BUFFER_SIZE)
+
+#define A_TP_PARA_REG2 0x368
+
+#define S_RX_COALESCE_SIZE 0
+#define M_RX_COALESCE_SIZE 0xffff
+#define V_RX_COALESCE_SIZE(x) ((x) << S_RX_COALESCE_SIZE)
+#define G_RX_COALESCE_SIZE(x) (((x) >> S_RX_COALESCE_SIZE) & M_RX_COALESCE_SIZE)
+
+#define S_MAX_RX_SIZE 16
+#define M_MAX_RX_SIZE 0xffff
+#define V_MAX_RX_SIZE(x) ((x) << S_MAX_RX_SIZE)
+#define G_MAX_RX_SIZE(x) (((x) >> S_MAX_RX_SIZE) & M_MAX_RX_SIZE)
+
+#define A_TP_PARA_REG3 0x36c
+
+#define S_RX_COALESCING_PSH_DELIVER 0
+#define V_RX_COALESCING_PSH_DELIVER(x) ((x) << S_RX_COALESCING_PSH_DELIVER)
+#define F_RX_COALESCING_PSH_DELIVER V_RX_COALESCING_PSH_DELIVER(1U)
+
+#define S_RX_COALESCING_ENABLE 1
+#define V_RX_COALESCING_ENABLE(x) ((x) << S_RX_COALESCING_ENABLE)
+#define F_RX_COALESCING_ENABLE V_RX_COALESCING_ENABLE(1U)
+
+#define S_TAHOE_ENABLE 2
+#define V_TAHOE_ENABLE(x) ((x) << S_TAHOE_ENABLE)
+#define F_TAHOE_ENABLE V_TAHOE_ENABLE(1U)
+
+#define S_MAX_REORDER_FRAGMENTS 12
+#define M_MAX_REORDER_FRAGMENTS 0x7
+#define V_MAX_REORDER_FRAGMENTS(x) ((x) << S_MAX_REORDER_FRAGMENTS)
+#define G_MAX_REORDER_FRAGMENTS(x) (((x) >> S_MAX_REORDER_FRAGMENTS) & M_MAX_REORDER_FRAGMENTS)
+
+#define A_TP_TIMER_RESOLUTION 0x390
+
+#define S_DELAYED_ACK_TIMER_RESOLUTION 0
+#define M_DELAYED_ACK_TIMER_RESOLUTION 0x3f
+#define V_DELAYED_ACK_TIMER_RESOLUTION(x) ((x) << S_DELAYED_ACK_TIMER_RESOLUTION)
+#define G_DELAYED_ACK_TIMER_RESOLUTION(x) (((x) >> S_DELAYED_ACK_TIMER_RESOLUTION) & M_DELAYED_ACK_TIMER_RESOLUTION)
+
+#define S_GENERIC_TIMER_RESOLUTION 16
+#define M_GENERIC_TIMER_RESOLUTION 0x3f
+#define V_GENERIC_TIMER_RESOLUTION(x) ((x) << S_GENERIC_TIMER_RESOLUTION)
+#define G_GENERIC_TIMER_RESOLUTION(x) (((x) >> S_GENERIC_TIMER_RESOLUTION) & M_GENERIC_TIMER_RESOLUTION)
+
+#define A_TP_2MSL 0x394
+
+#define S_2MSL 0
+#define M_2MSL 0x3fffffff
+#define V_2MSL(x) ((x) << S_2MSL)
+#define G_2MSL(x) (((x) >> S_2MSL) & M_2MSL)
+
+#define A_TP_RXT_MIN 0x398
+
+#define S_RETRANSMIT_TIMER_MIN 0
+#define M_RETRANSMIT_TIMER_MIN 0xffff
+#define V_RETRANSMIT_TIMER_MIN(x) ((x) << S_RETRANSMIT_TIMER_MIN)
+#define G_RETRANSMIT_TIMER_MIN(x) (((x) >> S_RETRANSMIT_TIMER_MIN) & M_RETRANSMIT_TIMER_MIN)
+
+#define A_TP_RXT_MAX 0x39c
+
+#define S_RETRANSMIT_TIMER_MAX 0
+#define M_RETRANSMIT_TIMER_MAX 0x3fffffff
+#define V_RETRANSMIT_TIMER_MAX(x) ((x) << S_RETRANSMIT_TIMER_MAX)
+#define G_RETRANSMIT_TIMER_MAX(x) (((x) >> S_RETRANSMIT_TIMER_MAX) & M_RETRANSMIT_TIMER_MAX)
+
+#define A_TP_PERS_MIN 0x3a0
+
+#define S_PERSIST_TIMER_MIN 0
+#define M_PERSIST_TIMER_MIN 0xffff
+#define V_PERSIST_TIMER_MIN(x) ((x) << S_PERSIST_TIMER_MIN)
+#define G_PERSIST_TIMER_MIN(x) (((x) >> S_PERSIST_TIMER_MIN) & M_PERSIST_TIMER_MIN)
+
+#define A_TP_PERS_MAX 0x3a4
+
+#define S_PERSIST_TIMER_MAX 0
+#define M_PERSIST_TIMER_MAX 0x3fffffff
+#define V_PERSIST_TIMER_MAX(x) ((x) << S_PERSIST_TIMER_MAX)
+#define G_PERSIST_TIMER_MAX(x) (((x) >> S_PERSIST_TIMER_MAX) & M_PERSIST_TIMER_MAX)
+
+#define A_TP_KEEP_IDLE 0x3ac
+
+#define S_KEEP_ALIVE_IDLE_TIME 0
+#define M_KEEP_ALIVE_IDLE_TIME 0x3fffffff
+#define V_KEEP_ALIVE_IDLE_TIME(x) ((x) << S_KEEP_ALIVE_IDLE_TIME)
+#define G_KEEP_ALIVE_IDLE_TIME(x) (((x) >> S_KEEP_ALIVE_IDLE_TIME) & M_KEEP_ALIVE_IDLE_TIME)
+
+#define A_TP_KEEP_INTVL 0x3b0
+
+#define S_KEEP_ALIVE_INTERVAL_TIME 0
+#define M_KEEP_ALIVE_INTERVAL_TIME 0x3fffffff
+#define V_KEEP_ALIVE_INTERVAL_TIME(x) ((x) << S_KEEP_ALIVE_INTERVAL_TIME)
+#define G_KEEP_ALIVE_INTERVAL_TIME(x) (((x) >> S_KEEP_ALIVE_INTERVAL_TIME) & M_KEEP_ALIVE_INTERVAL_TIME)
+
+#define A_TP_INIT_SRTT 0x3b4
+
+#define S_INITIAL_SRTT 0
+#define M_INITIAL_SRTT 0xffff
+#define V_INITIAL_SRTT(x) ((x) << S_INITIAL_SRTT)
+#define G_INITIAL_SRTT(x) (((x) >> S_INITIAL_SRTT) & M_INITIAL_SRTT)
+
+#define A_TP_DACK_TIME 0x3b8
+
+#define S_DELAYED_ACK_TIME 0
+#define M_DELAYED_ACK_TIME 0x7ff
+#define V_DELAYED_ACK_TIME(x) ((x) << S_DELAYED_ACK_TIME)
+#define G_DELAYED_ACK_TIME(x) (((x) >> S_DELAYED_ACK_TIME) & M_DELAYED_ACK_TIME)
+
+#define A_TP_FINWAIT2_TIME 0x3bc
+
+#define S_FINWAIT2_TIME 0
+#define M_FINWAIT2_TIME 0x3fffffff
+#define V_FINWAIT2_TIME(x) ((x) << S_FINWAIT2_TIME)
+#define G_FINWAIT2_TIME(x) (((x) >> S_FINWAIT2_TIME) & M_FINWAIT2_TIME)
+
+#define A_TP_FAST_FINWAIT2_TIME 0x3c0
+
+#define S_FAST_FINWAIT2_TIME 0
+#define M_FAST_FINWAIT2_TIME 0x3fffffff
+#define V_FAST_FINWAIT2_TIME(x) ((x) << S_FAST_FINWAIT2_TIME)
+#define G_FAST_FINWAIT2_TIME(x) (((x) >> S_FAST_FINWAIT2_TIME) & M_FAST_FINWAIT2_TIME)
+
+#define A_TP_SHIFT_CNT 0x3c4
+
+#define S_KEEPALIVE_MAX 0
+#define M_KEEPALIVE_MAX 0xff
+#define V_KEEPALIVE_MAX(x) ((x) << S_KEEPALIVE_MAX)
+#define G_KEEPALIVE_MAX(x) (((x) >> S_KEEPALIVE_MAX) & M_KEEPALIVE_MAX)
+
+#define S_WINDOWPROBE_MAX 8
+#define M_WINDOWPROBE_MAX 0xff
+#define V_WINDOWPROBE_MAX(x) ((x) << S_WINDOWPROBE_MAX)
+#define G_WINDOWPROBE_MAX(x) (((x) >> S_WINDOWPROBE_MAX) & M_WINDOWPROBE_MAX)
+
+#define S_RETRANSMISSION_MAX 16
+#define M_RETRANSMISSION_MAX 0xff
+#define V_RETRANSMISSION_MAX(x) ((x) << S_RETRANSMISSION_MAX)
+#define G_RETRANSMISSION_MAX(x) (((x) >> S_RETRANSMISSION_MAX) & M_RETRANSMISSION_MAX)
+
+#define S_SYN_MAX 24
+#define M_SYN_MAX 0xff
+#define V_SYN_MAX(x) ((x) << S_SYN_MAX)
+#define G_SYN_MAX(x) (((x) >> S_SYN_MAX) & M_SYN_MAX)
+
+#define A_TP_QOS_REG0 0x3e0
+
+#define S_L3_VALUE 0
+#define M_L3_VALUE 0x3f
+#define V_L3_VALUE(x) ((x) << S_L3_VALUE)
+#define G_L3_VALUE(x) (((x) >> S_L3_VALUE) & M_L3_VALUE)
+
+#define A_TP_QOS_REG1 0x3e4
+#define A_TP_QOS_REG2 0x3e8
+#define A_TP_QOS_REG3 0x3ec
+#define A_TP_QOS_REG4 0x3f0
+#define A_TP_QOS_REG5 0x3f4
+#define A_TP_QOS_REG6 0x3f8
+#define A_TP_QOS_REG7 0x3fc
+#define A_TP_MTU_REG0 0x404
+#define A_TP_MTU_REG1 0x408
+#define A_TP_MTU_REG2 0x40c
+#define A_TP_MTU_REG3 0x410
+#define A_TP_MTU_REG4 0x414
+#define A_TP_MTU_REG5 0x418
+#define A_TP_MTU_REG6 0x41c
+#define A_TP_MTU_REG7 0x420
#define A_TP_RESET 0x44c
+
#define S_TP_RESET 0
#define V_TP_RESET(x) ((x) << S_TP_RESET)
#define F_TP_RESET V_TP_RESET(1U)
+#define S_CM_MEMMGR_INIT 1
+#define V_CM_MEMMGR_INIT(x) ((x) << S_CM_MEMMGR_INIT)
+#define F_CM_MEMMGR_INIT V_CM_MEMMGR_INIT(1U)
+
+#define A_TP_MIB_INDEX 0x450
+#define A_TP_MIB_DATA 0x454
+#define A_TP_SYNC_TIME_HI 0x458
+#define A_TP_SYNC_TIME_LO 0x45c
+#define A_TP_CM_MM_RX_FLST_BASE 0x460
+
+#define S_CM_MEMMGR_RX_FREE_LIST_BASE 0
+#define M_CM_MEMMGR_RX_FREE_LIST_BASE 0xfffffff
+#define V_CM_MEMMGR_RX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_RX_FREE_LIST_BASE)
+#define G_CM_MEMMGR_RX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_RX_FREE_LIST_BASE) & M_CM_MEMMGR_RX_FREE_LIST_BASE)
+
+#define A_TP_CM_MM_TX_FLST_BASE 0x464
+
+#define S_CM_MEMMGR_TX_FREE_LIST_BASE 0
+#define M_CM_MEMMGR_TX_FREE_LIST_BASE 0xfffffff
+#define V_CM_MEMMGR_TX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_TX_FREE_LIST_BASE)
+#define G_CM_MEMMGR_TX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_TX_FREE_LIST_BASE) & M_CM_MEMMGR_TX_FREE_LIST_BASE)
+
+#define A_TP_CM_MM_P_FLST_BASE 0x468
+
+#define S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE 0
+#define M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE 0xfffffff
+#define V_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE)
+#define G_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) & M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE)
+
+#define A_TP_CM_MM_MAX_P 0x46c
+
+#define S_CM_MEMMGR_MAX_PSTRUCT 0
+#define M_CM_MEMMGR_MAX_PSTRUCT 0xfffffff
+#define V_CM_MEMMGR_MAX_PSTRUCT(x) ((x) << S_CM_MEMMGR_MAX_PSTRUCT)
+#define G_CM_MEMMGR_MAX_PSTRUCT(x) (((x) >> S_CM_MEMMGR_MAX_PSTRUCT) & M_CM_MEMMGR_MAX_PSTRUCT)
+
#define A_TP_INT_ENABLE 0x470
+
+#define S_TX_FREE_LIST_EMPTY 0
+#define V_TX_FREE_LIST_EMPTY(x) ((x) << S_TX_FREE_LIST_EMPTY)
+#define F_TX_FREE_LIST_EMPTY V_TX_FREE_LIST_EMPTY(1U)
+
+#define S_RX_FREE_LIST_EMPTY 1
+#define V_RX_FREE_LIST_EMPTY(x) ((x) << S_RX_FREE_LIST_EMPTY)
+#define F_RX_FREE_LIST_EMPTY V_RX_FREE_LIST_EMPTY(1U)
+
#define A_TP_INT_CAUSE 0x474
+#define A_TP_TIMER_SEPARATOR 0x4a4
+
+#define S_DISABLE_PAST_TIMER_INSERTION 0
+#define V_DISABLE_PAST_TIMER_INSERTION(x) ((x) << S_DISABLE_PAST_TIMER_INSERTION)
+#define F_DISABLE_PAST_TIMER_INSERTION V_DISABLE_PAST_TIMER_INSERTION(1U)
+
+#define S_MODULATION_TIMER_SEPARATOR 1
+#define M_MODULATION_TIMER_SEPARATOR 0x7fff
+#define V_MODULATION_TIMER_SEPARATOR(x) ((x) << S_MODULATION_TIMER_SEPARATOR)
+#define G_MODULATION_TIMER_SEPARATOR(x) (((x) >> S_MODULATION_TIMER_SEPARATOR) & M_MODULATION_TIMER_SEPARATOR)
+
+#define S_GLOBAL_TIMER_SEPARATOR 16
+#define M_GLOBAL_TIMER_SEPARATOR 0xffff
+#define V_GLOBAL_TIMER_SEPARATOR(x) ((x) << S_GLOBAL_TIMER_SEPARATOR)
+#define G_GLOBAL_TIMER_SEPARATOR(x) (((x) >> S_GLOBAL_TIMER_SEPARATOR) & M_GLOBAL_TIMER_SEPARATOR)
+
+#define A_TP_CM_FC_MODE 0x4b0
+#define A_TP_PC_CONGESTION_CNTL 0x4b4
#define A_TP_TX_DROP_CONFIG 0x4b8
#define S_ENABLE_TX_DROP 31
@@ -282,12 +1249,108 @@
#define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U)
#define S_DROP_TICKS_CNT 4
+#define M_DROP_TICKS_CNT 0x3ffffff
#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
+#define G_DROP_TICKS_CNT(x) (((x) >> S_DROP_TICKS_CNT) & M_DROP_TICKS_CNT)
#define S_NUM_PKTS_DROPPED 0
+#define M_NUM_PKTS_DROPPED 0xf
#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
+#define G_NUM_PKTS_DROPPED(x) (((x) >> S_NUM_PKTS_DROPPED) & M_NUM_PKTS_DROPPED)
+
+#define A_TP_TX_DROP_COUNT 0x4bc
+
+/* RAT registers */
+#define A_RAT_ROUTE_CONTROL 0x580
+
+#define S_USE_ROUTE_TABLE 0
+#define V_USE_ROUTE_TABLE(x) ((x) << S_USE_ROUTE_TABLE)
+#define F_USE_ROUTE_TABLE V_USE_ROUTE_TABLE(1U)
+
+#define S_ENABLE_CSPI 1
+#define V_ENABLE_CSPI(x) ((x) << S_ENABLE_CSPI)
+#define F_ENABLE_CSPI V_ENABLE_CSPI(1U)
+
+#define S_ENABLE_PCIX 2
+#define V_ENABLE_PCIX(x) ((x) << S_ENABLE_PCIX)
+#define F_ENABLE_PCIX V_ENABLE_PCIX(1U)
+
+#define A_RAT_ROUTE_TABLE_INDEX 0x584
+
+#define S_ROUTE_TABLE_INDEX 0
+#define M_ROUTE_TABLE_INDEX 0xf
+#define V_ROUTE_TABLE_INDEX(x) ((x) << S_ROUTE_TABLE_INDEX)
+#define G_ROUTE_TABLE_INDEX(x) (((x) >> S_ROUTE_TABLE_INDEX) & M_ROUTE_TABLE_INDEX)
+
+#define A_RAT_ROUTE_TABLE_DATA 0x588
+#define A_RAT_NO_ROUTE 0x58c
+
+#define S_CPL_OPCODE 0
+#define M_CPL_OPCODE 0xff
+#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE)
+#define G_CPL_OPCODE(x) (((x) >> S_CPL_OPCODE) & M_CPL_OPCODE)
+
+#define A_RAT_INTR_ENABLE 0x590
+
+#define S_ZEROROUTEERROR 0
+#define V_ZEROROUTEERROR(x) ((x) << S_ZEROROUTEERROR)
+#define F_ZEROROUTEERROR V_ZEROROUTEERROR(1U)
+
+#define S_CSPIFRAMINGERROR 1
+#define V_CSPIFRAMINGERROR(x) ((x) << S_CSPIFRAMINGERROR)
+#define F_CSPIFRAMINGERROR V_CSPIFRAMINGERROR(1U)
+
+#define S_SGEFRAMINGERROR 2
+#define V_SGEFRAMINGERROR(x) ((x) << S_SGEFRAMINGERROR)
+#define F_SGEFRAMINGERROR V_SGEFRAMINGERROR(1U)
+
+#define S_TPFRAMINGERROR 3
+#define V_TPFRAMINGERROR(x) ((x) << S_TPFRAMINGERROR)
+#define F_TPFRAMINGERROR V_TPFRAMINGERROR(1U)
+
+#define A_RAT_INTR_CAUSE 0x594
/* CSPI registers */
+#define A_CSPI_RX_AE_WM 0x810
+#define A_CSPI_RX_AF_WM 0x814
+#define A_CSPI_CALENDAR_LEN 0x818
+
+#define S_CALENDARLENGTH 0
+#define M_CALENDARLENGTH 0xffff
+#define V_CALENDARLENGTH(x) ((x) << S_CALENDARLENGTH)
+#define G_CALENDARLENGTH(x) (((x) >> S_CALENDARLENGTH) & M_CALENDARLENGTH)
+
+#define A_CSPI_FIFO_STATUS_ENABLE 0x820
+
+#define S_FIFOSTATUSENABLE 0
+#define V_FIFOSTATUSENABLE(x) ((x) << S_FIFOSTATUSENABLE)
+#define F_FIFOSTATUSENABLE V_FIFOSTATUSENABLE(1U)
+
+#define A_CSPI_MAXBURST1_MAXBURST2 0x828
+
+#define S_MAXBURST1 0
+#define M_MAXBURST1 0xffff
+#define V_MAXBURST1(x) ((x) << S_MAXBURST1)
+#define G_MAXBURST1(x) (((x) >> S_MAXBURST1) & M_MAXBURST1)
+
+#define S_MAXBURST2 16
+#define M_MAXBURST2 0xffff
+#define V_MAXBURST2(x) ((x) << S_MAXBURST2)
+#define G_MAXBURST2(x) (((x) >> S_MAXBURST2) & M_MAXBURST2)
+
+#define A_CSPI_TRAIN 0x82c
+
+#define S_CSPI_TRAIN_ALPHA 0
+#define M_CSPI_TRAIN_ALPHA 0xffff
+#define V_CSPI_TRAIN_ALPHA(x) ((x) << S_CSPI_TRAIN_ALPHA)
+#define G_CSPI_TRAIN_ALPHA(x) (((x) >> S_CSPI_TRAIN_ALPHA) & M_CSPI_TRAIN_ALPHA)
+
+#define S_CSPI_TRAIN_DATA_MAXT 16
+#define M_CSPI_TRAIN_DATA_MAXT 0xffff
+#define V_CSPI_TRAIN_DATA_MAXT(x) ((x) << S_CSPI_TRAIN_DATA_MAXT)
+#define G_CSPI_TRAIN_DATA_MAXT(x) (((x) >> S_CSPI_TRAIN_DATA_MAXT) & M_CSPI_TRAIN_DATA_MAXT)
+
+#define A_CSPI_INTR_STATUS 0x848
#define S_DIP4ERR 0
#define V_DIP4ERR(x) ((x) << S_DIP4ERR)
@@ -309,22 +1372,63 @@
#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
#define F_RAMPARITYERR V_RAMPARITYERR(1U)
-/* ESPI registers */
+#define A_CSPI_INTR_ENABLE 0x84c
+/* ESPI registers */
#define A_ESPI_SCH_TOKEN0 0x880
+
+#define S_SCHTOKEN0 0
+#define M_SCHTOKEN0 0xffff
+#define V_SCHTOKEN0(x) ((x) << S_SCHTOKEN0)
+#define G_SCHTOKEN0(x) (((x) >> S_SCHTOKEN0) & M_SCHTOKEN0)
+
#define A_ESPI_SCH_TOKEN1 0x884
+
+#define S_SCHTOKEN1 0
+#define M_SCHTOKEN1 0xffff
+#define V_SCHTOKEN1(x) ((x) << S_SCHTOKEN1)
+#define G_SCHTOKEN1(x) (((x) >> S_SCHTOKEN1) & M_SCHTOKEN1)
+
#define A_ESPI_SCH_TOKEN2 0x888
+
+#define S_SCHTOKEN2 0
+#define M_SCHTOKEN2 0xffff
+#define V_SCHTOKEN2(x) ((x) << S_SCHTOKEN2)
+#define G_SCHTOKEN2(x) (((x) >> S_SCHTOKEN2) & M_SCHTOKEN2)
+
#define A_ESPI_SCH_TOKEN3 0x88c
+
+#define S_SCHTOKEN3 0
+#define M_SCHTOKEN3 0xffff
+#define V_SCHTOKEN3(x) ((x) << S_SCHTOKEN3)
+#define G_SCHTOKEN3(x) (((x) >> S_SCHTOKEN3) & M_SCHTOKEN3)
+
#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
+
+#define S_ALMOSTEMPTY 0
+#define M_ALMOSTEMPTY 0xffff
+#define V_ALMOSTEMPTY(x) ((x) << S_ALMOSTEMPTY)
+#define G_ALMOSTEMPTY(x) (((x) >> S_ALMOSTEMPTY) & M_ALMOSTEMPTY)
+
#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
+
+#define S_ALMOSTFULL 0
+#define M_ALMOSTFULL 0xffff
+#define V_ALMOSTFULL(x) ((x) << S_ALMOSTFULL)
+#define G_ALMOSTFULL(x) (((x) >> S_ALMOSTFULL) & M_ALMOSTFULL)
+
#define A_ESPI_CALENDAR_LENGTH 0x898
#define A_PORT_CONFIG 0x89c
#define S_RX_NPORTS 0
+#define M_RX_NPORTS 0xff
#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
+#define G_RX_NPORTS(x) (((x) >> S_RX_NPORTS) & M_RX_NPORTS)
#define S_TX_NPORTS 8
+#define M_TX_NPORTS 0xff
#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
+#define G_TX_NPORTS(x) (((x) >> S_TX_NPORTS) & M_TX_NPORTS)
#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
@@ -332,12 +1436,124 @@
#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
#define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U)
+#define S_TXDROPENABLE 1
+#define V_TXDROPENABLE(x) ((x) << S_TXDROPENABLE)
+#define F_TXDROPENABLE V_TXDROPENABLE(1U)
+
+#define S_RXENDIANMODE 2
+#define V_RXENDIANMODE(x) ((x) << S_RXENDIANMODE)
+#define F_RXENDIANMODE V_RXENDIANMODE(1U)
+
+#define S_TXENDIANMODE 3
+#define V_TXENDIANMODE(x) ((x) << S_TXENDIANMODE)
+#define F_TXENDIANMODE V_TXENDIANMODE(1U)
+
#define S_INTEL1010MODE 4
#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
#define F_INTEL1010MODE V_INTEL1010MODE(1U)
#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
#define A_ESPI_TRAIN 0x8ac
+
+#define S_MAXTRAINALPHA 0
+#define M_MAXTRAINALPHA 0xffff
+#define V_MAXTRAINALPHA(x) ((x) << S_MAXTRAINALPHA)
+#define G_MAXTRAINALPHA(x) (((x) >> S_MAXTRAINALPHA) & M_MAXTRAINALPHA)
+
+#define S_MAXTRAINDATA 16
+#define M_MAXTRAINDATA 0xffff
+#define V_MAXTRAINDATA(x) ((x) << S_MAXTRAINDATA)
+#define G_MAXTRAINDATA(x) (((x) >> S_MAXTRAINDATA) & M_MAXTRAINDATA)
+
+#define A_RAM_STATUS 0x8b0
+
+#define S_RXFIFOPARITYERROR 0
+#define M_RXFIFOPARITYERROR 0x3ff
+#define V_RXFIFOPARITYERROR(x) ((x) << S_RXFIFOPARITYERROR)
+#define G_RXFIFOPARITYERROR(x) (((x) >> S_RXFIFOPARITYERROR) & M_RXFIFOPARITYERROR)
+
+#define S_TXFIFOPARITYERROR 10
+#define M_TXFIFOPARITYERROR 0x3ff
+#define V_TXFIFOPARITYERROR(x) ((x) << S_TXFIFOPARITYERROR)
+#define G_TXFIFOPARITYERROR(x) (((x) >> S_TXFIFOPARITYERROR) & M_TXFIFOPARITYERROR)
+
+#define S_RXFIFOOVERFLOW 20
+#define M_RXFIFOOVERFLOW 0x3ff
+#define V_RXFIFOOVERFLOW(x) ((x) << S_RXFIFOOVERFLOW)
+#define G_RXFIFOOVERFLOW(x) (((x) >> S_RXFIFOOVERFLOW) & M_RXFIFOOVERFLOW)
+
+#define A_TX_DROP_COUNT0 0x8b4
+
+#define S_TXPORT0DROPCNT 0
+#define M_TXPORT0DROPCNT 0xffff
+#define V_TXPORT0DROPCNT(x) ((x) << S_TXPORT0DROPCNT)
+#define G_TXPORT0DROPCNT(x) (((x) >> S_TXPORT0DROPCNT) & M_TXPORT0DROPCNT)
+
+#define S_TXPORT1DROPCNT 16
+#define M_TXPORT1DROPCNT 0xffff
+#define V_TXPORT1DROPCNT(x) ((x) << S_TXPORT1DROPCNT)
+#define G_TXPORT1DROPCNT(x) (((x) >> S_TXPORT1DROPCNT) & M_TXPORT1DROPCNT)
+
+#define A_TX_DROP_COUNT1 0x8b8
+
+#define S_TXPORT2DROPCNT 0
+#define M_TXPORT2DROPCNT 0xffff
+#define V_TXPORT2DROPCNT(x) ((x) << S_TXPORT2DROPCNT)
+#define G_TXPORT2DROPCNT(x) (((x) >> S_TXPORT2DROPCNT) & M_TXPORT2DROPCNT)
+
+#define S_TXPORT3DROPCNT 16
+#define M_TXPORT3DROPCNT 0xffff
+#define V_TXPORT3DROPCNT(x) ((x) << S_TXPORT3DROPCNT)
+#define G_TXPORT3DROPCNT(x) (((x) >> S_TXPORT3DROPCNT) & M_TXPORT3DROPCNT)
+
+#define A_RX_DROP_COUNT0 0x8bc
+
+#define S_RXPORT0DROPCNT 0
+#define M_RXPORT0DROPCNT 0xffff
+#define V_RXPORT0DROPCNT(x) ((x) << S_RXPORT0DROPCNT)
+#define G_RXPORT0DROPCNT(x) (((x) >> S_RXPORT0DROPCNT) & M_RXPORT0DROPCNT)
+
+#define S_RXPORT1DROPCNT 16
+#define M_RXPORT1DROPCNT 0xffff
+#define V_RXPORT1DROPCNT(x) ((x) << S_RXPORT1DROPCNT)
+#define G_RXPORT1DROPCNT(x) (((x) >> S_RXPORT1DROPCNT) & M_RXPORT1DROPCNT)
+
+#define A_RX_DROP_COUNT1 0x8c0
+
+#define S_RXPORT2DROPCNT 0
+#define M_RXPORT2DROPCNT 0xffff
+#define V_RXPORT2DROPCNT(x) ((x) << S_RXPORT2DROPCNT)
+#define G_RXPORT2DROPCNT(x) (((x) >> S_RXPORT2DROPCNT) & M_RXPORT2DROPCNT)
+
+#define S_RXPORT3DROPCNT 16
+#define M_RXPORT3DROPCNT 0xffff
+#define V_RXPORT3DROPCNT(x) ((x) << S_RXPORT3DROPCNT)
+#define G_RXPORT3DROPCNT(x) (((x) >> S_RXPORT3DROPCNT) & M_RXPORT3DROPCNT)
+
+#define A_DIP4_ERROR_COUNT 0x8c4
+
+#define S_DIP4ERRORCNT 0
+#define M_DIP4ERRORCNT 0xfff
+#define V_DIP4ERRORCNT(x) ((x) << S_DIP4ERRORCNT)
+#define G_DIP4ERRORCNT(x) (((x) >> S_DIP4ERRORCNT) & M_DIP4ERRORCNT)
+
+#define S_DIP4ERRORCNTSHADOW 12
+#define M_DIP4ERRORCNTSHADOW 0xfff
+#define V_DIP4ERRORCNTSHADOW(x) ((x) << S_DIP4ERRORCNTSHADOW)
+#define G_DIP4ERRORCNTSHADOW(x) (((x) >> S_DIP4ERRORCNTSHADOW) & M_DIP4ERRORCNTSHADOW)
+
+#define S_TRICN_RX_TRAIN_ERR 24
+#define V_TRICN_RX_TRAIN_ERR(x) ((x) << S_TRICN_RX_TRAIN_ERR)
+#define F_TRICN_RX_TRAIN_ERR V_TRICN_RX_TRAIN_ERR(1U)
+
+#define S_TRICN_RX_TRAINING 25
+#define V_TRICN_RX_TRAINING(x) ((x) << S_TRICN_RX_TRAINING)
+#define F_TRICN_RX_TRAINING V_TRICN_RX_TRAINING(1U)
+
+#define S_TRICN_RX_TRAIN_OK 26
+#define V_TRICN_RX_TRAIN_OK(x) ((x) << S_TRICN_RX_TRAIN_OK)
+#define F_TRICN_RX_TRAIN_OK V_TRICN_RX_TRAIN_OK(1U)
+
#define A_ESPI_INTR_STATUS 0x8c8
#define S_DIP2PARITYERR 5
@@ -347,19 +1563,56 @@
#define A_ESPI_INTR_ENABLE 0x8cc
#define A_RX_DROP_THRESHOLD 0x8d0
#define A_ESPI_RX_RESET 0x8ec
+
+#define S_ESPI_RX_LNK_RST 0
+#define V_ESPI_RX_LNK_RST(x) ((x) << S_ESPI_RX_LNK_RST)
+#define F_ESPI_RX_LNK_RST V_ESPI_RX_LNK_RST(1U)
+
+#define S_ESPI_RX_CORE_RST 1
+#define V_ESPI_RX_CORE_RST(x) ((x) << S_ESPI_RX_CORE_RST)
+#define F_ESPI_RX_CORE_RST V_ESPI_RX_CORE_RST(1U)
+
+#define S_RX_CLK_STATUS 2
+#define V_RX_CLK_STATUS(x) ((x) << S_RX_CLK_STATUS)
+#define F_RX_CLK_STATUS V_RX_CLK_STATUS(1U)
+
#define A_ESPI_MISC_CONTROL 0x8f0
#define S_OUT_OF_SYNC_COUNT 0
+#define M_OUT_OF_SYNC_COUNT 0xf
#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
+#define G_OUT_OF_SYNC_COUNT(x) (((x) >> S_OUT_OF_SYNC_COUNT) & M_OUT_OF_SYNC_COUNT)
+
+#define S_DIP2_COUNT_MODE_ENABLE 4
+#define V_DIP2_COUNT_MODE_ENABLE(x) ((x) << S_DIP2_COUNT_MODE_ENABLE)
+#define F_DIP2_COUNT_MODE_ENABLE V_DIP2_COUNT_MODE_ENABLE(1U)
#define S_DIP2_PARITY_ERR_THRES 5
+#define M_DIP2_PARITY_ERR_THRES 0xf
#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
+#define G_DIP2_PARITY_ERR_THRES(x) (((x) >> S_DIP2_PARITY_ERR_THRES) & M_DIP2_PARITY_ERR_THRES)
#define S_DIP4_THRES 9
+#define M_DIP4_THRES 0xfff
#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
+#define G_DIP4_THRES(x) (((x) >> S_DIP4_THRES) & M_DIP4_THRES)
+
+#define S_DIP4_THRES_ENABLE 21
+#define V_DIP4_THRES_ENABLE(x) ((x) << S_DIP4_THRES_ENABLE)
+#define F_DIP4_THRES_ENABLE V_DIP4_THRES_ENABLE(1U)
+
+#define S_FORCE_DISABLE_STATUS 22
+#define V_FORCE_DISABLE_STATUS(x) ((x) << S_FORCE_DISABLE_STATUS)
+#define F_FORCE_DISABLE_STATUS V_FORCE_DISABLE_STATUS(1U)
+
+#define S_DYNAMIC_DESKEW 23
+#define V_DYNAMIC_DESKEW(x) ((x) << S_DYNAMIC_DESKEW)
+#define F_DYNAMIC_DESKEW V_DYNAMIC_DESKEW(1U)
#define S_MONITORED_PORT_NUM 25
+#define M_MONITORED_PORT_NUM 0x3
#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
+#define G_MONITORED_PORT_NUM(x) (((x) >> S_MONITORED_PORT_NUM) & M_MONITORED_PORT_NUM)
#define S_MONITORED_DIRECTION 27
#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
@@ -370,33 +1623,125 @@
#define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U)
#define A_ESPI_DIP2_ERR_COUNT 0x8f4
+
+#define S_DIP2_ERR_CNT 0
+#define M_DIP2_ERR_CNT 0xf
+#define V_DIP2_ERR_CNT(x) ((x) << S_DIP2_ERR_CNT)
+#define G_DIP2_ERR_CNT(x) (((x) >> S_DIP2_ERR_CNT) & M_DIP2_ERR_CNT)
+
#define A_ESPI_CMD_ADDR 0x8f8
#define S_WRITE_DATA 0
+#define M_WRITE_DATA 0xff
#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
+#define G_WRITE_DATA(x) (((x) >> S_WRITE_DATA) & M_WRITE_DATA)
#define S_REGISTER_OFFSET 8
+#define M_REGISTER_OFFSET 0xf
#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
+#define G_REGISTER_OFFSET(x) (((x) >> S_REGISTER_OFFSET) & M_REGISTER_OFFSET)
#define S_CHANNEL_ADDR 12
+#define M_CHANNEL_ADDR 0xf
#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
+#define G_CHANNEL_ADDR(x) (((x) >> S_CHANNEL_ADDR) & M_CHANNEL_ADDR)
#define S_MODULE_ADDR 16
+#define M_MODULE_ADDR 0x3
#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
+#define G_MODULE_ADDR(x) (((x) >> S_MODULE_ADDR) & M_MODULE_ADDR)
#define S_BUNDLE_ADDR 20
+#define M_BUNDLE_ADDR 0x3
#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
+#define G_BUNDLE_ADDR(x) (((x) >> S_BUNDLE_ADDR) & M_BUNDLE_ADDR)
#define S_SPI4_COMMAND 24
+#define M_SPI4_COMMAND 0xff
#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
+#define G_SPI4_COMMAND(x) (((x) >> S_SPI4_COMMAND) & M_SPI4_COMMAND)
#define A_ESPI_GOSTAT 0x8fc
+
+#define S_READ_DATA 0
+#define M_READ_DATA 0xff
+#define V_READ_DATA(x) ((x) << S_READ_DATA)
+#define G_READ_DATA(x) (((x) >> S_READ_DATA) & M_READ_DATA)
+
#define S_ESPI_CMD_BUSY 8
#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
#define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U)
-/* PL registers */
+#define S_ERROR_ACK 9
+#define V_ERROR_ACK(x) ((x) << S_ERROR_ACK)
+#define F_ERROR_ACK V_ERROR_ACK(1U)
+
+#define S_UNMAPPED_ERR 10
+#define V_UNMAPPED_ERR(x) ((x) << S_UNMAPPED_ERR)
+#define F_UNMAPPED_ERR V_UNMAPPED_ERR(1U)
+
+#define S_TRANSACTION_TIMER 16
+#define M_TRANSACTION_TIMER 0xff
+#define V_TRANSACTION_TIMER(x) ((x) << S_TRANSACTION_TIMER)
+#define G_TRANSACTION_TIMER(x) (((x) >> S_TRANSACTION_TIMER) & M_TRANSACTION_TIMER)
+
+
+/* ULP registers */
+#define A_ULP_ULIMIT 0x980
+#define A_ULP_TAGMASK 0x984
+#define A_ULP_HREG_INDEX 0x988
+#define A_ULP_HREG_DATA 0x98c
+#define A_ULP_INT_ENABLE 0x990
+#define A_ULP_INT_CAUSE 0x994
+#define S_HREG_PAR_ERR 0
+#define V_HREG_PAR_ERR(x) ((x) << S_HREG_PAR_ERR)
+#define F_HREG_PAR_ERR V_HREG_PAR_ERR(1U)
+
+#define S_EGRS_DATA_PAR_ERR 1
+#define V_EGRS_DATA_PAR_ERR(x) ((x) << S_EGRS_DATA_PAR_ERR)
+#define F_EGRS_DATA_PAR_ERR V_EGRS_DATA_PAR_ERR(1U)
+
+#define S_INGRS_DATA_PAR_ERR 2
+#define V_INGRS_DATA_PAR_ERR(x) ((x) << S_INGRS_DATA_PAR_ERR)
+#define F_INGRS_DATA_PAR_ERR V_INGRS_DATA_PAR_ERR(1U)
+
+#define S_PM_INTR 3
+#define V_PM_INTR(x) ((x) << S_PM_INTR)
+#define F_PM_INTR V_PM_INTR(1U)
+
+#define S_PM_E2C_SYNC_ERR 4
+#define V_PM_E2C_SYNC_ERR(x) ((x) << S_PM_E2C_SYNC_ERR)
+#define F_PM_E2C_SYNC_ERR V_PM_E2C_SYNC_ERR(1U)
+
+#define S_PM_C2E_SYNC_ERR 5
+#define V_PM_C2E_SYNC_ERR(x) ((x) << S_PM_C2E_SYNC_ERR)
+#define F_PM_C2E_SYNC_ERR V_PM_C2E_SYNC_ERR(1U)
+
+#define S_PM_E2C_EMPTY_ERR 6
+#define V_PM_E2C_EMPTY_ERR(x) ((x) << S_PM_E2C_EMPTY_ERR)
+#define F_PM_E2C_EMPTY_ERR V_PM_E2C_EMPTY_ERR(1U)
+
+#define S_PM_C2E_EMPTY_ERR 7
+#define V_PM_C2E_EMPTY_ERR(x) ((x) << S_PM_C2E_EMPTY_ERR)
+#define F_PM_C2E_EMPTY_ERR V_PM_C2E_EMPTY_ERR(1U)
+
+#define S_PM_PAR_ERR 8
+#define M_PM_PAR_ERR 0xffff
+#define V_PM_PAR_ERR(x) ((x) << S_PM_PAR_ERR)
+#define G_PM_PAR_ERR(x) (((x) >> S_PM_PAR_ERR) & M_PM_PAR_ERR)
+
+#define S_PM_E2C_WRT_FULL 24
+#define V_PM_E2C_WRT_FULL(x) ((x) << S_PM_E2C_WRT_FULL)
+#define F_PM_E2C_WRT_FULL V_PM_E2C_WRT_FULL(1U)
+
+#define S_PM_C2E_WRT_FULL 25
+#define V_PM_C2E_WRT_FULL(x) ((x) << S_PM_C2E_WRT_FULL)
+#define F_PM_C2E_WRT_FULL V_PM_C2E_WRT_FULL(1U)
+
+#define A_ULP_PIO_CTRL 0x998
+
+/* PL registers */
#define A_PL_ENABLE 0xa00
#define S_PL_INTR_SGE_ERR 0
@@ -407,14 +1752,38 @@
#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
#define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U)
+#define S_PL_INTR_MC3 2
+#define V_PL_INTR_MC3(x) ((x) << S_PL_INTR_MC3)
+#define F_PL_INTR_MC3 V_PL_INTR_MC3(1U)
+
+#define S_PL_INTR_MC4 3
+#define V_PL_INTR_MC4(x) ((x) << S_PL_INTR_MC4)
+#define F_PL_INTR_MC4 V_PL_INTR_MC4(1U)
+
+#define S_PL_INTR_MC5 4
+#define V_PL_INTR_MC5(x) ((x) << S_PL_INTR_MC5)
+#define F_PL_INTR_MC5 V_PL_INTR_MC5(1U)
+
+#define S_PL_INTR_RAT 5
+#define V_PL_INTR_RAT(x) ((x) << S_PL_INTR_RAT)
+#define F_PL_INTR_RAT V_PL_INTR_RAT(1U)
+
#define S_PL_INTR_TP 6
#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
#define F_PL_INTR_TP V_PL_INTR_TP(1U)
+#define S_PL_INTR_ULP 7
+#define V_PL_INTR_ULP(x) ((x) << S_PL_INTR_ULP)
+#define F_PL_INTR_ULP V_PL_INTR_ULP(1U)
+
#define S_PL_INTR_ESPI 8
#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
#define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U)
+#define S_PL_INTR_CSPI 9
+#define V_PL_INTR_CSPI(x) ((x) << S_PL_INTR_CSPI)
+#define F_PL_INTR_CSPI V_PL_INTR_CSPI(1U)
+
#define S_PL_INTR_PCIX 10
#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
#define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U)
@@ -426,43 +1795,374 @@
#define A_PL_CAUSE 0xa04
/* MC5 registers */
-
#define A_MC5_CONFIG 0xc04
+#define S_MODE 0
+#define V_MODE(x) ((x) << S_MODE)
+#define F_MODE V_MODE(1U)
+
#define S_TCAM_RESET 1
#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
#define F_TCAM_RESET V_TCAM_RESET(1U)
+#define S_TCAM_READY 2
+#define V_TCAM_READY(x) ((x) << S_TCAM_READY)
+#define F_TCAM_READY V_TCAM_READY(1U)
+
+#define S_DBGI_ENABLE 4
+#define V_DBGI_ENABLE(x) ((x) << S_DBGI_ENABLE)
+#define F_DBGI_ENABLE V_DBGI_ENABLE(1U)
+
#define S_M_BUS_ENABLE 5
#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
#define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U)
-/* PCICFG registers */
+#define S_PARITY_ENABLE 6
+#define V_PARITY_ENABLE(x) ((x) << S_PARITY_ENABLE)
+#define F_PARITY_ENABLE V_PARITY_ENABLE(1U)
+
+#define S_SYN_ISSUE_MODE 7
+#define M_SYN_ISSUE_MODE 0x3
+#define V_SYN_ISSUE_MODE(x) ((x) << S_SYN_ISSUE_MODE)
+#define G_SYN_ISSUE_MODE(x) (((x) >> S_SYN_ISSUE_MODE) & M_SYN_ISSUE_MODE)
+
+#define S_BUILD 16
+#define V_BUILD(x) ((x) << S_BUILD)
+#define F_BUILD V_BUILD(1U)
+
+#define S_COMPRESSION_ENABLE 17
+#define V_COMPRESSION_ENABLE(x) ((x) << S_COMPRESSION_ENABLE)
+#define F_COMPRESSION_ENABLE V_COMPRESSION_ENABLE(1U)
+
+#define S_NUM_LIP 18
+#define M_NUM_LIP 0x3f
+#define V_NUM_LIP(x) ((x) << S_NUM_LIP)
+#define G_NUM_LIP(x) (((x) >> S_NUM_LIP) & M_NUM_LIP)
+
+#define S_TCAM_PART_CNT 24
+#define M_TCAM_PART_CNT 0x3
+#define V_TCAM_PART_CNT(x) ((x) << S_TCAM_PART_CNT)
+#define G_TCAM_PART_CNT(x) (((x) >> S_TCAM_PART_CNT) & M_TCAM_PART_CNT)
+
+#define S_TCAM_PART_TYPE 26
+#define M_TCAM_PART_TYPE 0x3
+#define V_TCAM_PART_TYPE(x) ((x) << S_TCAM_PART_TYPE)
+#define G_TCAM_PART_TYPE(x) (((x) >> S_TCAM_PART_TYPE) & M_TCAM_PART_TYPE)
+
+#define S_TCAM_PART_SIZE 28
+#define M_TCAM_PART_SIZE 0x3
+#define V_TCAM_PART_SIZE(x) ((x) << S_TCAM_PART_SIZE)
+#define G_TCAM_PART_SIZE(x) (((x) >> S_TCAM_PART_SIZE) & M_TCAM_PART_SIZE)
+
+#define S_TCAM_PART_TYPE_HI 30
+#define V_TCAM_PART_TYPE_HI(x) ((x) << S_TCAM_PART_TYPE_HI)
+#define F_TCAM_PART_TYPE_HI V_TCAM_PART_TYPE_HI(1U)
+
+#define A_MC5_SIZE 0xc08
+
+#define S_SIZE 0
+#define M_SIZE 0x3fffff
+#define V_SIZE(x) ((x) << S_SIZE)
+#define G_SIZE(x) (((x) >> S_SIZE) & M_SIZE)
+
+#define A_MC5_ROUTING_TABLE_INDEX 0xc0c
+#define S_START_OF_ROUTING_TABLE 0
+#define M_START_OF_ROUTING_TABLE 0x3fffff
+#define V_START_OF_ROUTING_TABLE(x) ((x) << S_START_OF_ROUTING_TABLE)
+#define G_START_OF_ROUTING_TABLE(x) (((x) >> S_START_OF_ROUTING_TABLE) & M_START_OF_ROUTING_TABLE)
+
+#define A_MC5_SERVER_INDEX 0xc14
+
+#define S_START_OF_SERVER_INDEX 0
+#define M_START_OF_SERVER_INDEX 0x3fffff
+#define V_START_OF_SERVER_INDEX(x) ((x) << S_START_OF_SERVER_INDEX)
+#define G_START_OF_SERVER_INDEX(x) (((x) >> S_START_OF_SERVER_INDEX) & M_START_OF_SERVER_INDEX)
+
+#define A_MC5_LIP_RAM_ADDR 0xc18
+
+#define S_LOCAL_IP_RAM_ADDR 0
+#define M_LOCAL_IP_RAM_ADDR 0x3f
+#define V_LOCAL_IP_RAM_ADDR(x) ((x) << S_LOCAL_IP_RAM_ADDR)
+#define G_LOCAL_IP_RAM_ADDR(x) (((x) >> S_LOCAL_IP_RAM_ADDR) & M_LOCAL_IP_RAM_ADDR)
+
+#define S_RAM_WRITE_ENABLE 8
+#define V_RAM_WRITE_ENABLE(x) ((x) << S_RAM_WRITE_ENABLE)
+#define F_RAM_WRITE_ENABLE V_RAM_WRITE_ENABLE(1U)
+
+#define A_MC5_LIP_RAM_DATA 0xc1c
+#define A_MC5_RSP_LATENCY 0xc20
+
+#define S_SEARCH_RESPONSE_LATENCY 0
+#define M_SEARCH_RESPONSE_LATENCY 0x1f
+#define V_SEARCH_RESPONSE_LATENCY(x) ((x) << S_SEARCH_RESPONSE_LATENCY)
+#define G_SEARCH_RESPONSE_LATENCY(x) (((x) >> S_SEARCH_RESPONSE_LATENCY) & M_SEARCH_RESPONSE_LATENCY)
+
+#define S_LEARN_RESPONSE_LATENCY 8
+#define M_LEARN_RESPONSE_LATENCY 0x1f
+#define V_LEARN_RESPONSE_LATENCY(x) ((x) << S_LEARN_RESPONSE_LATENCY)
+#define G_LEARN_RESPONSE_LATENCY(x) (((x) >> S_LEARN_RESPONSE_LATENCY) & M_LEARN_RESPONSE_LATENCY)
+
+#define A_MC5_PARITY_LATENCY 0xc24
+
+#define S_SRCHLAT 0
+#define M_SRCHLAT 0x1f
+#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
+#define G_SRCHLAT(x) (((x) >> S_SRCHLAT) & M_SRCHLAT)
+
+#define S_PARLAT 8
+#define M_PARLAT 0x1f
+#define V_PARLAT(x) ((x) << S_PARLAT)
+#define G_PARLAT(x) (((x) >> S_PARLAT) & M_PARLAT)
+
+#define A_MC5_WR_LRN_VERIFY 0xc28
+
+#define S_POVEREN 0
+#define V_POVEREN(x) ((x) << S_POVEREN)
+#define F_POVEREN V_POVEREN(1U)
+
+#define S_LRNVEREN 1
+#define V_LRNVEREN(x) ((x) << S_LRNVEREN)
+#define F_LRNVEREN V_LRNVEREN(1U)
+
+#define S_VWVEREN 2
+#define V_VWVEREN(x) ((x) << S_VWVEREN)
+#define F_VWVEREN V_VWVEREN(1U)
+
+#define A_MC5_PART_ID_INDEX 0xc2c
+
+#define S_IDINDEX 0
+#define M_IDINDEX 0xf
+#define V_IDINDEX(x) ((x) << S_IDINDEX)
+#define G_IDINDEX(x) (((x) >> S_IDINDEX) & M_IDINDEX)
+
+#define A_MC5_RESET_MAX 0xc30
+
+#define S_RSTMAX 0
+#define M_RSTMAX 0x1ff
+#define V_RSTMAX(x) ((x) << S_RSTMAX)
+#define G_RSTMAX(x) (((x) >> S_RSTMAX) & M_RSTMAX)
+
+#define A_MC5_INT_ENABLE 0xc40
+
+#define S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR 0
+#define V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR)
+#define F_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(1U)
+
+#define S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR 1
+#define V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR)
+#define F_MC5_INT_HIT_IN_ACTIVE_REGION_ERR V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(1U)
+
+#define S_MC5_INT_HIT_IN_RT_REGION_ERR 2
+#define V_MC5_INT_HIT_IN_RT_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_RT_REGION_ERR)
+#define F_MC5_INT_HIT_IN_RT_REGION_ERR V_MC5_INT_HIT_IN_RT_REGION_ERR(1U)
+
+#define S_MC5_INT_MISS_ERR 3
+#define V_MC5_INT_MISS_ERR(x) ((x) << S_MC5_INT_MISS_ERR)
+#define F_MC5_INT_MISS_ERR V_MC5_INT_MISS_ERR(1U)
+
+#define S_MC5_INT_LIP0_ERR 4
+#define V_MC5_INT_LIP0_ERR(x) ((x) << S_MC5_INT_LIP0_ERR)
+#define F_MC5_INT_LIP0_ERR V_MC5_INT_LIP0_ERR(1U)
+
+#define S_MC5_INT_LIP_MISS_ERR 5
+#define V_MC5_INT_LIP_MISS_ERR(x) ((x) << S_MC5_INT_LIP_MISS_ERR)
+#define F_MC5_INT_LIP_MISS_ERR V_MC5_INT_LIP_MISS_ERR(1U)
+
+#define S_MC5_INT_PARITY_ERR 6
+#define V_MC5_INT_PARITY_ERR(x) ((x) << S_MC5_INT_PARITY_ERR)
+#define F_MC5_INT_PARITY_ERR V_MC5_INT_PARITY_ERR(1U)
+
+#define S_MC5_INT_ACTIVE_REGION_FULL 7
+#define V_MC5_INT_ACTIVE_REGION_FULL(x) ((x) << S_MC5_INT_ACTIVE_REGION_FULL)
+#define F_MC5_INT_ACTIVE_REGION_FULL V_MC5_INT_ACTIVE_REGION_FULL(1U)
+
+#define S_MC5_INT_NFA_SRCH_ERR 8
+#define V_MC5_INT_NFA_SRCH_ERR(x) ((x) << S_MC5_INT_NFA_SRCH_ERR)
+#define F_MC5_INT_NFA_SRCH_ERR V_MC5_INT_NFA_SRCH_ERR(1U)
+
+#define S_MC5_INT_SYN_COOKIE 9
+#define V_MC5_INT_SYN_COOKIE(x) ((x) << S_MC5_INT_SYN_COOKIE)
+#define F_MC5_INT_SYN_COOKIE V_MC5_INT_SYN_COOKIE(1U)
+
+#define S_MC5_INT_SYN_COOKIE_BAD 10
+#define V_MC5_INT_SYN_COOKIE_BAD(x) ((x) << S_MC5_INT_SYN_COOKIE_BAD)
+#define F_MC5_INT_SYN_COOKIE_BAD V_MC5_INT_SYN_COOKIE_BAD(1U)
+
+#define S_MC5_INT_SYN_COOKIE_OFF 11
+#define V_MC5_INT_SYN_COOKIE_OFF(x) ((x) << S_MC5_INT_SYN_COOKIE_OFF)
+#define F_MC5_INT_SYN_COOKIE_OFF V_MC5_INT_SYN_COOKIE_OFF(1U)
+
+#define S_MC5_INT_UNKNOWN_CMD 15
+#define V_MC5_INT_UNKNOWN_CMD(x) ((x) << S_MC5_INT_UNKNOWN_CMD)
+#define F_MC5_INT_UNKNOWN_CMD V_MC5_INT_UNKNOWN_CMD(1U)
+
+#define S_MC5_INT_REQUESTQ_PARITY_ERR 16
+#define V_MC5_INT_REQUESTQ_PARITY_ERR(x) ((x) << S_MC5_INT_REQUESTQ_PARITY_ERR)
+#define F_MC5_INT_REQUESTQ_PARITY_ERR V_MC5_INT_REQUESTQ_PARITY_ERR(1U)
+
+#define S_MC5_INT_DISPATCHQ_PARITY_ERR 17
+#define V_MC5_INT_DISPATCHQ_PARITY_ERR(x) ((x) << S_MC5_INT_DISPATCHQ_PARITY_ERR)
+#define F_MC5_INT_DISPATCHQ_PARITY_ERR V_MC5_INT_DISPATCHQ_PARITY_ERR(1U)
+
+#define S_MC5_INT_DEL_ACT_EMPTY 18
+#define V_MC5_INT_DEL_ACT_EMPTY(x) ((x) << S_MC5_INT_DEL_ACT_EMPTY)
+#define F_MC5_INT_DEL_ACT_EMPTY V_MC5_INT_DEL_ACT_EMPTY(1U)
+
+#define A_MC5_INT_CAUSE 0xc44
+#define A_MC5_INT_TID 0xc48
+#define A_MC5_INT_PTID 0xc4c
+#define A_MC5_DBGI_CONFIG 0xc74
+#define A_MC5_DBGI_REQ_CMD 0xc78
+
+#define S_CMDMODE 0
+#define M_CMDMODE 0x7
+#define V_CMDMODE(x) ((x) << S_CMDMODE)
+#define G_CMDMODE(x) (((x) >> S_CMDMODE) & M_CMDMODE)
+
+#define S_SADRSEL 4
+#define V_SADRSEL(x) ((x) << S_SADRSEL)
+#define F_SADRSEL V_SADRSEL(1U)
+
+#define S_WRITE_BURST_SIZE 22
+#define M_WRITE_BURST_SIZE 0x3ff
+#define V_WRITE_BURST_SIZE(x) ((x) << S_WRITE_BURST_SIZE)
+#define G_WRITE_BURST_SIZE(x) (((x) >> S_WRITE_BURST_SIZE) & M_WRITE_BURST_SIZE)
+
+#define A_MC5_DBGI_REQ_ADDR0 0xc7c
+#define A_MC5_DBGI_REQ_ADDR1 0xc80
+#define A_MC5_DBGI_REQ_ADDR2 0xc84
+#define A_MC5_DBGI_REQ_DATA0 0xc88
+#define A_MC5_DBGI_REQ_DATA1 0xc8c
+#define A_MC5_DBGI_REQ_DATA2 0xc90
+#define A_MC5_DBGI_REQ_DATA3 0xc94
+#define A_MC5_DBGI_REQ_DATA4 0xc98
+#define A_MC5_DBGI_REQ_MASK0 0xc9c
+#define A_MC5_DBGI_REQ_MASK1 0xca0
+#define A_MC5_DBGI_REQ_MASK2 0xca4
+#define A_MC5_DBGI_REQ_MASK3 0xca8
+#define A_MC5_DBGI_REQ_MASK4 0xcac
+#define A_MC5_DBGI_RSP_STATUS 0xcb0
+
+#define S_DBGI_RSP_VALID 0
+#define V_DBGI_RSP_VALID(x) ((x) << S_DBGI_RSP_VALID)
+#define F_DBGI_RSP_VALID V_DBGI_RSP_VALID(1U)
+
+#define S_DBGI_RSP_HIT 1
+#define V_DBGI_RSP_HIT(x) ((x) << S_DBGI_RSP_HIT)
+#define F_DBGI_RSP_HIT V_DBGI_RSP_HIT(1U)
+
+#define S_DBGI_RSP_ERR 2
+#define V_DBGI_RSP_ERR(x) ((x) << S_DBGI_RSP_ERR)
+#define F_DBGI_RSP_ERR V_DBGI_RSP_ERR(1U)
+
+#define S_DBGI_RSP_ERR_REASON 8
+#define M_DBGI_RSP_ERR_REASON 0x7
+#define V_DBGI_RSP_ERR_REASON(x) ((x) << S_DBGI_RSP_ERR_REASON)
+#define G_DBGI_RSP_ERR_REASON(x) (((x) >> S_DBGI_RSP_ERR_REASON) & M_DBGI_RSP_ERR_REASON)
+
+#define A_MC5_DBGI_RSP_DATA0 0xcb4
+#define A_MC5_DBGI_RSP_DATA1 0xcb8
+#define A_MC5_DBGI_RSP_DATA2 0xcbc
+#define A_MC5_DBGI_RSP_DATA3 0xcc0
+#define A_MC5_DBGI_RSP_DATA4 0xcc4
+#define A_MC5_DBGI_RSP_LAST_CMD 0xcc8
+#define A_MC5_POPEN_DATA_WR_CMD 0xccc
+#define A_MC5_POPEN_MASK_WR_CMD 0xcd0
+#define A_MC5_AOPEN_SRCH_CMD 0xcd4
+#define A_MC5_AOPEN_LRN_CMD 0xcd8
+#define A_MC5_SYN_SRCH_CMD 0xcdc
+#define A_MC5_SYN_LRN_CMD 0xce0
+#define A_MC5_ACK_SRCH_CMD 0xce4
+#define A_MC5_ACK_LRN_CMD 0xce8
+#define A_MC5_ILOOKUP_CMD 0xcec
+#define A_MC5_ELOOKUP_CMD 0xcf0
+#define A_MC5_DATA_WRITE_CMD 0xcf4
+#define A_MC5_DATA_READ_CMD 0xcf8
+#define A_MC5_MASK_WRITE_CMD 0xcfc
+
+/* PCICFG registers */
#define A_PCICFG_PM_CSR 0x44
#define A_PCICFG_VPD_ADDR 0x4a
+#define S_VPD_ADDR 0
+#define M_VPD_ADDR 0x7fff
+#define V_VPD_ADDR(x) ((x) << S_VPD_ADDR)
+#define G_VPD_ADDR(x) (((x) >> S_VPD_ADDR) & M_VPD_ADDR)
+
#define S_VPD_OP_FLAG 15
#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
#define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U)
#define A_PCICFG_VPD_DATA 0x4c
-
+#define A_PCICFG_PCIX_CMD 0x60
#define A_PCICFG_INTR_ENABLE 0xf4
-#define A_PCICFG_INTR_CAUSE 0xf8
+#define S_MASTER_PARITY_ERR 0
+#define V_MASTER_PARITY_ERR(x) ((x) << S_MASTER_PARITY_ERR)
+#define F_MASTER_PARITY_ERR V_MASTER_PARITY_ERR(1U)
+
+#define S_SIG_TARGET_ABORT 1
+#define V_SIG_TARGET_ABORT(x) ((x) << S_SIG_TARGET_ABORT)
+#define F_SIG_TARGET_ABORT V_SIG_TARGET_ABORT(1U)
+
+#define S_RCV_TARGET_ABORT 2
+#define V_RCV_TARGET_ABORT(x) ((x) << S_RCV_TARGET_ABORT)
+#define F_RCV_TARGET_ABORT V_RCV_TARGET_ABORT(1U)
+
+#define S_RCV_MASTER_ABORT 3
+#define V_RCV_MASTER_ABORT(x) ((x) << S_RCV_MASTER_ABORT)
+#define F_RCV_MASTER_ABORT V_RCV_MASTER_ABORT(1U)
+
+#define S_SIG_SYS_ERR 4
+#define V_SIG_SYS_ERR(x) ((x) << S_SIG_SYS_ERR)
+#define F_SIG_SYS_ERR V_SIG_SYS_ERR(1U)
+
+#define S_DET_PARITY_ERR 5
+#define V_DET_PARITY_ERR(x) ((x) << S_DET_PARITY_ERR)
+#define F_DET_PARITY_ERR V_DET_PARITY_ERR(1U)
+
+#define S_PIO_PARITY_ERR 6
+#define V_PIO_PARITY_ERR(x) ((x) << S_PIO_PARITY_ERR)
+#define F_PIO_PARITY_ERR V_PIO_PARITY_ERR(1U)
+
+#define S_WF_PARITY_ERR 7
+#define V_WF_PARITY_ERR(x) ((x) << S_WF_PARITY_ERR)
+#define F_WF_PARITY_ERR V_WF_PARITY_ERR(1U)
+
+#define S_RF_PARITY_ERR 8
+#define M_RF_PARITY_ERR 0x3
+#define V_RF_PARITY_ERR(x) ((x) << S_RF_PARITY_ERR)
+#define G_RF_PARITY_ERR(x) (((x) >> S_RF_PARITY_ERR) & M_RF_PARITY_ERR)
+
+#define S_CF_PARITY_ERR 10
+#define M_CF_PARITY_ERR 0x3
+#define V_CF_PARITY_ERR(x) ((x) << S_CF_PARITY_ERR)
+#define G_CF_PARITY_ERR(x) (((x) >> S_CF_PARITY_ERR) & M_CF_PARITY_ERR)
+
+#define A_PCICFG_INTR_CAUSE 0xf8
#define A_PCICFG_MODE 0xfc
#define S_PCI_MODE_64BIT 0
#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
#define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U)
+#define S_PCI_MODE_66MHZ 1
+#define V_PCI_MODE_66MHZ(x) ((x) << S_PCI_MODE_66MHZ)
+#define F_PCI_MODE_66MHZ V_PCI_MODE_66MHZ(1U)
+
+#define S_PCI_MODE_PCIX_INITPAT 2
+#define M_PCI_MODE_PCIX_INITPAT 0x7
+#define V_PCI_MODE_PCIX_INITPAT(x) ((x) << S_PCI_MODE_PCIX_INITPAT)
+#define G_PCI_MODE_PCIX_INITPAT(x) (((x) >> S_PCI_MODE_PCIX_INITPAT) & M_PCI_MODE_PCIX_INITPAT)
+
#define S_PCI_MODE_PCIX 5
#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
#define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U)
#define S_PCI_MODE_CLK 6
#define M_PCI_MODE_CLK 0x3
+#define V_PCI_MODE_CLK(x) ((x) << S_PCI_MODE_CLK)
#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
#endif /* _CXGB_REGS_H_ */
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 9799c12380fc..0ca8d876e16f 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -42,12 +42,14 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
+#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
+#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
@@ -57,10 +59,8 @@
#include "regs.h"
#include "espi.h"
-
-#ifdef NETIF_F_TSO
-#include <linux/tcp.h>
-#endif
+/* This belongs in if_ether.h */
+#define ETH_P_CPL5 0xf
#define SGE_CMDQ_N 2
#define SGE_FREELQ_N 2
@@ -73,6 +73,7 @@
#define SGE_INTRTIMER_NRES 1000
#define SGE_RX_COPY_THRES 256
#define SGE_RX_SM_BUF_SIZE 1536
+#define SGE_TX_DESC_MAX_PLEN 16384
# define SGE_RX_DROP_THRES 2
@@ -184,17 +185,17 @@ struct cmdQ {
unsigned long status; /* HW DMA fetch status */
unsigned int in_use; /* # of in-use command descriptors */
unsigned int size; /* # of descriptors */
- unsigned int processed; /* total # of descs HW has processed */
- unsigned int cleaned; /* total # of descs SW has reclaimed */
- unsigned int stop_thres; /* SW TX queue suspend threshold */
+ unsigned int processed; /* total # of descs HW has processed */
+ unsigned int cleaned; /* total # of descs SW has reclaimed */
+ unsigned int stop_thres; /* SW TX queue suspend threshold */
u16 pidx; /* producer index (SW) */
u16 cidx; /* consumer index (HW) */
u8 genbit; /* current generation (=valid) bit */
- u8 sop; /* is next entry start of packet? */
+ u8 sop; /* is next entry start of packet? */
struct cmdQ_e *entries; /* HW command descriptor Q */
struct cmdQ_ce *centries; /* SW command context descriptor Q */
- spinlock_t lock; /* Lock to protect cmdQ enqueuing */
dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
+ spinlock_t lock; /* Lock to protect cmdQ enqueuing */
};
struct freelQ {
@@ -203,8 +204,8 @@ struct freelQ {
u16 pidx; /* producer index (SW) */
u16 cidx; /* consumer index (HW) */
u16 rx_buffer_size; /* Buffer size on this free list */
- u16 dma_offset; /* DMA offset to align IP headers */
- u16 recycleq_idx; /* skb recycle q to use */
+ u16 dma_offset; /* DMA offset to align IP headers */
+ u16 recycleq_idx; /* skb recycle q to use */
u8 genbit; /* current generation (=valid) bit */
struct freelQ_e *entries; /* HW freelist descriptor Q */
struct freelQ_ce *centries; /* SW freelist context descriptor Q */
@@ -226,6 +227,29 @@ enum {
CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
};
+/* T204 TX SW scheduler */
+
+/* Per T204 TX port */
+struct sched_port {
+ unsigned int avail; /* available bits - quota */
+ unsigned int drain_bits_per_1024ns; /* drain rate */
+ unsigned int speed; /* drain rate, mbps */
+ unsigned int mtu; /* mtu size */
+ struct sk_buff_head skbq; /* pending skbs */
+};
+
+/* Per T204 device */
+struct sched {
+ ktime_t last_updated; /* last time quotas were computed */
+ unsigned int max_avail; /* max bits to be sent to any port */
+ unsigned int port; /* port index (round robin ports) */
+ unsigned int num; /* num skbs in per port queues */
+ struct sched_port p[MAX_NPORTS];
+ struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+};
+static void restart_sched(unsigned long);
+
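+/*
+ * Flow sketch: tx-path callers hand skbs to sched_skb(), which queues them
+ * per port and releases them round-robin for as long as each port's quota
+ * (topped up by sched_update_avail() from the elapsed time and
+ * drain_bits_per_1024ns) allows; restart_sched(), bound to the sched_tsk
+ * tasklet, drives the same path (sched_skb() with skb == NULL) from
+ * tasklet context.
+ */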
+
/*
* Main SGE data structure
*
@@ -243,18 +267,240 @@ struct sge {
unsigned int rx_pkt_pad; /* RX padding for L2 packets */
unsigned int jumbo_fl; /* jumbo freelist Q index */
unsigned int intrtimer_nres; /* no-resource interrupt timer */
- unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
+ unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
struct timer_list espibug_timer;
- unsigned int espibug_timeout;
- struct sk_buff *espibug_skb;
+ unsigned long espibug_timeout;
+ struct sk_buff *espibug_skb[MAX_NPORTS];
u32 sge_control; /* shadow value of sge control reg */
struct sge_intr_counts stats;
- struct sge_port_stats port_stats[MAX_NPORTS];
+ struct sge_port_stats *port_stats[MAX_NPORTS];
+ struct sched *tx_sched;
struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};
/*
+ * Stop the tasklet and free all pending skbs.
+ */
+static void tx_sched_stop(struct sge *sge)
+{
+ struct sched *s = sge->tx_sched;
+ int i;
+
+ tasklet_kill(&s->sched_tsk);
+
+ for (i = 0; i < MAX_NPORTS; i++)
+ __skb_queue_purge(&s->p[i].skbq);
+}
+
+/*
+ * t1_sched_update_parms() is called when the MTU or link speed changes. It
+ * re-computes scheduler parameters to cope with the change.
+ */
+unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
+ unsigned int mtu, unsigned int speed)
+{
+ struct sched *s = sge->tx_sched;
+ struct sched_port *p = &s->p[port];
+ unsigned int max_avail_segs;
+
+ pr_debug("t1_sched_update_parms mtu=%d speed=%d\n", mtu, speed);
+ if (speed)
+ p->speed = speed;
+ if (mtu)
+ p->mtu = mtu;
+
+ if (speed || mtu) {
+ unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
+ do_div(drain, (p->mtu + 50) * 1000);
+ p->drain_bits_per_1024ns = (unsigned int) drain;
+
+ if (p->speed < 1000)
+ p->drain_bits_per_1024ns =
+ 90 * p->drain_bits_per_1024ns / 100;
+ }
+
+ if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
+ p->drain_bits_per_1024ns -= 16;
+ s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
+ max_avail_segs = max(1U, 4096 / (p->mtu - 40));
+ } else {
+ s->max_avail = 16384;
+ max_avail_segs = max(1U, 9000 / (p->mtu - 40));
+ }
+
+ pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
+ "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
+ p->speed, s->max_avail, max_avail_segs,
+ p->drain_bits_per_1024ns);
+
+ return max_avail_segs * (p->mtu - 40);
+}
+
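+/*
+ * Illustrative numbers (not from this patch): at 1 Gb/s with a 1500-byte
+ * MTU the formula above gives drain = 1024 * 1000 * 1460 / (1550 * 1000)
+ * = 964, so drain_bits_per_1024ns ends up at roughly the wire rate scaled
+ * by the (mtu - 40) / (mtu + 50) payload/overhead factor.
+ */
+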
+/*
+ * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
+ * data that can be pushed per port.
+ */
+void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
+{
+ struct sched *s = sge->tx_sched;
+ unsigned int i;
+
+ s->max_avail = val;
+ for (i = 0; i < MAX_NPORTS; i++)
+ t1_sched_update_parms(sge, i, 0, 0);
+}
+
+/*
+ * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
+ * is draining.
+ */
+void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
+ unsigned int val)
+{
+ struct sched *s = sge->tx_sched;
+ struct sched_port *p = &s->p[port];
+ p->drain_bits_per_1024ns = val * 1024 / 1000;
+ t1_sched_update_parms(sge, port, 0, 0);
+}
+
+
+/*
+ * get_clock() implements a ns clock (see ktime_get)
+ */
+static inline ktime_t get_clock(void)
+{
+ struct timespec ts;
+
+ ktime_get_ts(&ts);
+ return timespec_to_ktime(ts);
+}
+
+/*
+ * tx_sched_init() allocates resources and does basic initialization.
+ */
+static int tx_sched_init(struct sge *sge)
+{
+ struct sched *s;
+ int i;
+
+ s = kzalloc(sizeof (struct sched), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ pr_debug("tx_sched_init\n");
+ tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+ sge->tx_sched = s;
+
+ for (i = 0; i < MAX_NPORTS; i++) {
+ skb_queue_head_init(&s->p[i].skbq);
+ t1_sched_update_parms(sge, i, 1500, 1000);
+ }
+
+ return 0;
+}
+
+/*
+ * sched_update_avail() computes the delta since the last time it was called
+ * and updates the per-port quota (number of bits that can be sent to any
+ * port).
+ */
+static inline int sched_update_avail(struct sge *sge)
+{
+ struct sched *s = sge->tx_sched;
+ ktime_t now = get_clock();
+ unsigned int i;
+ long long delta_time_ns;
+
+ delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
+
+ pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
+ if (delta_time_ns < 15000)
+ return 0;
+
+ for (i = 0; i < MAX_NPORTS; i++) {
+ struct sched_port *p = &s->p[i];
+ unsigned int delta_avail;
+
+ delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
+ p->avail = min(p->avail + delta_avail, s->max_avail);
+ }
+
+ s->last_updated = now;
+
+ return 1;
+}
+
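+/*
+ * Continuing the illustrative 1 Gb/s / 1500-byte-MTU numbers: with
+ * drain_bits_per_1024ns = 964 and a 15000 ns delta, delta_avail =
+ * (964 * 15000) >> 13 = 1765.  Since avail is compared directly against
+ * skb->len in sched_skb(), each port gains roughly 1.7 KB of sendable
+ * quota per 15 us tick at that rate.
+ */
+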
+/*
+ * sched_skb() is called from two different places. In the tx path, any
+ * packet generating load on an output port will call sched_skb()
+ * (skb != NULL). In addition, sched_skb() is called from irq/softirq
+ * context (skb == NULL).
+ * The scheduler only returns a skb (which will then be sent) if the
+ * length of the skb is <= the current quota of the output port.
+ */
+static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
+ unsigned int credits)
+{
+ struct sched *s = sge->tx_sched;
+ struct sk_buff_head *skbq;
+ unsigned int i, len, update = 1;
+
+ pr_debug("sched_skb %p\n", skb);
+ if (!skb) {
+ if (!s->num)
+ return NULL;
+ } else {
+ skbq = &s->p[skb->dev->if_port].skbq;
+ __skb_queue_tail(skbq, skb);
+ s->num++;
+ skb = NULL;
+ }
+
+ if (credits < MAX_SKB_FRAGS + 1)
+ goto out;
+
+ again:
+ for (i = 0; i < MAX_NPORTS; i++) {
+ s->port = (s->port + 1) & (MAX_NPORTS - 1);
+ skbq = &s->p[s->port].skbq;
+
+ skb = skb_peek(skbq);
+
+ if (!skb)
+ continue;
+
+ len = skb->len;
+ if (len <= s->p[s->port].avail) {
+ s->p[s->port].avail -= len;
+ s->num--;
+ __skb_unlink(skb, skbq);
+ goto out;
+ }
+ skb = NULL;
+ }
+
+ if (update-- && sched_update_avail(sge))
+ goto again;
+
+ out:
+ /* If there are more pending skbs, we use the hardware to schedule us
+ * again.
+ */
+ if (s->num && !skb) {
+ struct cmdQ *q = &sge->cmdQ[0];
+ clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+ set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+ }
+ }
+ pr_debug("sched_skb ret %p\n", skb);
+
+ return skb;
+}
+
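+/*
+ * Note on the credits check in sched_skb(): an skb needs at least one
+ * descriptor for its linear data plus one per page fragment, so nothing is
+ * dequeued unless MAX_SKB_FRAGS + 1 command descriptors are free; otherwise
+ * the skb is only queued and the doorbell write above lets the hardware
+ * schedule another attempt.
+ */
+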
+/*
* PIO to indicate that memory mapped Q contains valid descriptor(s).
*/
static inline void doorbell_pio(struct adapter *adapter, u32 val)
@@ -335,10 +581,9 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
goto err_no_mem;
memset(q->entries, 0, size);
size = sizeof(struct freelQ_ce) * q->size;
- q->centries = kmalloc(size, GFP_KERNEL);
+ q->centries = kzalloc(size, GFP_KERNEL);
if (!q->centries)
goto err_no_mem;
- memset(q->centries, 0, size);
}
/*
@@ -351,8 +596,11 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
sizeof(struct cpl_rx_data) +
sge->freelQ[!sge->jumbo_fl].dma_offset;
- sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ size = (16 * 1024) -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
/*
* Setup which skb recycle Q should be used when recycling buffers from
@@ -389,17 +637,23 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
q->in_use -= n;
ce = &q->centries[cidx];
while (n--) {
- if (q->sop)
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
- PCI_DMA_TODEVICE);
- else
- pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
- PCI_DMA_TODEVICE);
- q->sop = 0;
+ if (q->sop) {
+ if (likely(pci_unmap_len(ce, dma_len))) {
+ pci_unmap_single(pdev,
+ pci_unmap_addr(ce, dma_addr),
+ pci_unmap_len(ce, dma_len),
+ PCI_DMA_TODEVICE);
+ q->sop = 0;
+ }
+ } else {
+ if (likely(pci_unmap_len(ce, dma_len))) {
+ pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
+ pci_unmap_len(ce, dma_len),
+ PCI_DMA_TODEVICE);
+ }
+ }
if (ce->skb) {
- dev_kfree_skb(ce->skb);
+ dev_kfree_skb_any(ce->skb);
q->sop = 1;
}
ce++;
@@ -463,10 +717,9 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
goto err_no_mem;
memset(q->entries, 0, size);
size = sizeof(struct cmdQ_ce) * q->size;
- q->centries = kmalloc(size, GFP_KERNEL);
+ q->centries = kzalloc(size, GFP_KERNEL);
if (!q->centries)
goto err_no_mem;
- memset(q->centries, 0, size);
}
/*
@@ -506,7 +759,7 @@ void t1_set_vlan_accel(struct adapter *adapter, int on_off)
sge->sge_control |= F_VLAN_XTRACT;
if (adapter->open_device_map) {
writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
- readl(adapter->regs + A_SG_CONTROL); /* flush */
+ readl(adapter->regs + A_SG_CONTROL); /* flush */
}
}
@@ -540,7 +793,6 @@ static void configure_sge(struct sge *sge, struct sge_params *p)
sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
- F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
V_RX_PKT_OFFSET(sge->rx_pkt_pad);
#if defined(__BIG_ENDIAN_BITFIELD)
@@ -568,9 +820,12 @@ static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
*/
void t1_sge_destroy(struct sge *sge)
{
- if (sge->espibug_skb)
- kfree_skb(sge->espibug_skb);
+ int i;
+ for_each_port(sge->adapter, i)
+ free_percpu(sge->port_stats[i]);
+
+ kfree(sge->tx_sched);
free_tx_resources(sge);
free_rx_resources(sge);
kfree(sge);
@@ -735,14 +990,28 @@ int t1_sge_intr_error_handler(struct sge *sge)
return 0;
}
-const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
+const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
{
return &sge->stats;
}
-const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
+void t1_sge_get_port_stats(const struct sge *sge, int port,
+ struct sge_port_stats *ss)
{
- return &sge->port_stats[port];
+ int cpu;
+
+ memset(ss, 0, sizeof(*ss));
+ for_each_possible_cpu(cpu) {
+ struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
+
+ ss->rx_packets += st->rx_packets;
+ ss->rx_cso_good += st->rx_cso_good;
+ ss->tx_packets += st->tx_packets;
+ ss->tx_cso += st->tx_cso;
+ ss->tx_tso += st->tx_tso;
+ ss->vlan_xtract += st->vlan_xtract;
+ ss->vlan_insert += st->vlan_insert;
+ }
}
/**
@@ -856,6 +1125,99 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
}
/*
+ * T1/T2 SGE limits the maximum DMA size per TX descriptor to
+ * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
+ * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
+ * Note that the *_large_page_tx_descs stuff will be optimized out when
+ * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
+ *
+ * compute_large_page_descs() computes how many additional descriptors are
+ * required to break down the stack's request.
+ */
+static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
+{
+ unsigned int count = 0;
+ if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+ unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int i, len = skb->len - skb->data_len;
+ while (len > SGE_TX_DESC_MAX_PLEN) {
+ count++;
+ len -= SGE_TX_DESC_MAX_PLEN;
+ }
+ for (i = 0; nfrags--; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ len = frag->size;
+ while (len > SGE_TX_DESC_MAX_PLEN) {
+ count++;
+ len -= SGE_TX_DESC_MAX_PLEN;
+ }
+ }
+ }
+ return count;
+}
+
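+/*
+ * Example, relevant only when PAGE_SIZE > SGE_TX_DESC_MAX_PLEN: a 20 KB
+ * contiguous area needs one extra descriptor (16 KB + 4 KB) and a 40 KB
+ * area needs two (16 KB + 16 KB + 8 KB); with 4 KB or 8 KB pages the whole
+ * block above is compile-time dead and gets optimized away.
+ */
+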
+/*
+ * Write a cmdQ entry.
+ *
+ * Since this function writes the 'flags' field, it must not be used to
+ * write the first cmdQ entry.
+ */
+static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
+ unsigned int len, unsigned int gen,
+ unsigned int eop)
+{
+ BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
+ e->addr_lo = (u32)mapping;
+ e->addr_hi = (u64)mapping >> 32;
+ e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
+ e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
+}
+
+/*
+ * See comment for previous function.
+ *
+ * write_large_page_tx_descs() writes additional SGE tx descriptors if
+ * *desc_len exceeds HW's capability.
+ */
+static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
+ struct cmdQ_e **e,
+ struct cmdQ_ce **ce,
+ unsigned int *gen,
+ dma_addr_t *desc_mapping,
+ unsigned int *desc_len,
+ unsigned int nfrags,
+ struct cmdQ *q)
+{
+ if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+ struct cmdQ_e *e1 = *e;
+ struct cmdQ_ce *ce1 = *ce;
+
+ while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
+ *desc_len -= SGE_TX_DESC_MAX_PLEN;
+ write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
+ *gen, nfrags == 0 && *desc_len == 0);
+ ce1->skb = NULL;
+ pci_unmap_len_set(ce1, dma_len, 0);
+ *desc_mapping += SGE_TX_DESC_MAX_PLEN;
+ if (*desc_len) {
+ ce1++;
+ e1++;
+ if (++pidx == q->size) {
+ pidx = 0;
+ *gen ^= 1;
+ ce1 = q->centries;
+ e1 = q->entries;
+ }
+ }
+ }
+ *e = e1;
+ *ce = ce1;
+ }
+ return pidx;
+}
+
+/*
* Write the command descriptors to transmit the given skb starting at
* descriptor pidx with the given generation.
*/
@@ -863,50 +1225,84 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
unsigned int pidx, unsigned int gen,
struct cmdQ *q)
{
- dma_addr_t mapping;
+ dma_addr_t mapping, desc_mapping;
struct cmdQ_e *e, *e1;
struct cmdQ_ce *ce;
- unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int i, flags, first_desc_len, desc_len,
+ nfrags = skb_shinfo(skb)->nr_frags;
- mapping = pci_map_single(adapter->pdev, skb->data,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ e = e1 = &q->entries[pidx];
ce = &q->centries[pidx];
+
+ mapping = pci_map_single(adapter->pdev, skb->data,
+ skb->len - skb->data_len, PCI_DMA_TODEVICE);
+
+ desc_mapping = mapping;
+ desc_len = skb->len - skb->data_len;
+
+ flags = F_CMD_DATAVALID | F_CMD_SOP |
+ V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
+ V_CMD_GEN2(gen);
+ first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
+ desc_len : SGE_TX_DESC_MAX_PLEN;
+ e->addr_lo = (u32)desc_mapping;
+ e->addr_hi = (u64)desc_mapping >> 32;
+ e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
+ ce->skb = NULL;
+ pci_unmap_len_set(ce, dma_len, 0);
+
+ if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
+ desc_len > SGE_TX_DESC_MAX_PLEN) {
+ desc_mapping += first_desc_len;
+ desc_len -= first_desc_len;
+ e1++;
+ ce++;
+ if (++pidx == q->size) {
+ pidx = 0;
+ gen ^= 1;
+ e1 = q->entries;
+ ce = q->centries;
+ }
+ pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
+ &desc_mapping, &desc_len,
+ nfrags, q);
+
+ if (likely(desc_len))
+ write_tx_desc(e1, desc_mapping, desc_len, gen,
+ nfrags == 0);
+ }
+
ce->skb = NULL;
pci_unmap_addr_set(ce, dma_addr, mapping);
pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
- flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
- V_CMD_GEN2(gen);
- e = &q->entries[pidx];
- e->addr_lo = (u32)mapping;
- e->addr_hi = (u64)mapping >> 32;
- e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
- for (e1 = e, i = 0; nfrags--; i++) {
+ for (i = 0; nfrags--; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- ce++;
e1++;
+ ce++;
if (++pidx == q->size) {
pidx = 0;
gen ^= 1;
- ce = q->centries;
e1 = q->entries;
+ ce = q->centries;
}
mapping = pci_map_page(adapter->pdev, frag->page,
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
+ desc_mapping = mapping;
+ desc_len = frag->size;
+
+ pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
+ &desc_mapping, &desc_len,
+ nfrags, q);
+ if (likely(desc_len))
+ write_tx_desc(e1, desc_mapping, desc_len, gen,
+ nfrags == 0);
ce->skb = NULL;
pci_unmap_addr_set(ce, dma_addr, mapping);
pci_unmap_len_set(ce, dma_len, frag->size);
-
- e1->addr_lo = (u32)mapping;
- e1->addr_hi = (u64)mapping >> 32;
- e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
- e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
- V_CMD_GEN2(gen);
}
-
ce->skb = skb;
wmb();
e->flags = flags;
@@ -920,26 +1316,56 @@ static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
unsigned int reclaim = q->processed - q->cleaned;
if (reclaim) {
+ pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
+ q->processed, q->cleaned);
free_cmdQ_buffers(sge, q, reclaim);
q->cleaned += reclaim;
}
}
-#ifndef SET_ETHTOOL_OPS
-# define __netif_rx_complete(dev) netif_rx_complete(dev)
-#endif
-
/*
- * We cannot use the standard netif_rx_schedule_prep() because we have multiple
- * ports plus the TOE all multiplexing onto a single response queue, therefore
- * accepting new responses cannot depend on the state of any particular port.
- * So define our own equivalent that omits the netif_running() test.
+ * Called from tasklet. Checks the scheduler for any
+ * pending skbs that can be sent.
*/
-static inline int napi_schedule_prep(struct net_device *dev)
+static void restart_sched(unsigned long arg)
{
- return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
-}
+ struct sge *sge = (struct sge *) arg;
+ struct adapter *adapter = sge->adapter;
+ struct cmdQ *q = &sge->cmdQ[0];
+ struct sk_buff *skb;
+ unsigned int credits, queued_skb = 0;
+ spin_lock(&q->lock);
+ reclaim_completed_tx(sge, q);
+
+ credits = q->size - q->in_use;
+ pr_debug("restart_sched credits=%d\n", credits);
+ while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
+ unsigned int genbit, pidx, count;
+ count = 1 + skb_shinfo(skb)->nr_frags;
+ count += compute_large_page_tx_descs(skb);
+ q->in_use += count;
+ genbit = q->genbit;
+ pidx = q->pidx;
+ q->pidx += count;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->genbit ^= 1;
+ }
+ write_tx_descs(adapter, skb, pidx, genbit, q);
+ credits = q->size - q->in_use;
+ queued_skb = 1;
+ }
+
+ if (queued_skb) {
+ clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+ set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+ }
+ }
+ spin_unlock(&q->lock);
+}
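
restart_sched() and t1_sge_tx() both advance the producer index with the same wrap-and-flip-generation step. The following toy sketch isolates just that bookkeeping; the 8-entry ring and the starting values are assumptions chosen for the example.

#include <stdio.h>

/* Toy descriptor ring: the producer index wraps modulo the ring size and the
 * generation bit flips on every wrap, so hardware can distinguish freshly
 * written descriptors from stale ones left over from the previous pass.
 */
struct ring {
        unsigned int size;
        unsigned int pidx;
        unsigned int genbit;
};

static void advance(struct ring *q, unsigned int count)
{
        q->pidx += count;
        if (q->pidx >= q->size) {
                q->pidx -= q->size;   /* count never exceeds the ring size */
                q->genbit ^= 1;
        }
}

int main(void)
{
        struct ring q = { .size = 8, .pidx = 6, .genbit = 0 };

        advance(&q, 3);   /* 6 + 3 = 9 wraps to 1, generation flips */
        printf("pidx=%u gen=%u\n", q.pidx, q.genbit);   /* pidx=1 gen=1 */
        return 0;
}
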
/**
* sge_rx - process an ingress ethernet packet
@@ -954,31 +1380,39 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
struct sk_buff *skb;
struct cpl_rx_pkt *p;
struct adapter *adapter = sge->adapter;
+ struct sge_port_stats *st;
- sge->stats.ethernet_pkts++;
skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
SGE_RX_DROP_THRES);
- if (!skb) {
- sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
+ if (unlikely(!skb)) {
+ sge->stats.rx_drops++;
return 0;
}
p = (struct cpl_rx_pkt *)skb->data;
skb_pull(skb, sizeof(*p));
+ if (p->iff >= adapter->params.nports) {
+ kfree_skb(skb);
+ return 0;
+ }
+
skb->dev = adapter->port[p->iff].dev;
skb->dev->last_rx = jiffies;
+ st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
+ st->rx_packets++;
+
skb->protocol = eth_type_trans(skb, skb->dev);
if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
skb->protocol == htons(ETH_P_IP) &&
(skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
- sge->port_stats[p->iff].rx_cso_good++;
+ ++st->rx_cso_good;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
skb->ip_summed = CHECKSUM_NONE;
if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
- sge->port_stats[p->iff].vlan_xtract++;
+ st->vlan_xtract++;
if (adapter->params.sge.polling)
vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
ntohs(p->vlan));
@@ -1039,18 +1473,24 @@ static unsigned int update_tx_info(struct adapter *adapter,
struct cmdQ *cmdq = &sge->cmdQ[0];
cmdq->processed += pr0;
-
+ if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
+ freelQs_empty(sge);
+ flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
+ }
if (flags & F_CMDQ0_ENABLE) {
clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
-
+
if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
!test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
}
- flags &= ~F_CMDQ0_ENABLE;
+ if (sge->tx_sched)
+ tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
+
+ flags &= ~F_CMDQ0_ENABLE;
}
-
+
if (unlikely(sge->stopped_tx_queues != 0))
restart_tx_queues(sge);
@@ -1241,20 +1681,21 @@ static irqreturn_t t1_interrupt_napi(int irq, void *data)
if (e->GenerationBit == q->genbit) {
if (e->DataValid ||
process_pure_responses(adapter, e)) {
- if (likely(napi_schedule_prep(sge->netdev)))
+ if (likely(__netif_rx_schedule_prep(sge->netdev)))
__netif_rx_schedule(sge->netdev);
- else
- printk(KERN_CRIT
+ else if (net_ratelimit())
+ printk(KERN_INFO
"NAPI schedule failure!\n");
} else
- writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+ writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+
handled = 1;
goto unlock;
} else
- writel(q->cidx, adapter->regs + A_SG_SLEEPING);
- } else
- if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
- printk(KERN_ERR "data interrupt while NAPI running\n");
+ writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+ } else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA) {
+ printk(KERN_ERR "data interrupt while NAPI running\n");
+ }
handled = t1_slow_intr_handler(adapter);
if (!handled)
@@ -1335,34 +1776,59 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
{
struct sge *sge = adapter->sge;
struct cmdQ *q = &sge->cmdQ[qid];
- unsigned int credits, pidx, genbit, count;
+ unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
+
+ if (!spin_trylock(&q->lock))
+ return NETDEV_TX_LOCKED;
- spin_lock(&q->lock);
reclaim_completed_tx(sge, q);
pidx = q->pidx;
credits = q->size - q->in_use;
count = 1 + skb_shinfo(skb)->nr_frags;
+ count += compute_large_page_tx_descs(skb);
- { /* Ethernet packet */
- if (unlikely(credits < count)) {
+ /* Ethernet packet */
+ if (unlikely(credits < count)) {
+ if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
set_bit(dev->if_port, &sge->stopped_tx_queues);
sge->stats.cmdQ_full[2]++;
- spin_unlock(&q->lock);
- if (!netif_queue_stopped(dev))
- CH_ERR("%s: Tx ring full while queue awake!\n",
- adapter->name);
- return NETDEV_TX_BUSY;
+ CH_ERR("%s: Tx ring full while queue awake!\n",
+ adapter->name);
}
- if (unlikely(credits - count < q->stop_thres)) {
- sge->stats.cmdQ_full[2]++;
- netif_stop_queue(dev);
- set_bit(dev->if_port, &sge->stopped_tx_queues);
+ spin_unlock(&q->lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(credits - count < q->stop_thres)) {
+ netif_stop_queue(dev);
+ set_bit(dev->if_port, &sge->stopped_tx_queues);
+ sge->stats.cmdQ_full[2]++;
+ }
+
+ /* T204 cmdQ0 skbs that are destined for a certain port have to go
+ * through the scheduler.
+ */
+ if (sge->tx_sched && !qid && skb->dev) {
+ use_sched:
+ use_sched_skb = 1;
+ /* Note that the scheduler might return a different skb than
+ * the one passed in.
+ */
+ skb = sched_skb(sge, skb, credits);
+ if (!skb) {
+ spin_unlock(&q->lock);
+ return NETDEV_TX_OK;
}
+ pidx = q->pidx;
+ count = 1 + skb_shinfo(skb)->nr_frags;
+ count += compute_large_page_tx_descs(skb);
}
+
q->in_use += count;
genbit = q->genbit;
+ pidx = q->pidx;
q->pidx += count;
if (q->pidx >= q->size) {
q->pidx -= q->size;
@@ -1388,6 +1854,14 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
}
}
+
+ if (use_sched_skb) {
+ if (spin_trylock(&q->lock)) {
+ credits = q->size - q->in_use;
+ skb = NULL;
+ goto use_sched;
+ }
+ }
return NETDEV_TX_OK;
}
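
The move from spin_lock() to spin_trylock() leans on the hard_start_xmit() convention of this era: a driver doing its own TX locking may return NETDEV_TX_LOCKED on contention and the core retries later. Below is a reduced sketch of that contract, with pthread primitives standing in for the kernel spinlock and a credit check standing in for the ring bookkeeping; all names and values are assumptions, not driver code.

#include <pthread.h>
#include <stdio.h>

/* Return codes mirroring the hard_start_xmit() convention: OK (consumed),
 * BUSY (ring full, stop the queue), LOCKED (could not take the TX lock,
 * caller should retry). Build with -pthread.
 */
enum { TX_OK, TX_BUSY, TX_LOCKED };

struct txq {
        pthread_mutex_t lock;
        unsigned int credits;
};

static int xmit(struct txq *q, unsigned int descs_needed)
{
        if (pthread_mutex_trylock(&q->lock))
                return TX_LOCKED;              /* contended: let the caller retry */

        if (q->credits < descs_needed) {
                pthread_mutex_unlock(&q->lock);
                return TX_BUSY;                /* ring full: stop the queue */
        }
        q->credits -= descs_needed;
        pthread_mutex_unlock(&q->lock);
        return TX_OK;
}

int main(void)
{
        static struct txq q = { PTHREAD_MUTEX_INITIALIZER, 4 };

        printf("%d %d\n", xmit(&q, 3), xmit(&q, 3));   /* 0 (OK) then 1 (BUSY) */
        return 0;
}
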
@@ -1412,16 +1886,20 @@ static inline int eth_hdr_len(const void *data)
int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct adapter *adapter = dev->priv;
- struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
struct sge *sge = adapter->sge;
+ struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id());
struct cpl_tx_pkt *cpl;
+ struct sk_buff *orig_skb = skb;
+ int ret;
+
+ if (skb->protocol == htons(ETH_P_CPL5))
+ goto send;
-#ifdef NETIF_F_TSO
- if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_size) {
int eth_type;
struct cpl_tx_pkt_lso *hdr;
- st->tso++;
+ ++st->tx_tso;
eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
CPL_ETH_II : CPL_ETH_II_VLAN;
@@ -1432,13 +1910,10 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr->ip_hdr_words = skb->nh.iph->ihl;
hdr->tcp_hdr_words = skb->h.th->doff;
hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
- skb_shinfo(skb)->gso_size));
+ skb_shinfo(skb)->gso_size));
hdr->len = htonl(skb->len - sizeof(*hdr));
cpl = (struct cpl_tx_pkt *)hdr;
- sge->stats.tx_lso_pkts++;
- } else
-#endif
- {
+ } else {
/*
* Packets shorter than ETH_HLEN can break the MAC, drop them
* early. Also, we may get oversized packets because some
@@ -1447,6 +1922,8 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (unlikely(skb->len < ETH_HLEN ||
skb->len > dev->mtu + eth_hdr_len(skb->data))) {
+ pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
+ skb->len, eth_hdr_len(skb->data), dev->mtu);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1456,9 +1933,9 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
* components, such as pktgen, do not handle it right.
* Complain when this happens but try to fix things up.
*/
- if (unlikely(skb_headroom(skb) <
- dev->hard_header_len - ETH_HLEN)) {
- struct sk_buff *orig_skb = skb;
+ if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
+ pr_debug("%s: headroom %d header_len %d\n", dev->name,
+ skb_headroom(skb), dev->hard_header_len);
if (net_ratelimit())
printk(KERN_ERR "%s: inadequate headroom in "
@@ -1471,19 +1948,21 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
skb->ip_summed == CHECKSUM_PARTIAL &&
- skb->nh.iph->protocol == IPPROTO_UDP)
+ skb->nh.iph->protocol == IPPROTO_UDP) {
if (unlikely(skb_checksum_help(skb))) {
+ pr_debug("%s: unable to do udp checksum\n", dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+ }
/* Hmm, assuming we catch the gratuitous ARP... and we'll use
* it to flush out stuck espi packets...
- */
- if (unlikely(!adapter->sge->espibug_skb)) {
+ */
+ if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
if (skb->protocol == htons(ETH_P_ARP) &&
skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
- adapter->sge->espibug_skb = skb;
+ adapter->sge->espibug_skb[dev->if_port] = skb;
/* We want to re-use this skb later. We
* simply bump the reference count and it
* will not be freed...
@@ -1499,8 +1978,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* the length field isn't used so don't bother setting it */
st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
- sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_PARTIAL);
- sge->stats.tx_reg_pkts++;
}
cpl->iff = dev->if_port;
@@ -1513,8 +1990,19 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
cpl->vlan_valid = 0;
+send:
+ st->tx_packets++;
dev->trans_start = jiffies;
- return t1_sge_tx(skb, adapter, 0, dev);
+ ret = t1_sge_tx(skb, adapter, 0, dev);
+
+ /* If the transmit was busy and we reallocated the skb due to the headroom
+ * limit, silently discard it to avoid a leak.
+ */
+ if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
+ dev_kfree_skb_any(skb);
+ ret = NETDEV_TX_OK;
+ }
+ return ret;
}
/*
@@ -1532,10 +2020,9 @@ static void sge_tx_reclaim_cb(unsigned long data)
continue;
reclaim_completed_tx(sge, q);
- if (i == 0 && q->in_use) /* flush pending credits */
- writel(F_CMDQ0_ENABLE,
- sge->adapter->regs + A_SG_DOORBELL);
-
+ if (i == 0 && q->in_use) { /* flush pending credits */
+ writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+ }
spin_unlock(&q->lock);
}
mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
@@ -1582,11 +2069,20 @@ int t1_sge_configure(struct sge *sge, struct sge_params *p)
*/
void t1_sge_stop(struct sge *sge)
{
+ int i;
writel(0, sge->adapter->regs + A_SG_CONTROL);
- (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+ readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
if (is_T2(sge->adapter))
del_timer_sync(&sge->espibug_timer);
+
del_timer_sync(&sge->tx_reclaim_timer);
+ if (sge->tx_sched)
+ tx_sched_stop(sge);
+
+ for (i = 0; i < MAX_NPORTS; i++)
+ if (sge->espibug_skb[i])
+ kfree_skb(sge->espibug_skb[i]);
}
/*
@@ -1599,74 +2095,128 @@ void t1_sge_start(struct sge *sge)
writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
- (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+ readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
- if (is_T2(sge->adapter))
+ if (is_T2(sge->adapter))
mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround
*/
-static void espibug_workaround(void *data)
+static void espibug_workaround_t204(unsigned long data)
{
struct adapter *adapter = (struct adapter *)data;
struct sge *sge = adapter->sge;
+ unsigned int nports = adapter->params.nports;
+ u32 seop[MAX_NPORTS];
- if (netif_running(adapter->port[0].dev)) {
- struct sk_buff *skb = sge->espibug_skb;
-
- u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
-
- if ((seop & 0xfff0fff) == 0xfff && skb) {
- if (!skb->cb[0]) {
- u8 ch_mac_addr[ETH_ALEN] =
- {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
- memcpy(skb->data + sizeof(struct cpl_tx_pkt),
- ch_mac_addr, ETH_ALEN);
- memcpy(skb->data + skb->len - 10, ch_mac_addr,
- ETH_ALEN);
- skb->cb[0] = 0xff;
+ if (adapter->open_device_map & PORT_MASK) {
+ int i;
+ if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) {
+ return;
+ }
+ for (i = 0; i < nports; i++) {
+ struct sk_buff *skb = sge->espibug_skb[i];
+ if ( (netif_running(adapter->port[i].dev)) &&
+ !(netif_queue_stopped(adapter->port[i].dev)) &&
+ (seop[i] && ((seop[i] & 0xfff) == 0)) &&
+ skb ) {
+ if (!skb->cb[0]) {
+ u8 ch_mac_addr[ETH_ALEN] =
+ {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
+ memcpy(skb->data + sizeof(struct cpl_tx_pkt),
+ ch_mac_addr, ETH_ALEN);
+ memcpy(skb->data + skb->len - 10,
+ ch_mac_addr, ETH_ALEN);
+ skb->cb[0] = 0xff;
+ }
+
+ /* bump the reference count to avoid freeing of
+ * the skb once the DMA has completed.
+ */
+ skb = skb_get(skb);
+ t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
}
-
- /* bump the reference count to avoid freeing of the
- * skb once the DMA has completed.
- */
- skb = skb_get(skb);
- t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
}
}
mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
+static void espibug_workaround(unsigned long data)
+{
+ struct adapter *adapter = (struct adapter *)data;
+ struct sge *sge = adapter->sge;
+
+ if (netif_running(adapter->port[0].dev)) {
+ struct sk_buff *skb = sge->espibug_skb[0];
+ u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
+
+ if ((seop & 0xfff0fff) == 0xfff && skb) {
+ if (!skb->cb[0]) {
+ u8 ch_mac_addr[ETH_ALEN] =
+ {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
+ memcpy(skb->data + sizeof(struct cpl_tx_pkt),
+ ch_mac_addr, ETH_ALEN);
+ memcpy(skb->data + skb->len - 10, ch_mac_addr,
+ ETH_ALEN);
+ skb->cb[0] = 0xff;
+ }
+
+ /* bump the reference count to avoid freeing of the
+ * skb once the DMA has completed.
+ */
+ skb = skb_get(skb);
+ t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
+ }
+ }
+ mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
/*
* Creates a t1_sge structure and returns suggested resource parameters.
*/
struct sge * __devinit t1_sge_create(struct adapter *adapter,
struct sge_params *p)
{
- struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
+ struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
+ int i;
if (!sge)
return NULL;
- memset(sge, 0, sizeof(*sge));
sge->adapter = adapter;
sge->netdev = adapter->port[0].dev;
sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+ for_each_port(adapter, i) {
+ sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
+ if (!sge->port_stats[i])
+ goto nomem_port;
+ }
+
init_timer(&sge->tx_reclaim_timer);
sge->tx_reclaim_timer.data = (unsigned long)sge;
sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
if (is_T2(sge->adapter)) {
init_timer(&sge->espibug_timer);
- sge->espibug_timer.function = (void *)&espibug_workaround;
+
+ if (adapter->params.nports > 1) {
+ tx_sched_init(sge);
+ sge->espibug_timer.function = espibug_workaround_t204;
+ } else {
+ sge->espibug_timer.function = espibug_workaround;
+ }
sge->espibug_timer.data = (unsigned long)sge->adapter;
+
sge->espibug_timeout = 1;
+ /* for T204, every 10ms */
+ if (adapter->params.nports > 1)
+ sge->espibug_timeout = HZ/100;
}
@@ -1674,10 +2224,25 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
p->cmdQ_size[1] = SGE_CMDQ1_E_N;
p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
- p->rx_coalesce_usecs = 50;
+ if (sge->tx_sched) {
+ if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
+ p->rx_coalesce_usecs = 15;
+ else
+ p->rx_coalesce_usecs = 50;
+ } else
+ p->rx_coalesce_usecs = 50;
+
p->coalesce_enable = 0;
p->sample_interval_usecs = 0;
p->polling = 0;
return sge;
+nomem_port:
+ while (i >= 0) {
+ free_percpu(sge->port_stats[i]);
+ --i;
+ }
+ kfree(sge);
+ return NULL;
+
}
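
The nomem_port label in t1_sge_create() follows the usual allocate-N-or-unwind pattern: free whatever the loop managed to allocate before the failure and bail out. A self-contained sketch of that shape is below; the array size, the malloc() sizes and the names are assumptions for the example.

#include <stdlib.h>

#define NPORTS 4   /* stand-in for the adapter's port count */

/* Allocate one object per port; on failure, release everything allocated so
 * far and report the error, the same unwind shape as the nomem_port label.
 */
static int alloc_all(void *objs[NPORTS])
{
        int i;

        for (i = 0; i < NPORTS; i++) {
                objs[i] = malloc(64);
                if (!objs[i])
                        goto nomem;
        }
        return 0;

nomem:
        while (--i >= 0)
                free(objs[i]);
        return -1;
}

int main(void)
{
        void *objs[NPORTS];

        if (alloc_all(objs) == 0) {
                for (int i = 0; i < NPORTS; i++)
                        free(objs[i]);
        }
        return 0;
}
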
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
index 91af47bab7be..7ceb0117d039 100644
--- a/drivers/net/chelsio/sge.h
+++ b/drivers/net/chelsio/sge.h
@@ -44,6 +44,9 @@
#include <asm/byteorder.h>
struct sge_intr_counts {
+ unsigned int rx_drops; /* # of packets dropped due to no mem */
+ unsigned int pure_rsps; /* # of non-payload responses */
+ unsigned int unhandled_irqs; /* # of unhandled interrupts */
unsigned int respQ_empty; /* # times respQ empty */
unsigned int respQ_overflow; /* # respQ overflow (fatal) */
unsigned int freelistQ_empty; /* # times freelist empty */
@@ -51,24 +54,16 @@ struct sge_intr_counts {
unsigned int pkt_mismatch;
unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */
unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
- unsigned int ethernet_pkts; /* # of Ethernet packets received */
- unsigned int offload_pkts; /* # of offload packets received */
- unsigned int offload_bundles; /* # of offload pkt bundles delivered */
- unsigned int pure_rsps; /* # of non-payload responses */
- unsigned int unhandled_irqs; /* # of unhandled interrupts */
- unsigned int tx_ipfrags;
- unsigned int tx_reg_pkts;
- unsigned int tx_lso_pkts;
- unsigned int tx_do_cksum;
};
struct sge_port_stats {
- unsigned long rx_cso_good; /* # of successful RX csum offloads */
- unsigned long tx_cso; /* # of TX checksum offloads */
- unsigned long vlan_xtract; /* # of VLAN tag extractions */
- unsigned long vlan_insert; /* # of VLAN tag extractions */
- unsigned long tso; /* # of TSO requests */
- unsigned long rx_drops; /* # of packets dropped due to no mem */
+ u64 rx_packets; /* # of Ethernet packets received */
+ u64 rx_cso_good; /* # of successful RX csum offloads */
+ u64 tx_packets; /* # of TX packets */
+ u64 tx_cso; /* # of TX checksum offloads */
+ u64 tx_tso; /* # of TSO requests */
+ u64 vlan_xtract; /* # of VLAN tag extractions */
+ u64 vlan_insert; /* # of VLAN tag insertions */
};
struct sk_buff;
@@ -90,7 +85,11 @@ int t1_sge_intr_error_handler(struct sge *);
void t1_sge_intr_enable(struct sge *);
void t1_sge_intr_disable(struct sge *);
void t1_sge_intr_clear(struct sge *);
-const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge);
-const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port);
+const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge);
+void t1_sge_get_port_stats(const struct sge *sge, int port, struct sge_port_stats *);
+void t1_sched_set_max_avail_bytes(struct sge *, unsigned int);
+void t1_sched_set_drain_bits_per_us(struct sge *, unsigned int, unsigned int);
+unsigned int t1_sched_update_parms(struct sge *, unsigned int, unsigned int,
+ unsigned int);
#endif /* _CXGB_SGE_H_ */
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 12e4e96dba2d..22ed9a383c08 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -43,6 +43,7 @@
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
+#include "tp.h"
#include "espi.h"
/**
@@ -59,7 +60,7 @@
* otherwise.
*/
static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
- int attempts, int delay)
+ int attempts, int delay)
{
while (1) {
u32 val = readl(adapter->regs + reg) & mask;
@@ -78,7 +79,7 @@ static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
/*
* Write a register over the TPI interface (unlocked and locked versions).
*/
-static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
+int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
{
int tpi_busy;
@@ -98,16 +99,16 @@ int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
{
int ret;
- spin_lock(&(adapter)->tpi_lock);
+ spin_lock(&adapter->tpi_lock);
ret = __t1_tpi_write(adapter, addr, value);
- spin_unlock(&(adapter)->tpi_lock);
+ spin_unlock(&adapter->tpi_lock);
return ret;
}
/*
* Read a register over the TPI interface (unlocked and locked versions).
*/
-static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
+int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
{
int tpi_busy;
@@ -128,18 +129,26 @@ int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
{
int ret;
- spin_lock(&(adapter)->tpi_lock);
+ spin_lock(&adapter->tpi_lock);
ret = __t1_tpi_read(adapter, addr, valp);
- spin_unlock(&(adapter)->tpi_lock);
+ spin_unlock(&adapter->tpi_lock);
return ret;
}
/*
+ * Set a TPI parameter.
+ */
+static void t1_tpi_par(adapter_t *adapter, u32 value)
+{
+ writel(V_TPIPAR(value), adapter->regs + A_TPI_PAR);
+}
+
+/*
* Called when a port's link settings change to propagate the new values to the
* associated PHY and MAC. After performing the common tasks it invokes an
* OS-specific handler.
*/
-/* static */ void link_changed(adapter_t *adapter, int port_id)
+void t1_link_changed(adapter_t *adapter, int port_id)
{
int link_ok, speed, duplex, fc;
struct cphy *phy = adapter->port[port_id].phy;
@@ -159,23 +168,83 @@ int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
lc->fc = (unsigned char)fc;
}
- t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
+ t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc);
}
static int t1_pci_intr_handler(adapter_t *adapter)
{
u32 pcix_cause;
- pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
+ pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
if (pcix_cause) {
pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
- pcix_cause);
+ pcix_cause);
t1_fatal_err(adapter); /* PCI errors are fatal */
}
return 0;
}
+#ifdef CONFIG_CHELSIO_T1_COUGAR
+#include "cspi.h"
+#endif
+#ifdef CONFIG_CHELSIO_T1_1G
+#include "fpga_defs.h"
+
+/*
+ * PHY interrupt handler for FPGA boards.
+ */
+static int fpga_phy_intr_handler(adapter_t *adapter)
+{
+ int p;
+ u32 cause = readl(adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE);
+
+ for_each_port(adapter, p)
+ if (cause & (1 << p)) {
+ struct cphy *phy = adapter->port[p].phy;
+ int phy_cause = phy->ops->interrupt_handler(phy);
+
+ if (phy_cause & cphy_cause_link_change)
+ t1_link_changed(adapter, p);
+ }
+ writel(cause, adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE);
+ return 0;
+}
+
+/*
+ * Slow path interrupt handler for FPGAs.
+ */
+static int fpga_slow_intr(adapter_t *adapter)
+{
+ u32 cause = readl(adapter->regs + A_PL_CAUSE);
+
+ cause &= ~F_PL_INTR_SGE_DATA;
+ if (cause & F_PL_INTR_SGE_ERR)
+ t1_sge_intr_error_handler(adapter->sge);
+
+ if (cause & FPGA_PCIX_INTERRUPT_GMAC)
+ fpga_phy_intr_handler(adapter);
+
+ if (cause & FPGA_PCIX_INTERRUPT_TP) {
+ /*
+ * FPGA doesn't support MC4 interrupts and it requires
+ * this odd layer of indirection for MC5.
+ */
+ u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
+
+ /* Clear TP interrupt */
+ writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
+ }
+ if (cause & FPGA_PCIX_INTERRUPT_PCIX)
+ t1_pci_intr_handler(adapter);
+
+ /* Clear the interrupts just processed. */
+ if (cause)
+ writel(cause, adapter->regs + A_PL_CAUSE);
+
+ return cause != 0;
+}
+#endif
/*
* Wait until Elmer's MI1 interface is ready for new operations.
@@ -212,12 +281,62 @@ static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
}
+#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR)
+/*
+ * Elmer MI1 MDIO read/write operations.
+ */
+static int mi1_mdio_read(adapter_t *adapter, int phy_addr, int mmd_addr,
+ int reg_addr, unsigned int *valp)
+{
+ u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+ if (mmd_addr)
+ return -EINVAL;
+
+ spin_lock(&adapter->tpi_lock);
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+ __t1_tpi_write(adapter,
+ A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ);
+ mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+ __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
+ spin_unlock(&adapter->tpi_lock);
+ return 0;
+}
+
+static int mi1_mdio_write(adapter_t *adapter, int phy_addr, int mmd_addr,
+ int reg_addr, unsigned int val)
+{
+ u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+ if (mmd_addr)
+ return -EINVAL;
+
+ spin_lock(&adapter->tpi_lock);
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
+ __t1_tpi_write(adapter,
+ A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_WRITE);
+ mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+ spin_unlock(&adapter->tpi_lock);
+ return 0;
+}
+
+#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR)
+static struct mdio_ops mi1_mdio_ops = {
+ mi1_mdio_init,
+ mi1_mdio_read,
+ mi1_mdio_write
+};
+#endif
+
+#endif
+
static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *valp)
{
u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
- spin_lock(&(adapter)->tpi_lock);
+ spin_lock(&adapter->tpi_lock);
/* Write the address we want. */
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
@@ -227,12 +346,13 @@ static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
/* Write the operation we want. */
- __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
+ __t1_tpi_write(adapter,
+ A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
/* Read the data. */
__t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
- spin_unlock(&(adapter)->tpi_lock);
+ spin_unlock(&adapter->tpi_lock);
return 0;
}
@@ -241,7 +361,7 @@ static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
{
u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
- spin_lock(&(adapter)->tpi_lock);
+ spin_lock(&adapter->tpi_lock);
/* Write the address we want. */
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
@@ -254,7 +374,7 @@ static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
- spin_unlock(&(adapter)->tpi_lock);
+ spin_unlock(&adapter->tpi_lock);
return 0;
}
@@ -265,12 +385,25 @@ static struct mdio_ops mi1_mdio_ext_ops = {
};
enum {
+ CH_BRD_T110_1CU,
CH_BRD_N110_1F,
CH_BRD_N210_1F,
+ CH_BRD_T210_1F,
+ CH_BRD_T210_1CU,
+ CH_BRD_N204_4CU,
};
static struct board_info t1_board[] = {
+{ CHBT_BOARD_CHT110, 1/*ports#*/,
+ SUPPORTED_10000baseT_Full /*caps*/, CHBT_TERM_T1,
+ CHBT_MAC_PM3393, CHBT_PHY_MY3126,
+ 125000000/*clk-core*/, 150000000/*clk-mc3*/, 125000000/*clk-mc4*/,
+ 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 1/*mdien*/,
+ 1/*mdiinv*/, 1/*mdc*/, 1/*phybaseaddr*/, &t1_pm3393_ops,
+ &t1_my3126_ops, &mi1_mdio_ext_ops,
+ "Chelsio T110 1x10GBase-CX4 TOE" },
+
{ CHBT_BOARD_N110, 1/*ports#*/,
SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1,
CHBT_MAC_PM3393, CHBT_PHY_88X2010,
@@ -289,12 +422,47 @@ static struct board_info t1_board[] = {
&t1_mv88x201x_ops, &mi1_mdio_ext_ops,
"Chelsio N210 1x10GBaseX NIC" },
+{ CHBT_BOARD_CHT210, 1/*ports#*/,
+ SUPPORTED_10000baseT_Full /*caps*/, CHBT_TERM_T2,
+ CHBT_MAC_PM3393, CHBT_PHY_88X2010,
+ 125000000/*clk-core*/, 133000000/*clk-mc3*/, 125000000/*clk-mc4*/,
+ 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
+ 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
+ &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
+ "Chelsio T210 1x10GBaseX TOE" },
+
+{ CHBT_BOARD_CHT210, 1/*ports#*/,
+ SUPPORTED_10000baseT_Full /*caps*/, CHBT_TERM_T2,
+ CHBT_MAC_PM3393, CHBT_PHY_MY3126,
+ 125000000/*clk-core*/, 133000000/*clk-mc3*/, 125000000/*clk-mc4*/,
+ 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 1/*mdien*/,
+ 1/*mdiinv*/, 1/*mdc*/, 1/*phybaseaddr*/, &t1_pm3393_ops,
+ &t1_my3126_ops, &mi1_mdio_ext_ops,
+ "Chelsio T210 1x10GBase-CX4 TOE" },
+
+#ifdef CONFIG_CHELSIO_T1_1G
+{ CHBT_BOARD_CHN204, 4/*ports#*/,
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_PAUSE | SUPPORTED_TP /*caps*/, CHBT_TERM_T2, CHBT_MAC_VSC7321, CHBT_PHY_88E1111,
+ 100000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
+ 4/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
+ 0/*mdiinv*/, 1/*mdc*/, 4/*phybaseaddr*/, &t1_vsc7326_ops,
+ &t1_mv88e1xxx_ops, &mi1_mdio_ops,
+ "Chelsio N204 4x100/1000BaseT NIC" },
+#endif
+
};
struct pci_device_id t1_pci_tbl[] = {
+ CH_DEVICE(8, 0, CH_BRD_T110_1CU),
+ CH_DEVICE(8, 1, CH_BRD_T110_1CU),
CH_DEVICE(7, 0, CH_BRD_N110_1F),
CH_DEVICE(10, 1, CH_BRD_N210_1F),
- { 0, }
+ CH_DEVICE(11, 1, CH_BRD_T210_1F),
+ CH_DEVICE(14, 1, CH_BRD_T210_1CU),
+ CH_DEVICE(16, 1, CH_BRD_N204_4CU),
+ { 0 }
};
MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
@@ -390,9 +558,14 @@ int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
if (lc->supported & SUPPORTED_Autoneg) {
lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
if (fc) {
- lc->advertising |= ADVERTISED_ASYM_PAUSE;
- if (fc == (PAUSE_RX | PAUSE_TX))
+ if (fc == ((PAUSE_RX | PAUSE_TX) &
+ (mac->adapter->params.nports < 2)))
lc->advertising |= ADVERTISED_PAUSE;
+ else {
+ lc->advertising |= ADVERTISED_ASYM_PAUSE;
+ if (fc == PAUSE_RX)
+ lc->advertising |= ADVERTISED_PAUSE;
+ }
}
phy->ops->advertise(phy, lc->advertising);
@@ -403,11 +576,15 @@ int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
mac->ops->set_speed_duplex_fc(mac, lc->speed,
lc->duplex, fc);
/* Also disables autoneg */
+ phy->state = PHY_AUTONEG_RDY;
phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
phy->ops->reset(phy, 0);
- } else
+ } else {
+ phy->state = PHY_AUTONEG_EN;
phy->ops->autoneg_enable(phy); /* also resets PHY */
+ }
} else {
+ phy->state = PHY_AUTONEG_RDY;
mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
lc->fc = (unsigned char)fc;
phy->ops->reset(phy, 0);
@@ -418,24 +595,109 @@ int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
/*
* External interrupt handler for boards using elmer0.
*/
-int elmer0_ext_intr_handler(adapter_t *adapter)
+int t1_elmer0_ext_intr_handler(adapter_t *adapter)
{
- struct cphy *phy;
+ struct cphy *phy;
int phy_cause;
- u32 cause;
+ u32 cause;
t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
switch (board_info(adapter)->board) {
+#ifdef CONFIG_CHELSIO_T1_1G
+ case CHBT_BOARD_CHT204:
+ case CHBT_BOARD_CHT204E:
+ case CHBT_BOARD_CHN204:
+ case CHBT_BOARD_CHT204V: {
+ int i, port_bit;
+ for_each_port(adapter, i) {
+ port_bit = i + 1;
+ if (!(cause & (1 << port_bit))) continue;
+
+ phy = adapter->port[i].phy;
+ phy_cause = phy->ops->interrupt_handler(phy);
+ if (phy_cause & cphy_cause_link_change)
+ t1_link_changed(adapter, i);
+ }
+ break;
+ }
+ case CHBT_BOARD_CHT101:
+ if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */
+ phy = adapter->port[0].phy;
+ phy_cause = phy->ops->interrupt_handler(phy);
+ if (phy_cause & cphy_cause_link_change)
+ t1_link_changed(adapter, 0);
+ }
+ break;
+ case CHBT_BOARD_7500: {
+ int p;
+ /*
+ * Elmer0's interrupt cause isn't useful here because there is
+ * only one bit that can be set for all 4 ports. This means
+ * we are forced to check every PHY's interrupt status
+ * register to see who initiated the interrupt.
+ */
+ for_each_port(adapter, p) {
+ phy = adapter->port[p].phy;
+ phy_cause = phy->ops->interrupt_handler(phy);
+ if (phy_cause & cphy_cause_link_change)
+ t1_link_changed(adapter, p);
+ }
+ break;
+ }
+#endif
+ case CHBT_BOARD_CHT210:
case CHBT_BOARD_N210:
case CHBT_BOARD_N110:
if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
phy = adapter->port[0].phy;
phy_cause = phy->ops->interrupt_handler(phy);
if (phy_cause & cphy_cause_link_change)
- link_changed(adapter, 0);
+ t1_link_changed(adapter, 0);
+ }
+ break;
+ case CHBT_BOARD_8000:
+ case CHBT_BOARD_CHT110:
+ CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n",
+ cause);
+ if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */
+ struct cmac *mac = adapter->port[0].mac;
+
+ mac->ops->interrupt_handler(mac);
}
+ if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */
+ u32 mod_detect;
+
+ t1_tpi_read(adapter,
+ A_ELMER0_GPI_STAT, &mod_detect);
+ CH_MSG(adapter, INFO, LINK, "XPAK %s\n",
+ mod_detect ? "removed" : "inserted");
+ }
break;
+#ifdef CONFIG_CHELSIO_T1_COUGAR
+ case CHBT_BOARD_COUGAR:
+ if (adapter->params.nports == 1) {
+ if (cause & ELMER0_GP_BIT1) { /* Vitesse MAC */
+ struct cmac *mac = adapter->port[0].mac;
+ mac->ops->interrupt_handler(mac);
+ }
+ if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */
+ }
+ } else {
+ int i, port_bit;
+
+ for_each_port(adapter, i) {
+ port_bit = i ? i + 1 : 0;
+ if (!(cause & (1 << port_bit))) continue;
+
+ phy = adapter->port[i].phy;
+ phy_cause = phy->ops->interrupt_handler(phy);
+ if (phy_cause & cphy_cause_link_change)
+ t1_link_changed(adapter, i);
+ }
+ }
+ break;
+#endif
}
t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
return 0;
@@ -445,11 +707,11 @@ int elmer0_ext_intr_handler(adapter_t *adapter)
void t1_interrupts_enable(adapter_t *adapter)
{
unsigned int i;
- u32 pl_intr;
- adapter->slow_intr_mask = F_PL_INTR_SGE_ERR;
+ adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP;
t1_sge_intr_enable(adapter->sge);
+ t1_tp_intr_enable(adapter->tp);
if (adapter->espi) {
adapter->slow_intr_mask |= F_PL_INTR_ESPI;
t1_espi_intr_enable(adapter->espi);
@@ -462,15 +724,17 @@ void t1_interrupts_enable(adapter_t *adapter)
}
/* Enable PCIX & external chip interrupts on ASIC boards. */
- pl_intr = readl(adapter->regs + A_PL_ENABLE);
+ if (t1_is_asic(adapter)) {
+ u32 pl_intr = readl(adapter->regs + A_PL_ENABLE);
- /* PCI-X interrupts */
- pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
- 0xffffffff);
+ /* PCI-X interrupts */
+ pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
+ 0xffffffff);
- adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
- pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
- writel(pl_intr, adapter->regs + A_PL_ENABLE);
+ adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+ pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+ writel(pl_intr, adapter->regs + A_PL_ENABLE);
+ }
}
/* Disables all interrupts. */
@@ -479,6 +743,7 @@ void t1_interrupts_disable(adapter_t* adapter)
unsigned int i;
t1_sge_intr_disable(adapter->sge);
+ t1_tp_intr_disable(adapter->tp);
if (adapter->espi)
t1_espi_intr_disable(adapter->espi);
@@ -489,7 +754,8 @@ void t1_interrupts_disable(adapter_t* adapter)
}
/* Disable PCIX & external chip interrupts. */
- writel(0, adapter->regs + A_PL_ENABLE);
+ if (t1_is_asic(adapter))
+ writel(0, adapter->regs + A_PL_ENABLE);
/* PCI-X interrupts */
pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
@@ -501,10 +767,9 @@ void t1_interrupts_disable(adapter_t* adapter)
void t1_interrupts_clear(adapter_t* adapter)
{
unsigned int i;
- u32 pl_intr;
-
t1_sge_intr_clear(adapter->sge);
+ t1_tp_intr_clear(adapter->tp);
if (adapter->espi)
t1_espi_intr_clear(adapter->espi);
@@ -515,10 +780,12 @@ void t1_interrupts_clear(adapter_t* adapter)
}
/* Enable interrupts for external devices. */
- pl_intr = readl(adapter->regs + A_PL_CAUSE);
+ if (t1_is_asic(adapter)) {
+ u32 pl_intr = readl(adapter->regs + A_PL_CAUSE);
- writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
- adapter->regs + A_PL_CAUSE);
+ writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
+ adapter->regs + A_PL_CAUSE);
+ }
/* PCI-X interrupts */
pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
@@ -527,7 +794,7 @@ void t1_interrupts_clear(adapter_t* adapter)
/*
* Slow path interrupt handler for ASICs.
*/
-int t1_slow_intr_handler(adapter_t *adapter)
+static int asic_slow_intr(adapter_t *adapter)
{
u32 cause = readl(adapter->regs + A_PL_CAUSE);
@@ -536,89 +803,54 @@ int t1_slow_intr_handler(adapter_t *adapter)
return 0;
if (cause & F_PL_INTR_SGE_ERR)
t1_sge_intr_error_handler(adapter->sge);
+ if (cause & F_PL_INTR_TP)
+ t1_tp_intr_handler(adapter->tp);
if (cause & F_PL_INTR_ESPI)
t1_espi_intr_handler(adapter->espi);
if (cause & F_PL_INTR_PCIX)
t1_pci_intr_handler(adapter);
if (cause & F_PL_INTR_EXT)
- t1_elmer0_ext_intr(adapter);
+ t1_elmer0_ext_intr_handler(adapter);
/* Clear the interrupts just processed. */
writel(cause, adapter->regs + A_PL_CAUSE);
- (void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */
+ readl(adapter->regs + A_PL_CAUSE); /* flush writes */
return 1;
}
-/* Pause deadlock avoidance parameters */
-#define DROP_MSEC 16
-#define DROP_PKTS_CNT 1
-
-static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable)
-{
- u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
-
- if (enable)
- val |= csum_bit;
- else
- val &= ~csum_bit;
- writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
-}
-
-void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable)
-{
- set_csum_offload(adapter, F_IP_CSUM, enable);
-}
-
-void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable)
-{
- set_csum_offload(adapter, F_UDP_CSUM, enable);
-}
-
-void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable)
+int t1_slow_intr_handler(adapter_t *adapter)
{
- set_csum_offload(adapter, F_TCP_CSUM, enable);
+#ifdef CONFIG_CHELSIO_T1_1G
+ if (!t1_is_asic(adapter))
+ return fpga_slow_intr(adapter);
+#endif
+ return asic_slow_intr(adapter);
}
-static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk)
+/* Power sequencing is a work-around for Intel's XPAKs. */
+static void power_sequence_xpak(adapter_t* adapter)
{
- u32 val;
-
- val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
- F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
- val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
- F_TP_IN_ESPI_CHECK_TCP_CSUM;
- writel(val, adapter->regs + A_TP_IN_CONFIG);
- writel(F_TP_OUT_CSPI_CPL |
- F_TP_OUT_ESPI_ETHERNET |
- F_TP_OUT_ESPI_GENERATE_IP_CSUM |
- F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
- adapter->regs + A_TP_OUT_CONFIG);
-
- val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
- val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM);
- writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
-
- /*
- * Enable pause frame deadlock prevention.
- */
- if (is_T2(adapter)) {
- u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
-
- writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
- V_DROP_TICKS_CNT(drop_ticks) |
- V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
- adapter->regs + A_TP_TX_DROP_CONFIG);
+ u32 mod_detect;
+ u32 gpo;
+
+ /* Check for XPAK */
+ t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
+ if (!(ELMER0_GP_BIT5 & mod_detect)) {
+ /* XPAK is present */
+ t1_tpi_read(adapter, A_ELMER0_GPO, &gpo);
+ gpo |= ELMER0_GP_BIT18;
+ t1_tpi_write(adapter, A_ELMER0_GPO, gpo);
}
-
- writel(F_TP_RESET, adapter->regs + A_TP_RESET);
}
int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
struct adapter_params *p)
{
p->chip_version = bi->chip_term;
+ p->is_asic = (p->chip_version != CHBT_TERM_FPGA);
if (p->chip_version == CHBT_TERM_T1 ||
- p->chip_version == CHBT_TERM_T2) {
+ p->chip_version == CHBT_TERM_T2 ||
+ p->chip_version == CHBT_TERM_FPGA) {
u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
val = G_TP_PC_REV(val);
@@ -640,11 +872,38 @@ int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
static int board_init(adapter_t *adapter, const struct board_info *bi)
{
switch (bi->board) {
+ case CHBT_BOARD_8000:
case CHBT_BOARD_N110:
case CHBT_BOARD_N210:
- writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR);
+ case CHBT_BOARD_CHT210:
+ case CHBT_BOARD_COUGAR:
+ t1_tpi_par(adapter, 0xf);
t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
break;
+ case CHBT_BOARD_CHT110:
+ t1_tpi_par(adapter, 0xf);
+ t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800);
+
+ /* TBD XXX Might not be needed. This fixes a problem
+ * described in the Intel SR XPAK errata.
+ */
+ power_sequence_xpak(adapter);
+ break;
+#ifdef CONFIG_CHELSIO_T1_1G
+ case CHBT_BOARD_CHT204E:
+ /* add config space write here */
+ case CHBT_BOARD_CHT204:
+ case CHBT_BOARD_CHT204V:
+ case CHBT_BOARD_CHN204:
+ t1_tpi_par(adapter, 0xf);
+ t1_tpi_write(adapter, A_ELMER0_GPO, 0x804);
+ break;
+ case CHBT_BOARD_CHT101:
+ case CHBT_BOARD_7500:
+ t1_tpi_par(adapter, 0xf);
+ t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804);
+ break;
+#endif
}
return 0;
}
@@ -666,11 +925,16 @@ int t1_init_hw_modules(adapter_t *adapter)
adapter->regs + A_MC5_CONFIG);
}
+#ifdef CONFIG_CHELSIO_T1_COUGAR
+ if (adapter->cspi && t1_cspi_init(adapter->cspi))
+ goto out_err;
+#endif
if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
bi->espi_nports))
goto out_err;
- t1_tp_reset(adapter, bi->clock_core);
+ if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core))
+ goto out_err;
err = t1_sge_configure(adapter->sge, &adapter->params.sge);
if (err)
@@ -714,8 +978,14 @@ void t1_free_sw_modules(adapter_t *adapter)
if (adapter->sge)
t1_sge_destroy(adapter->sge);
+ if (adapter->tp)
+ t1_tp_destroy(adapter->tp);
if (adapter->espi)
t1_espi_destroy(adapter->espi);
+#ifdef CONFIG_CHELSIO_T1_COUGAR
+ if (adapter->cspi)
+ t1_cspi_destroy(adapter->cspi);
+#endif
}
static void __devinit init_link_config(struct link_config *lc,
@@ -735,6 +1005,13 @@ static void __devinit init_link_config(struct link_config *lc,
}
}
+#ifdef CONFIG_CHELSIO_T1_COUGAR
+ if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) {
+ CH_ERR("%s: CSPI initialization failed\n",
+ adapter->name);
+ goto error;
+ }
+#endif
/*
* Allocate and initialize the data structures that hold the SW state of
@@ -762,6 +1039,13 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
goto error;
}
+ adapter->tp = t1_tp_create(adapter, &adapter->params.tp);
+ if (!adapter->tp) {
+ CH_ERR("%s: TP initialization failed\n",
+ adapter->name);
+ goto error;
+ }
+
board_init(adapter, bi);
bi->mdio_ops->init(adapter, bi);
if (bi->gphy->reset)
@@ -793,7 +1077,9 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
* Get the port's MAC addresses either from the EEPROM if one
* exists or the one hardcoded in the MAC.
*/
- if (vpd_macaddress_get(adapter, i, hw_addr)) {
+ if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY)
+ mac->ops->macaddress_get(mac, hw_addr);
+ else if (vpd_macaddress_get(adapter, i, hw_addr)) {
CH_ERR("%s: could not read MAC address from VPD ROM\n",
adapter->port[i].dev->name);
goto error;
@@ -806,7 +1092,7 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
t1_interrupts_clear(adapter);
return 0;
- error:
+error:
t1_free_sw_modules(adapter);
return -1;
}
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h
index 81816c2b708a..269d097dd927 100644
--- a/drivers/net/chelsio/suni1x10gexp_regs.h
+++ b/drivers/net/chelsio/suni1x10gexp_regs.h
@@ -32,6 +32,30 @@
#ifndef _CXGB_SUNI1x10GEXP_REGS_H_
#define _CXGB_SUNI1x10GEXP_REGS_H_
+/*
+** Space allocated for each Exact Match Filter
+** There are 8 filter configurations
+*/
+#define SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER 0x0003
+
+#define mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId) ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER )
+
+/*
+** Space allocated for VLAN-Id Filter
+** There are 8 filter configurations
+*/
+#define SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER 0x0001
+
+#define mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId) ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER )
+
+/*
+** Space allocated for each MSTAT Counter
+*/
+#define SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT 0x0004
+
+#define mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId) ( (countId) * SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT )
+
+
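
The offset macros above are plain strides. The tiny standalone check below (main() is illustrative only; the constants are copied from the header additions) shows how they reproduce the explicit per-filter and per-counter addresses defined later in this file.

#include <stdio.h>

#define SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER        0x0003
#define mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId) ((filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER)

#define SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT       0x0004
#define mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId) ((countId) * SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT)

int main(void)
{
        /* Filter 1's low-address register: 0x204A + 1 * 3 = 0x204D, which is
         * SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW further down.
         */
        printf("0x%04X\n", 0x204A + mSUNI1x10GEXP_MAC_FILTER_OFFSET(1));

        /* MSTAT counter 2's low word: 0x2110 + 2 * 4 = 0x2118, matching
         * SUNI1x10GEXP_REG_MSTAT_COUNTER_2_LOW.
         */
        printf("0x%04X\n", 0x2110 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(2));
        return 0;
}
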
/******************************************************************************/
/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/
/******************************************************************************/
@@ -39,33 +63,125 @@
/* to the S/UNI-1x10GE-XP Data Sheet for the signification of each bit */
/******************************************************************************/
+
+#define SUNI1x10GEXP_REG_IDENTIFICATION 0x0000
+#define SUNI1x10GEXP_REG_PRODUCT_REVISION 0x0001
+#define SUNI1x10GEXP_REG_CONFIG_AND_RESET_CONTROL 0x0002
+#define SUNI1x10GEXP_REG_LOOPBACK_MISC_CTRL 0x0003
#define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004
+#define SUNI1x10GEXP_REG_GLOBAL_PERFORMANCE_MONITOR_UPDATE 0x0005
+
+#define SUNI1x10GEXP_REG_MDIO_COMMAND 0x0006
+#define SUNI1x10GEXP_REG_MDIO_INTERRUPT_ENABLE 0x0007
+#define SUNI1x10GEXP_REG_MDIO_INTERRUPT_STATUS 0x0008
+#define SUNI1x10GEXP_REG_MMD_PHY_ADDRESS 0x0009
+#define SUNI1x10GEXP_REG_MMD_CONTROL_ADDRESS_DATA 0x000A
+#define SUNI1x10GEXP_REG_MDIO_READ_STATUS_DATA 0x000B
+
+#define SUNI1x10GEXP_REG_OAM_INTF_CTRL 0x000C
#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D
#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E
+#define SUNI1x10GEXP_REG_FREE 0x000F
+
+#define SUNI1x10GEXP_REG_XTEF_MISC_CTRL 0x0010
+#define SUNI1x10GEXP_REG_XRF_MISC_CTRL 0x0011
+
+#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_1 0x0100
+#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_2 0x0101
#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_VISIBLE 0x0103
#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104
+#define SUNI1x10GEXP_REG_SERDES_3125_TEST_CONFIG 0x0107
+
#define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_2 0x2041
#define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042
#define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043
#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045
#define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046
#define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047
#define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048
+#define SUNI1x10GEXP_REG_RXXG_RECEIVE_FIFO_THRESHOLD 0x2049
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_LOW(filterId) (0x204A + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_MID(filterId) (0x204B + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_HIGH(filterId)(0x204C + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID(filterId) (0x2062 + mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId))
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_LOW 0x204A
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_MID 0x204B
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_HIGH 0x204C
#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D
#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E
#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_LOW 0x2050
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_MID 0x2051
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_HIGH 0x2052
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_LOW 0x2053
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_MID 0x2054
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_HIGH 0x2055
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_LOW 0x2056
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_MID 0x2057
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_HIGH 0x2058
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_LOW 0x2059
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_MID 0x205A
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_HIGH 0x205B
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_LOW 0x205C
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_MID 0x205D
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_HIGH 0x205E
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_LOW 0x205F
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_MID 0x2060
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_HIGH 0x2061
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_0 0x2062
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_1 0x2063
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_2 0x2064
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_3 0x2065
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_4 0x2066
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_5 0x2067
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_6 0x2068
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_7 0x2069
#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A
#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B
#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C
#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D
#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_1 0x206F
#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070
+
+#define SUNI1x10GEXP_REG_XRF_PATTERN_GEN_CTRL 0x2081
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_0 0x2084
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_1 0x2085
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_2 0x2086
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_3 0x2087
#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088
#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089
+#define SUNI1x10GEXP_REG_XRF_ERR_STATUS 0x208A
#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B
#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C
+#define SUNI1x10GEXP_REG_XRF_CODE_ERR_THRES 0x2092
+
+#define SUNI1x10GEXP_REG_RXOAM_CONFIG 0x20C0
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_CONFIG 0x20C1
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_CONFIG 0x20C2
+#define SUNI1x10GEXP_REG_RXOAM_CONFIG_2 0x20C3
+#define SUNI1x10GEXP_REG_RXOAM_HEC_CONFIG 0x20C4
+#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_THRES 0x20C5
#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7
#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8
+#define SUNI1x10GEXP_REG_RXOAM_STATUS 0x20C9
+#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_COUNT 0x20CA
+#define SUNI1x10GEXP_REG_RXOAM_FIFO_OVERFLOW_COUNT 0x20CB
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_LSB 0x20CC
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_MSB 0x20CD
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_LSB 0x20CE
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_MSB 0x20CF
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_LSB 0x20D0
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_MSB 0x20D1
+#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_LSB 0x20D2
+#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_MSB 0x20D3
+#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_LSB 0x20D4
+#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_MSB 0x20D5
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_LSB 0x20D6
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_MSB 0x20D7
+
#define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102
@@ -75,50 +191,321 @@
#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106
#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107
#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_ADDRESS 0x2109
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_LOW 0x210A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_MIDDLE 0x210B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_HIGH 0x210C
+#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(countId) (0x2110 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId))
+#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_MID(countId) (0x2111 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId))
+#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_HIGH(countId) (0x2112 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId))
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_MID 0x2111
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_HIGH 0x2112
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_RESVD 0x2113
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_MID 0x2115
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_HIGH 0x2116
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_RESVD 0x2117
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_LOW 0x2118
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_MID 0x2119
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_HIGH 0x211A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_RESVD 0x211B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_LOW 0x211C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_MID 0x211D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_HIGH 0x211E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_RESVD 0x211F
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_MID 0x2121
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_HIGH 0x2122
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_RESVD 0x2123
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_MID 0x2125
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_HIGH 0x2126
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_RESVD 0x2127
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_MID 0x2129
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_HIGH 0x212A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_RESVD 0x212B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_LOW 0x212C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_MID 0x212D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_HIGH 0x212E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_RESVD 0x212F
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_MID 0x2131
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_HIGH 0x2132
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_RESVD 0x2133
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_LOW 0x2134
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_MID 0x2135
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_HIGH 0x2136
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_RESVD 0x2137
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_MID 0x2139
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_HIGH 0x213A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_RESVD 0x213B
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_MID 0x213D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_HIGH 0x213E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_RESVD 0x213F
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_MID 0x2141
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_HIGH 0x2142
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_RESVD 0x2143
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_MID 0x2145
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_HIGH 0x2146
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_RESVD 0x2147
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_LOW 0x2148
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_MID 0x2149
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_HIGH 0x214A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_RESVD 0x214B
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_MID 0x214D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_HIGH 0x214E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_RESVD 0x214F
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_MID 0x2151
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_HIGH 0x2152
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_RESVD 0x2153
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_MID 0x2155
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_HIGH 0x2156
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_RESVD 0x2157
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_MID 0x2159
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_HIGH 0x215A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_RESVD 0x215B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_LOW 0x215C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_MID 0x215D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_HIGH 0x215E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_RESVD 0x215F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_LOW 0x2160
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_MID 0x2161
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_HIGH 0x2162
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_RESVD 0x2163
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_LOW 0x2164
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_MID 0x2165
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_HIGH 0x2166
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_RESVD 0x2167
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_LOW 0x2168
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_MID 0x2169
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_HIGH 0x216A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_RESVD 0x216B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_LOW 0x216C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_MID 0x216D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_HIGH 0x216E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_RESVD 0x216F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_LOW 0x2170
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_MID 0x2171
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_HIGH 0x2172
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_RESVD 0x2173
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW 0x2174
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_MID 0x2175
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_HIGH 0x2176
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_RESVD 0x2177
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW 0x2178
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_MID 0x2179
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_HIGH 0x217A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_RESVD 0x217B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_LOW 0x217C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_MID 0x217D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_HIGH 0x217E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_RESVD 0x217F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_LOW 0x2180
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_MID 0x2181
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_HIGH 0x2182
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_RESVD 0x2183
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_LOW 0x2184
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_MID 0x2185
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_HIGH 0x2186
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_RESVD 0x2187
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_LOW 0x2188
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_MID 0x2189
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_HIGH 0x218A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_RESVD 0x218B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_LOW 0x218C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_MID 0x218D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_HIGH 0x218E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_RESVD 0x218F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_LOW 0x2190
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_MID 0x2191
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_HIGH 0x2192
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_RESVD 0x2193
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_MID 0x2195
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_HIGH 0x2196
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_RESVD 0x2197
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_LOW 0x2198
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_MID 0x2199
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_HIGH 0x219A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_RESVD 0x219B
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_MID 0x219D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_HIGH 0x219E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_RESVD 0x219F
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_MID 0x21A1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_HIGH 0x21A2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_RESVD 0x21A3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_LOW 0x21A4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_MID 0x21A5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_HIGH 0x21A6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_RESVD 0x21A7
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_MID 0x21A9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_HIGH 0x21AA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_RESVD 0x21AB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_LOW 0x21AC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_MID 0x21AD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_HIGH 0x21AE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_RESVD 0x21AF
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_MID 0x21B1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_HIGH 0x21B2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_RESVD 0x21B3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_LOW 0x21B4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_MID 0x21B5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_HIGH 0x21B6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_RESVD 0x21B7
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_MID 0x21B9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_HIGH 0x21BA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_RESVD 0x21BB
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_MID 0x21BD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_HIGH 0x21BE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_RESVD 0x21BF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_LOW 0x21C0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_MID 0x21C1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_HIGH 0x21C2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_RESVD 0x21C3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_LOW 0x21C4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_MID 0x21C5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_HIGH 0x21C6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_RESVD 0x21C7
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_LOW 0x21C8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_MID 0x21C9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_HIGH 0x21CA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_RESVD 0x21CB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_LOW 0x21CC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_MID 0x21CD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_HIGH 0x21CE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_RESVD 0x21CF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_LOW 0x21D0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_MID 0x21D1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_HIGH 0x21D2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_RESVD 0x21D3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_LOW 0x21D4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_MID 0x21D5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_HIGH 0x21D6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_RESVD 0x21D7
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_LOW 0x21D8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_MID 0x21D9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_HIGH 0x21DA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_RESVD 0x21DB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW 0x21DC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_MID 0x21DD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_HIGH 0x21DE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_RESVD 0x21DF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW 0x21E0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_MID 0x21E1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_HIGH 0x21E2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_RESVD 0x21E3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_LOW 0x21E4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_MID 0x21E5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_HIGH 0x21E6
+#define SUNI1x10GEXP_CNTR_MAC_ETHERNET_NUM 51
+
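/*
 * Editorial sketch, not part of the patch: assembling one MSTAT counter
 * from its LOW/MID/HIGH 16-bit registers with the macros defined above.
 * read16() is a hypothetical register accessor, and
 * mSUNI1x10GEXP_MSTAT_COUNT_OFFSET() is assumed -- consistent with the
 * 4-register spacing of the counters listed above -- to expand to
 * (4 * countId).
 */
static inline unsigned long long
suni1x10gexp_example_read_mstat(unsigned int (*read16)(unsigned int off),
				unsigned int countId)
{
	unsigned long long lo  = read16(mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(countId));
	unsigned long long mid = read16(mSUNI1x10GEXP_REG_MSTAT_COUNTER_MID(countId));
	unsigned long long hi  = read16(mSUNI1x10GEXP_REG_MSTAT_COUNTER_HIGH(countId));

	/* Each counter is split across three consecutive 16-bit registers. */
	return (hi << 32) | (mid << 16) | lo;
}
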
+#define SUNI1x10GEXP_REG_IFLX_GLOBAL_CONFIG 0x2200
+#define SUNI1x10GEXP_REG_IFLX_CHANNEL_PROVISION 0x2201
#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209
#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A
+#define SUNI1x10GEXP_REG_IFLX_INDIR_CHANNEL_ADDRESS 0x220D
+#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_LOW_LIMIT_PROVISION 0x220E
+#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_HIGH_LIMIT 0x220F
+#define SUNI1x10GEXP_REG_IFLX_INDIR_FULL_ALMOST_FULL_STATUS_LIMIT 0x2210
+#define SUNI1x10GEXP_REG_IFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_LIMIT 0x2211
+
+#define SUNI1x10GEXP_REG_PL4MOS_CONFIG 0x2240
+#define SUNI1x10GEXP_REG_PL4MOS_MASK 0x2241
+#define SUNI1x10GEXP_REG_PL4MOS_FAIRNESS_MASKING 0x2242
+#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST1 0x2243
+#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST2 0x2244
+#define SUNI1x10GEXP_REG_PL4MOS_TRANSFER_SIZE 0x2245
+
+#define SUNI1x10GEXP_REG_PL4ODP_CONFIG 0x2280
#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282
#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283
+#define SUNI1x10GEXP_REG_PL4ODP_CONFIG_MAX_T 0x2284
+
#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300
#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301
#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_LIMITS 0x2303
+#define SUNI1x10GEXP_REG_PL4IO_CALENDAR_REPETITIONS 0x2304
+#define SUNI1x10GEXP_REG_PL4IO_CONFIG 0x2305
+
#define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_2 0x3041
#define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042
#define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043
+#define SUNI1x10GEXP_REG_TXXG_STATUS 0x3044
#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045
+#define SUNI1x10GEXP_REG_TXXG_MIN_FRAME_SIZE 0x3046
#define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047
#define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048
#define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049
+#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER 0x304D
+#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER_INTERVAL 0x304E
+#define SUNI1x10GEXP_REG_TXXG_FILTER_ERROR_COUNTER 0x3051
+#define SUNI1x10GEXP_REG_TXXG_PAUSE_QUANTUM_CONFIG 0x3052
+
+#define SUNI1x10GEXP_REG_XTEF_CTRL 0x3080
#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084
#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085
+#define SUNI1x10GEXP_REG_XTEF_VISIBILITY 0x3086
+
+#define SUNI1x10GEXP_REG_TXOAM_OAM_CONFIG 0x30C0
+#define SUNI1x10GEXP_REG_TXOAM_MINI_RATE_CONFIG 0x30C1
+#define SUNI1x10GEXP_REG_TXOAM_MINI_GAP_FIFO_CONFIG 0x30C2
+#define SUNI1x10GEXP_REG_TXOAM_P1P2_STATIC_VALUES 0x30C3
+#define SUNI1x10GEXP_REG_TXOAM_P3P4_STATIC_VALUES 0x30C4
+#define SUNI1x10GEXP_REG_TXOAM_P5P6_STATIC_VALUES 0x30C5
#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6
#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7
+#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_LSB 0x30C8
+#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_MSB 0x30C9
+#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_LSB 0x30CA
+#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_MSB 0x30CB
+#define SUNI1x10GEXP_REG_TXOAM_P1P2_MINI_MASK 0x30CC
+#define SUNI1x10GEXP_REG_TXOAM_P3P4_MINI_MASK 0x30CD
+#define SUNI1x10GEXP_REG_TXOAM_P5P6_MINI_MASK 0x30CE
+#define SUNI1x10GEXP_REG_TXOAM_COSET 0x30CF
+#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_LSB 0x30D0
+#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_MSB 0x30D1
+#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_LSB 0x30D2
+#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_MSB 0x30D3
+
+
+#define SUNI1x10GEXP_REG_EFLX_GLOBAL_CONFIG 0x3200
+#define SUNI1x10GEXP_REG_EFLX_ERCU_GLOBAL_STATUS 0x3201
+#define SUNI1x10GEXP_REG_EFLX_INDIR_CHANNEL_ADDRESS 0x3202
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_LOW_LIMIT 0x3203
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_HIGH_LIMIT 0x3204
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FULL_ALMOST_FULL_STATUS_AND_LIMIT 0x3205
+#define SUNI1x10GEXP_REG_EFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_AND_LIMIT 0x3206
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_CUT_THROUGH_THRESHOLD 0x3207
#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C
#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D
+#define SUNI1x10GEXP_REG_EFLX_CHANNEL_PROVISION 0x3210
+
+#define SUNI1x10GEXP_REG_PL4IDU_CONFIG 0x3280
#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282
#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283
+
+/*----------------------------------------*/
+#define SUNI1x10GEXP_REG_MAX_OFFSET 0x3480
+
/******************************************************************************/
/* -- End register offset definitions -- */
/******************************************************************************/
@@ -127,6 +514,81 @@
/** SUNI-1x10GE-XP REGISTER BIT MASKS **/
/******************************************************************************/
+#define SUNI1x10GEXP_BITMSK_BITS_1 0x00001
+#define SUNI1x10GEXP_BITMSK_BITS_2 0x00003
+#define SUNI1x10GEXP_BITMSK_BITS_3 0x00007
+#define SUNI1x10GEXP_BITMSK_BITS_4 0x0000f
+#define SUNI1x10GEXP_BITMSK_BITS_5 0x0001f
+#define SUNI1x10GEXP_BITMSK_BITS_6 0x0003f
+#define SUNI1x10GEXP_BITMSK_BITS_7 0x0007f
+#define SUNI1x10GEXP_BITMSK_BITS_8 0x000ff
+#define SUNI1x10GEXP_BITMSK_BITS_9 0x001ff
+#define SUNI1x10GEXP_BITMSK_BITS_10 0x003ff
+#define SUNI1x10GEXP_BITMSK_BITS_11 0x007ff
+#define SUNI1x10GEXP_BITMSK_BITS_12 0x00fff
+#define SUNI1x10GEXP_BITMSK_BITS_13 0x01fff
+#define SUNI1x10GEXP_BITMSK_BITS_14 0x03fff
+#define SUNI1x10GEXP_BITMSK_BITS_15 0x07fff
+#define SUNI1x10GEXP_BITMSK_BITS_16 0x0ffff
+
+#define mSUNI1x10GEXP_CLR_MSBITS_1(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_15)
+#define mSUNI1x10GEXP_CLR_MSBITS_2(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_14)
+#define mSUNI1x10GEXP_CLR_MSBITS_3(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_13)
+#define mSUNI1x10GEXP_CLR_MSBITS_4(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_12)
+#define mSUNI1x10GEXP_CLR_MSBITS_5(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_11)
+#define mSUNI1x10GEXP_CLR_MSBITS_6(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_10)
+#define mSUNI1x10GEXP_CLR_MSBITS_7(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_9)
+#define mSUNI1x10GEXP_CLR_MSBITS_8(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_8)
+#define mSUNI1x10GEXP_CLR_MSBITS_9(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_7)
+#define mSUNI1x10GEXP_CLR_MSBITS_10(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_6)
+#define mSUNI1x10GEXP_CLR_MSBITS_11(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_5)
+#define mSUNI1x10GEXP_CLR_MSBITS_12(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_4)
+#define mSUNI1x10GEXP_CLR_MSBITS_13(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_3)
+#define mSUNI1x10GEXP_CLR_MSBITS_14(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_2)
+#define mSUNI1x10GEXP_CLR_MSBITS_15(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_1)
+
+#define mSUNI1x10GEXP_GET_BIT(val, bitMsk) (((val)&(bitMsk)) ? 1:0)
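
/*
 * Editorial sketch, not part of the patch: how the generic helpers above
 * are used.  "reg_val" stands for a 16-bit value already read from a
 * device register; no real accessor is assumed.
 */
static inline int suni1x10gexp_example_helpers(unsigned int reg_val)
{
	/* Keep only the 12 least significant bits. */
	unsigned int low12 = mSUNI1x10GEXP_CLR_MSBITS_4(reg_val);

	/* Reduce a single-bit test (here bit 15) to exactly 0 or 1. */
	int msb_set = mSUNI1x10GEXP_GET_BIT(reg_val, 0x8000);

	return msb_set ? (int)low12 : -1;
}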
+
+
+
+/*----------------------------------------------------------------------------
+ * Register 0x0001: S/UNI-1x10GE-XP Product Revision
+ * Bit 3-0 REVISION
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_REVISION 0x000F
+
+/*----------------------------------------------------------------------------
+ * Register 0x0002: S/UNI-1x10GE-XP Configuration and Reset Control
+ * Bit 2 XAUI_ARESETB
+ * Bit 1 PL4_ARESETB
+ * Bit 0 DRESETB
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XAUI_ARESET 0x0004
+#define SUNI1x10GEXP_BITMSK_PL4_ARESET 0x0002
+#define SUNI1x10GEXP_BITMSK_DRESETB 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0003: S/UNI-1x10GE-XP Loop Back and Miscellaneous Control
+ * Bit 11 PL4IO_OUTCLKSEL
+ * Bit 9 SYSPCSLB
+ * Bit 8 LINEPCSLB
+ * Bit 7 MSTAT_BYPASS
+ * Bit 6 RXXG_BYPASS
+ * Bit 5 TXXG_BYPASS
+ * Bit 4 SOP_PAD_EN
+ * Bit 1 LOS_INV
+ * Bit 0 OVERRIDE_LOS
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUTCLKSEL 0x0800
+#define SUNI1x10GEXP_BITMSK_SYSPCSLB 0x0200
+#define SUNI1x10GEXP_BITMSK_LINEPCSLB 0x0100
+#define SUNI1x10GEXP_BITMSK_MSTAT_BYPASS 0x0080
+#define SUNI1x10GEXP_BITMSK_RXXG_BYPASS 0x0040
+#define SUNI1x10GEXP_BITMSK_TXXG_BYPASS 0x0020
+#define SUNI1x10GEXP_BITMSK_SOP_PAD_EN 0x0010
+#define SUNI1x10GEXP_BITMSK_LOS_INV 0x0002
+#define SUNI1x10GEXP_BITMSK_OVERRIDE_LOS 0x0001
+
/*----------------------------------------------------------------------------
* Register 0x0004: S/UNI-1x10GE-XP Device Status
* Bit 9 TOP_SXRA_EXPIRED
@@ -141,7 +603,10 @@
* Bit 0 TOP_PL4_OUT_ROOL
*----------------------------------------------------------------------------*/
#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200
+#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY 0x0100
+#define SUNI1x10GEXP_BITMSK_TOP_DTRB 0x0080
#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040
+#define SUNI1x10GEXP_BITMSK_TOP_PAUSED 0x0020
#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010
#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008
#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004
@@ -149,12 +614,219 @@
#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001
/*----------------------------------------------------------------------------
+ * Register 0x0005: Global Performance Update and Clock Monitors
+ * Bit 15 TIP
+ * Bit 8 XAUI_REF_CLKA
+ * Bit 7 RXLANE3CLKA
+ * Bit 6 RXLANE2CLKA
+ * Bit 5 RXLANE1CLKA
+ * Bit 4 RXLANE0CLKA
+ * Bit 3 CSUCLKA
+ * Bit 2 TDCLKA
+ * Bit 1 RSCLKA
+ * Bit 0 RDCLKA
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TIP 0x8000
+#define SUNI1x10GEXP_BITMSK_XAUI_REF_CLKA 0x0100
+#define SUNI1x10GEXP_BITMSK_RXLANE3CLKA 0x0080
+#define SUNI1x10GEXP_BITMSK_RXLANE2CLKA 0x0040
+#define SUNI1x10GEXP_BITMSK_RXLANE1CLKA 0x0020
+#define SUNI1x10GEXP_BITMSK_RXLANE0CLKA 0x0010
+#define SUNI1x10GEXP_BITMSK_CSUCLKA 0x0008
+#define SUNI1x10GEXP_BITMSK_TDCLKA 0x0004
+#define SUNI1x10GEXP_BITMSK_RSCLKA 0x0002
+#define SUNI1x10GEXP_BITMSK_RDCLKA 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0006: MDIO Command
+ * Bit 4 MDIO_RDINC
+ * Bit 3 MDIO_RSTAT
+ * Bit 2 MDIO_LCTLD
+ * Bit 1 MDIO_LCTLA
+ * Bit 0 MDIO_SPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_RDINC 0x0010
+#define SUNI1x10GEXP_BITMSK_MDIO_RSTAT 0x0008
+#define SUNI1x10GEXP_BITMSK_MDIO_LCTLD 0x0004
+#define SUNI1x10GEXP_BITMSK_MDIO_LCTLA 0x0002
+#define SUNI1x10GEXP_BITMSK_MDIO_SPRE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0007: MDIO Interrupt Enable
+ * Bit 0 MDIO_BUSY_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_BUSY_EN 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0008: MDIO Interrupt Status
+ * Bit 0 MDIO_BUSYI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_BUSYI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0009: MMD PHY Address
+ * Bit 12-8 MDIO_DEVADR
+ * Bit 4-0 MDIO_PRTADR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_DEVADR 0x1F00
+#define SUNI1x10GEXP_BITOFF_MDIO_DEVADR 8
+#define SUNI1x10GEXP_BITMSK_MDIO_PRTADR 0x001F
+#define SUNI1x10GEXP_BITOFF_MDIO_PRTADR 0
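
/*
 * Editorial sketch, not part of the patch: packing the MMD PHY Address
 * register (0x0009) from a device address and a port address using the
 * mask/offset pairs above.  Out-of-range inputs are simply truncated by
 * the masks.
 */
static inline unsigned int
suni1x10gexp_example_mmd_addr(unsigned int devadr, unsigned int prtadr)
{
	return ((devadr << SUNI1x10GEXP_BITOFF_MDIO_DEVADR) &
		SUNI1x10GEXP_BITMSK_MDIO_DEVADR) |
	       ((prtadr << SUNI1x10GEXP_BITOFF_MDIO_PRTADR) &
		SUNI1x10GEXP_BITMSK_MDIO_PRTADR);
}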
+
+/*----------------------------------------------------------------------------
+ * Register 0x000C: OAM Interface Control
+ * Bit 6 MDO_OD_ENB
+ * Bit 5 MDI_INV
+ * Bit 4 MDI_SEL
+ * Bit 3 RXOAMEN
+ * Bit 2 RXOAMCLKEN
+ * Bit 1 TXOAMEN
+ * Bit 0 TXOAMCLKEN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDO_OD_ENB 0x0040
+#define SUNI1x10GEXP_BITMSK_MDI_INV 0x0020
+#define SUNI1x10GEXP_BITMSK_MDI_SEL 0x0010
+#define SUNI1x10GEXP_BITMSK_RXOAMEN 0x0008
+#define SUNI1x10GEXP_BITMSK_RXOAMCLKEN 0x0004
+#define SUNI1x10GEXP_BITMSK_TXOAMEN 0x0002
+#define SUNI1x10GEXP_BITMSK_TXOAMCLKEN 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x000D: S/UNI-1x10GE-XP Master Interrupt Status
+ * Bit 15 TOP_PL4IO_INT
+ * Bit 14 TOP_IRAM_INT
+ * Bit 13 TOP_ERAM_INT
+ * Bit 12 TOP_XAUI_INT
+ * Bit 11 TOP_MSTAT_INT
+ * Bit 10 TOP_RXXG_INT
+ * Bit 9 TOP_TXXG_INT
+ * Bit 8 TOP_XRF_INT
+ * Bit 7 TOP_XTEF_INT
+ * Bit 6 TOP_MDIO_BUSY_INT
+ * Bit 5 TOP_RXOAM_INT
+ * Bit 4 TOP_TXOAM_INT
+ * Bit 3 TOP_IFLX_INT
+ * Bit 2 TOP_EFLX_INT
+ * Bit 1 TOP_PL4ODP_INT
+ * Bit 0 TOP_PL4IDU_INT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_PL4IO_INT 0x8000
+#define SUNI1x10GEXP_BITMSK_TOP_IRAM_INT 0x4000
+#define SUNI1x10GEXP_BITMSK_TOP_ERAM_INT 0x2000
+#define SUNI1x10GEXP_BITMSK_TOP_XAUI_INT 0x1000
+#define SUNI1x10GEXP_BITMSK_TOP_MSTAT_INT 0x0800
+#define SUNI1x10GEXP_BITMSK_TOP_RXXG_INT 0x0400
+#define SUNI1x10GEXP_BITMSK_TOP_TXXG_INT 0x0200
+#define SUNI1x10GEXP_BITMSK_TOP_XRF_INT 0x0100
+#define SUNI1x10GEXP_BITMSK_TOP_XTEF_INT 0x0080
+#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY_INT 0x0040
+#define SUNI1x10GEXP_BITMSK_TOP_RXOAM_INT 0x0020
+#define SUNI1x10GEXP_BITMSK_TOP_TXOAM_INT 0x0010
+#define SUNI1x10GEXP_BITMSK_TOP_IFLX_INT 0x0008
+#define SUNI1x10GEXP_BITMSK_TOP_EFLX_INT 0x0004
+#define SUNI1x10GEXP_BITMSK_TOP_PL4ODP_INT 0x0002
+#define SUNI1x10GEXP_BITMSK_TOP_PL4IDU_INT 0x0001
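
/*
 * Editorial sketch, not part of the patch: fanning the Master Interrupt
 * Status word (register 0x000D) out to per-block handlers.  The handler
 * callbacks are hypothetical; a real driver would service every
 * TOP_*_INT source it enables.
 */
static inline void
suni1x10gexp_example_dispatch(unsigned int master_status,
			      void (*handle_rxxg)(void),
			      void (*handle_txxg)(void))
{
	if (master_status & SUNI1x10GEXP_BITMSK_TOP_RXXG_INT)
		handle_rxxg();		/* receive MAC interrupt pending */
	if (master_status & SUNI1x10GEXP_BITMSK_TOP_TXXG_INT)
		handle_txxg();		/* transmit MAC interrupt pending */
	/* ...and so on for the remaining TOP_*_INT bits. */
}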
+
+/*----------------------------------------------------------------------------
 * Register 0x000E: PM3393 Global interrupt enable
* Bit 15 TOP_INTE
*----------------------------------------------------------------------------*/
#define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000
/*----------------------------------------------------------------------------
+ * Register 0x0010: XTEF Miscellaneous Control
+ * Bit 7 RF_VAL
+ * Bit 6 RF_OVERRIDE
+ * Bit 5 LF_VAL
+ * Bit 4 LF_OVERRIDE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RF_VAL 0x0080
+#define SUNI1x10GEXP_BITMSK_RF_OVERRIDE 0x0040
+#define SUNI1x10GEXP_BITMSK_LF_VAL 0x0020
+#define SUNI1x10GEXP_BITMSK_LF_OVERRIDE 0x0010
+#define SUNI1x10GEXP_BITMSK_LFRF_OVERRIDE_VAL 0x00F0
+
+/*----------------------------------------------------------------------------
+ * Register 0x0011: XRF Miscellaneous Control
+ * Bit 6-4 EN_IDLE_REP
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EN_IDLE_REP 0x0070
+
+/*----------------------------------------------------------------------------
+ * Register 0x0100: SERDES 3125 Configuration Register 1
+ * Bit 10 RXEQB_3
+ * Bit 8 RXEQB_2
+ * Bit 6 RXEQB_1
+ * Bit 4 RXEQB_0
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXEQB 0x0FF0
+#define SUNI1x10GEXP_BITOFF_RXEQB_3 10
+#define SUNI1x10GEXP_BITOFF_RXEQB_2 8
+#define SUNI1x10GEXP_BITOFF_RXEQB_1 6
+#define SUNI1x10GEXP_BITOFF_RXEQB_0 4
+
+/*----------------------------------------------------------------------------
+ * Register 0x0101: SERDES 3125 Configuration Register 2
+ * Bit 12 YSEL
+ * Bit 7 PRE_EMPH_3
+ * Bit 6 PRE_EMPH_2
+ * Bit 5 PRE_EMPH_1
+ * Bit 4 PRE_EMPH_0
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_YSEL 0x1000
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH 0x00F0
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_3 0x0080
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_2 0x0040
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_1 0x0020
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_0 0x0010
+
+/*----------------------------------------------------------------------------
+ * Register 0x0102: SERDES 3125 Interrupt Enable Register
+ * Bit 3 LASIE
+ * Bit 2 SPLL_RAE
+ * Bit 1 MPLL_RAE
+ * Bit 0 PLL_LOCKE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LASIE 0x0008
+#define SUNI1x10GEXP_BITMSK_SPLL_RAE 0x0004
+#define SUNI1x10GEXP_BITMSK_MPLL_RAE 0x0002
+#define SUNI1x10GEXP_BITMSK_PLL_LOCKE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0103: SERDES 3125 Interrupt Visibility Register
+ * Bit 3 LASIV
+ * Bit 2 SPLL_RAV
+ * Bit 1 MPLL_RAV
+ * Bit 0 PLL_LOCKV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LASIV 0x0008
+#define SUNI1x10GEXP_BITMSK_SPLL_RAV 0x0004
+#define SUNI1x10GEXP_BITMSK_MPLL_RAV 0x0002
+#define SUNI1x10GEXP_BITMSK_PLL_LOCKV 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0104: SERDES 3125 Interrupt Status Register
+ * Bit 3 LASII
+ * Bit 2 SPLL_RAI
+ * Bit 1 MPLL_RAI
+ * Bit 0 PLL_LOCKI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LASII 0x0008
+#define SUNI1x10GEXP_BITMSK_SPLL_RAI 0x0004
+#define SUNI1x10GEXP_BITMSK_MPLL_RAI 0x0002
+#define SUNI1x10GEXP_BITMSK_PLL_LOCKI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0107: SERDES 3125 Test Configuration
+ * Bit 12 DUALTX
+ * Bit 10 HC_1
+ * Bit 9 HC_0
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_DUALTX 0x1000
+#define SUNI1x10GEXP_BITMSK_HC 0x0600
+#define SUNI1x10GEXP_BITOFF_HC_0 9
+
+/*----------------------------------------------------------------------------
* Register 0x2040: RXXG Configuration 1
* Bit 15 RXXG_RXEN
* Bit 14 RXXG_ROCF
@@ -168,11 +840,84 @@
* Bit 2-0 RXXG_MIFG
*----------------------------------------------------------------------------*/
#define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_ROCF 0x4000
+#define SUNI1x10GEXP_BITMSK_RXXG_PAD_STRIP 0x2000
#define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_LONGP 0x0200
+#define SUNI1x10GEXP_BITMSK_RXXG_PARF 0x0100
#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080
+#define SUNI1x10GEXP_BITMSK_RXXG_PASS_CTRL 0x0020
#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008
/*----------------------------------------------------------------------------
+ * Register 0x2041: RXXG Configuration 2
+ * Bit 7-0 RXXG_HDRSIZE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_HDRSIZE 0x00FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x2042: RXXG Configuration 3
+ * Bit 15 RXXG_MIN_LERRE
+ * Bit 14 RXXG_MAX_LERRE
+ * Bit 12 RXXG_LINE_ERRE
+ * Bit 10 RXXG_RX_OVRE
+ * Bit 9 RXXG_ADR_FILTERE
+ * Bit 8 RXXG_ERR_FILTERE
+ * Bit 5 RXXG_PRMB_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRE 0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRE 0x4000
+#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRE 0x1000
+#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRE 0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERE 0x0200
+#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERRE 0x0100
+#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE 0x0020
+
+/*----------------------------------------------------------------------------
+ * Register 0x2043: RXXG Interrupt
+ * Bit 15 RXXG_MIN_LERRI
+ * Bit 14 RXXG_MAX_LERRI
+ * Bit 12 RXXG_LINE_ERRI
+ * Bit 10 RXXG_RX_OVRI
+ * Bit 9 RXXG_ADR_FILTERI
+ * Bit 8 RXXG_ERR_FILTERI
+ * Bit 5 RXXG_PRMB_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRI 0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRI 0x4000
+#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRI 0x1000
+#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRI 0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERI 0x0200
+#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERI 0x0100
+#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE 0x0020
+
+/*----------------------------------------------------------------------------
+ * Register 0x2049: RXXG Receive FIFO Threshold
+ * Bit 2-0 RXXG_CUT_THRU
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_CUT_THRU 0x0007
+#define SUNI1x10GEXP_BITOFF_RXXG_CUT_THRU 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2062 - 0x2069: RXXG Exact Match VID
+ * Bit 11-0 RXXG_VID_MATCH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_VID_MATCH 0x0FFF
+#define SUNI1x10GEXP_BITOFF_RXXG_VID_MATCH 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x206E - 0x206F: RXXG Address Filter Control
+ * Bit 3 RXXG_FORWARD_ENABLE
+ * Bit 2 RXXG_VLAN_ENABLE
+ * Bit 1 RXXG_SRC_ADDR
+ * Bit 0 RXXG_MATCH_ENABLE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_FORWARD_ENABLE 0x0008
+#define SUNI1x10GEXP_BITMSK_RXXG_VLAN_ENABLE 0x0004
+#define SUNI1x10GEXP_BITMSK_RXXG_SRC_ADDR 0x0002
+#define SUNI1x10GEXP_BITMSK_RXXG_MATCH_ENABLE 0x0001
+
+/*----------------------------------------------------------------------------
* Register 0x2070: RXXG Address Filter Control 2
* Bit 1 RXXG_PMODE
* Bit 0 RXXG_MHASH_EN
@@ -181,15 +926,446 @@
#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001
/*----------------------------------------------------------------------------
+ * Register 0x2081: XRF Control Register 2
+ * Bit 6 EN_PKT_GEN
+ * Bit 4-2 PATT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EN_PKT_GEN 0x0040
+#define SUNI1x10GEXP_BITMSK_PATT 0x001C
+#define SUNI1x10GEXP_BITOFF_PATT 2
+
+/*----------------------------------------------------------------------------
+ * Register 0x2088: XRF Interrupt Enable
+ * Bit 12-9 LANE_HICERE
+ * Bit 8-5 HS_SD_LANEE
+ * Bit 4 ALIGN_STATUS_ERRE
+ * Bit 3-0 LANE_SYNC_STAT_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_HICERE 0x1E00
+#define SUNI1x10GEXP_BITOFF_LANE_HICERE 9
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANEE 0x01E0
+#define SUNI1x10GEXP_BITOFF_HS_SD_LANEE 5
+#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRE 0x0010
+#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRE 0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRE 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2089: XRF Interrupt Status
+ * Bit 12-9 LANE_HICERI
+ * Bit 8-5 HS_SD_LANEI
+ * Bit 4 ALIGN_STATUS_ERRI
+ * Bit 3-0 LANE_SYNC_STAT_ERRI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_HICERI 0x1E00
+#define SUNI1x10GEXP_BITOFF_LANE_HICERI 9
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANEI 0x01E0
+#define SUNI1x10GEXP_BITOFF_HS_SD_LANEI 5
+#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRI 0x0010
+#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRI 0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRI 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x208A: XRF Error Status
+ * Bit 8-5 HS_SD_LANE
+ * Bit 4 ALIGN_STATUS_ERR
+ * Bit 3-0 LANE_SYNC_STAT_ERR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE3 0x0100
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE2 0x0080
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE1 0x0040
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE0 0x0020
+#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERR 0x0010
+#define SUNI1x10GEXP_BITMSK_LANE3_SYNC_STAT_ERR 0x0008
+#define SUNI1x10GEXP_BITMSK_LANE2_SYNC_STAT_ERR 0x0004
+#define SUNI1x10GEXP_BITMSK_LANE1_SYNC_STAT_ERR 0x0002
+#define SUNI1x10GEXP_BITMSK_LANE0_SYNC_STAT_ERR 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x208B: XRF Diagnostic Interrupt Enable
+ * Bit 7-4 LANE_OVERRUNE
+ * Bit 3-0 LANE_UNDERRUNE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNE 0x00F0
+#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNE 4
+#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNE 0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNE 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x208C: XRF Diagnostic Interrupt Status
+ * Bit 7-4 LANE_OVERRUNI
+ * Bit 3-0 LANE_UNDERRUNI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNI 0x00F0
+#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNI 4
+#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNI 0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNI 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C0: RXOAM Configuration
+ * Bit 15 RXOAM_BUSY
+ * Bit 14-12 RXOAM_F2_SEL
+ * Bit 10-8 RXOAM_F1_SEL
+ * Bit 7-6 RXOAM_FILTER_CTRL
+ * Bit 5-0 RXOAM_PX_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_BUSY 0x8000
+#define SUNI1x10GEXP_BITMSK_RXOAM_F2_SEL 0x7000
+#define SUNI1x10GEXP_BITOFF_RXOAM_F2_SEL 12
+#define SUNI1x10GEXP_BITMSK_RXOAM_F1_SEL 0x0700
+#define SUNI1x10GEXP_BITOFF_RXOAM_F1_SEL 8
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_CTRL 0x00C0
+#define SUNI1x10GEXP_BITOFF_RXOAM_FILTER_CTRL 6
+#define SUNI1x10GEXP_BITMSK_RXOAM_PX_EN 0x003F
+#define SUNI1x10GEXP_BITOFF_RXOAM_PX_EN 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C1,0x20C2: RXOAM Filter Configuration
+ * Bit 15-8 RXOAM_FX_MASK
+ * Bit 7-0 RXOAM_FX_VAL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FX_MASK 0xFF00
+#define SUNI1x10GEXP_BITOFF_RXOAM_FX_MASK 8
+#define SUNI1x10GEXP_BITMSK_RXOAM_FX_VAL 0x00FF
+#define SUNI1x10GEXP_BITOFF_RXOAM_FX_VAL 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C3: RXOAM Configuration Register 2
+ * Bit 13 RXOAM_REC_BYTE_VAL
+ * Bit 11-10 RXOAM_BYPASS_MODE
+ * Bit 5-0 RXOAM_PX_CLEAR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_REC_BYTE_VAL 0x2000
+#define SUNI1x10GEXP_BITMSK_RXOAM_BYPASS_MODE 0x0C00
+#define SUNI1x10GEXP_BITOFF_RXOAM_BYPASS_MODE 10
+#define SUNI1x10GEXP_BITMSK_RXOAM_PX_CLEAR 0x003F
+#define SUNI1x10GEXP_BITOFF_RXOAM_PX_CLEAR 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C4: RXOAM HEC Configuration
+ * Bit 15-8 RXOAM_COSET
+ * Bit 2 RXOAM_HEC_ERR_PKT
+ * Bit 0 RXOAM_HEC_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_COSET 0xFF00
+#define SUNI1x10GEXP_BITOFF_RXOAM_COSET 8
+#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_ERR_PKT 0x0004
+#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_EN 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C7: RXOAM Interrupt Enable
+ * Bit 10 RXOAM_FILTER_THRSHE
+ * Bit 9 RXOAM_OAM_ERRE
+ * Bit 8 RXOAM_HECE_THRSHE
+ * Bit 7 RXOAM_SOPE
+ * Bit 6 RXOAM_RFE
+ * Bit 5 RXOAM_LFE
+ * Bit 4 RXOAM_DV_ERRE
+ * Bit 3 RXOAM_DATA_INVALIDE
+ * Bit 2 RXOAM_FILTER_DROPE
+ * Bit 1 RXOAM_HECE
+ * Bit 0 RXOAM_OFLE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHE 0x0400
+#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRE 0x0200
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHE 0x0100
+#define SUNI1x10GEXP_BITMSK_RXOAM_SOPE 0x0080
+#define SUNI1x10GEXP_BITMSK_RXOAM_RFE 0x0040
+#define SUNI1x10GEXP_BITMSK_RXOAM_LFE 0x0020
+#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRE 0x0010
+#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDE 0x0008
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPE 0x0004
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE 0x0002
+#define SUNI1x10GEXP_BITMSK_RXOAM_OFLE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C8: RXOAM Interrupt Status
+ * Bit 10 RXOAM_FILTER_THRSHI
+ * Bit 9 RXOAM_OAM_ERRI
+ * Bit 8 RXOAM_HECE_THRSHI
+ * Bit 7 RXOAM_SOPI
+ * Bit 6 RXOAM_RFI
+ * Bit 5 RXOAM_LFI
+ * Bit 4 RXOAM_DV_ERRI
+ * Bit 3 RXOAM_DATA_INVALIDI
+ * Bit 2 RXOAM_FILTER_DROPI
+ * Bit 1 RXOAM_HECI
+ * Bit 0 RXOAM_OFLI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHI 0x0400
+#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRI 0x0200
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHI 0x0100
+#define SUNI1x10GEXP_BITMSK_RXOAM_SOPI 0x0080
+#define SUNI1x10GEXP_BITMSK_RXOAM_RFI 0x0040
+#define SUNI1x10GEXP_BITMSK_RXOAM_LFI 0x0020
+#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRI 0x0010
+#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDI 0x0008
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPI 0x0004
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECI 0x0002
+#define SUNI1x10GEXP_BITMSK_RXOAM_OFLI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C9: RXOAM Status
+ * Bit 10 RXOAM_FILTER_THRSHV
+ * Bit 8 RXOAM_HECE_THRSHV
+ * Bit 6 RXOAM_RFV
+ * Bit 5 RXOAM_LFV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHV 0x0400
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHV 0x0100
+#define SUNI1x10GEXP_BITMSK_RXOAM_RFV 0x0040
+#define SUNI1x10GEXP_BITMSK_RXOAM_LFV 0x0020
+
+/*----------------------------------------------------------------------------
* Register 0x2100: MSTAT Control
* Bit 2 MSTAT_WRITE
* Bit 1 MSTAT_CLEAR
* Bit 0 MSTAT_SNAP
*----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE 0x0004
#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002
#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001
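
/*
 * Editorial sketch, not part of the patch: the MSTAT_SNAP bit presumably
 * latches the live statistics into the counter registers so they can be
 * read coherently.  write16() is a hypothetical register accessor.
 */
static inline void
suni1x10gexp_example_mstat_snapshot(void (*write16)(unsigned int off,
						    unsigned int val))
{
	write16(SUNI1x10GEXP_REG_MSTAT_CONTROL,
		SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
}
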
/*----------------------------------------------------------------------------
+ * Register 0x2109: MSTAT Counter Write Address
+ * Bit 5-0 MSTAT_WRITE_ADDRESS
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE_ADDRESS 0x003F
+#define SUNI1x10GEXP_BITOFF_MSTAT_WRITE_ADDRESS 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2200: IFLX Global Configuration Register
+ * Bit 15 IFLX_IRCU_ENABLE
+ * Bit 14 IFLX_IDSWT_ENABLE
+ * Bit 13-0 IFLX_IFD_CNT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_IRCU_ENABLE 0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_IDSWT_ENABLE 0x4000
+#define SUNI1x10GEXP_BITMSK_IFLX_IFD_CNT 0x3FFF
+#define SUNI1x10GEXP_BITOFF_IFLX_IFD_CNT 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2209: IFLX FIFO Overflow Enable
+ * Bit 0 IFLX_OVFE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_OVFE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x220A: IFLX FIFO Overflow Interrupt
+ * Bit 0 IFLX_OVFI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_OVFI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x220D: IFLX Indirect Channel Address
+ * Bit 15 IFLX_BUSY
+ * Bit 14 IFLX_RWB
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_BUSY 0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_RWB 0x4000
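
/*
 * Editorial sketch, not part of the patch: the access pattern suggested
 * by the IFLX_BUSY/IFLX_RWB bits -- select a channel, then poll BUSY
 * until the indirect per-channel registers are valid.  read16()/write16()
 * are hypothetical accessors, and the handshake details (RWB and BUSY
 * polarity) are assumptions based on the bit names.
 */
static inline void
suni1x10gexp_example_iflx_select(unsigned int (*read16)(unsigned int off),
				 void (*write16)(unsigned int off,
						 unsigned int val),
				 unsigned int channel)
{
	/* Request a read of the indirect registers for this channel. */
	write16(SUNI1x10GEXP_REG_IFLX_INDIR_CHANNEL_ADDRESS,
		SUNI1x10GEXP_BITMSK_IFLX_RWB | channel);

	/* Wait for the device to finish the transfer. */
	while (read16(SUNI1x10GEXP_REG_IFLX_INDIR_CHANNEL_ADDRESS) &
	       SUNI1x10GEXP_BITMSK_IFLX_BUSY)
		;
}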
+
+/*----------------------------------------------------------------------------
+ * Register 0x220E: IFLX Indirect Logical FIFO Low Limit & Provision
+ * Bit 9-0 IFLX_LOLIM
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_LOLIM 0x03FF
+#define SUNI1x10GEXP_BITOFF_IFLX_LOLIM 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x220F: IFLX Indirect Logical FIFO High Limit
+ * Bit 9-0 IFLX_HILIM
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_HILIM 0x03FF
+#define SUNI1x10GEXP_BITOFF_IFLX_HILIM 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2210: IFLX Indirect Full/Almost Full Status & Limit
+ * Bit 15 IFLX_FULL
+ * Bit 14 IFLX_AFULL
+ * Bit 13-0 IFLX_AFTH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_FULL 0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_AFULL 0x4000
+#define SUNI1x10GEXP_BITMSK_IFLX_AFTH 0x3FFF
+#define SUNI1x10GEXP_BITOFF_IFLX_AFTH 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2211: IFLX Indirect Empty/Almost Empty Status & Limit
+ * Bit 15 IFLX_EMPTY
+ * Bit 14 IFLX_AEMPTY
+ * Bit 13-0 IFLX_AETH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_EMPTY 0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_AEMPTY 0x4000
+#define SUNI1x10GEXP_BITMSK_IFLX_AETH 0x3FFF
+#define SUNI1x10GEXP_BITOFF_IFLX_AETH 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2240: PL4MOS Configuration Register
+ * Bit 3 PL4MOS_RE_INIT
+ * Bit 2 PL4MOS_EN
+ * Bit 1 PL4MOS_NO_STATUS
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_RE_INIT 0x0008
+#define SUNI1x10GEXP_BITMSK_PL4MOS_EN 0x0004
+#define SUNI1x10GEXP_BITMSK_PL4MOS_NO_STATUS 0x0002
+
+/*----------------------------------------------------------------------------
+ * Register 0x2243: PL4MOS MaxBurst1 Register
+ * Bit 11-0 PL4MOS_MAX_BURST1
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST1 0x0FFF
+#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST1 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2244: PL4MOS MaxBurst2 Register
+ * Bit 11-0 PL4MOS_MAX_BURST2
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST2 0x0FFF
+#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST2 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2245: PL4MOS Transfer Size Register
+ * Bit 7-0 PL4MOS_MAX_TRANSFER
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_TRANSFER 0x00FF
+#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_TRANSFER 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2280: PL4ODP Configuration
+ * Bit 15-12 PL4ODP_REPEAT_T
+ * Bit 8 PL4ODP_SOP_RULE
+ * Bit 1 PL4ODP_EN_PORTS
+ * Bit 0 PL4ODP_EN_DFWD
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4ODP_REPEAT_T 0xF000
+#define SUNI1x10GEXP_BITOFF_PL4ODP_REPEAT_T 12
+#define SUNI1x10GEXP_BITMSK_PL4ODP_SOP_RULE 0x0100
+#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_PORTS 0x0002
+#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_DFWD 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2282: PL4ODP Interrupt Mask
+ * Bit 0 PL4ODP_OUT_DISE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISE 0x0001
+
+
+
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBE 0x0080
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPE 0x0040
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPE 0x0008
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPE 0x0004
+#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRE 0x0002
+
+
+/*----------------------------------------------------------------------------
+ * Register 0x2283: PL4ODP Interrupt
+ * Bit 0 PL4ODP_OUT_DISI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISI 0x0001
+
+
+
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBI 0x0080
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPI 0x0040
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPI 0x0008
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPI 0x0004
+#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRI 0x0002
+
+/*----------------------------------------------------------------------------
+ * Register 0x2300: PL4IO Lock Detect Status
+ * Bit 15 PL4IO_OUT_ROOLV
+ * Bit 12 PL4IO_IS_ROOLV
+ * Bit 11 PL4IO_DIP2_ERRV
+ * Bit 8 PL4IO_ID_ROOLV
+ * Bit 4 PL4IO_IS_DOOLV
+ * Bit 0 PL4IO_ID_DOOLV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLV 0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLV 0x1000
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRV 0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLV 0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLV 0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLV 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2301: PL4IO Lock Detect Change
+ * Bit 15 PL4IO_OUT_ROOLI
+ * Bit 12 PL4IO_IS_ROOLI
+ * Bit 11 PL4IO_DIP2_ERRI
+ * Bit 8 PL4IO_ID_ROOLI
+ * Bit 4 PL4IO_IS_DOOLI
+ * Bit 0 PL4IO_ID_DOOLI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLI 0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLI 0x1000
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRI 0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLI 0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLI 0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2302: PL4IO Lock Detect Mask
+ * Bit 15 PL4IO_OUT_ROOLE
+ * Bit 12 PL4IO_IS_ROOLE
+ * Bit 11 PL4IO_DIP2_ERRE
+ * Bit 8 PL4IO_ID_ROOLE
+ * Bit 4 PL4IO_IS_DOOLE
+ * Bit 0 PL4IO_ID_DOOLE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLE 0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLE 0x1000
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRE 0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLE 0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLE 0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2303: PL4IO Lock Detect Limits
+ * Bit 15-8 PL4IO_REF_LIMIT
+ * Bit 7-0 PL4IO_TRAN_LIMIT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_REF_LIMIT 0xFF00
+#define SUNI1x10GEXP_BITOFF_PL4IO_REF_LIMIT 8
+#define SUNI1x10GEXP_BITMSK_PL4IO_TRAN_LIMIT 0x00FF
+#define SUNI1x10GEXP_BITOFF_PL4IO_TRAN_LIMIT 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2304: PL4IO Calendar Repetitions
+ * Bit 15-8 PL4IO_IN_MUL
+ * Bit 7-0 PL4IO_OUT_MUL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_IN_MUL 0xFF00
+#define SUNI1x10GEXP_BITOFF_PL4IO_IN_MUL 8
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_MUL 0x00FF
+#define SUNI1x10GEXP_BITOFF_PL4IO_OUT_MUL 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2305: PL4IO Configuration
+ * Bit 15 PL4IO_DIP2_ERR_CHK
+ * Bit 11 PL4IO_ODAT_DIS
+ * Bit 10 PL4IO_TRAIN_DIS
+ * Bit 9 PL4IO_OSTAT_DIS
+ * Bit 8 PL4IO_ISTAT_DIS
+ * Bit 7 PL4IO_NO_ISTAT
+ * Bit 6 PL4IO_STAT_OUTSEL
+ * Bit 5 PL4IO_INSEL
+ * Bit 4 PL4IO_DLSEL
+ * Bit 1-0 PL4IO_OUTSEL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERR_CHK 0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_ODAT_DIS 0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_TRAIN_DIS 0x0400
+#define SUNI1x10GEXP_BITMSK_PL4IO_OSTAT_DIS 0x0200
+#define SUNI1x10GEXP_BITMSK_PL4IO_ISTAT_DIS 0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_NO_ISTAT 0x0080
+#define SUNI1x10GEXP_BITMSK_PL4IO_STAT_OUTSEL 0x0040
+#define SUNI1x10GEXP_BITMSK_PL4IO_INSEL 0x0020
+#define SUNI1x10GEXP_BITMSK_PL4IO_DLSEL 0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUTSEL 0x0003
+#define SUNI1x10GEXP_BITOFF_PL4IO_OUTSEL 0
+
+/*----------------------------------------------------------------------------
* Register 0x3040: TXXG Configuration Register 1
* Bit 15 TXXG_TXEN0
* Bit 13 TXXG_HOSTPAUSE
@@ -202,12 +1378,266 @@
* Bit 0 TXXG_SPRE
*----------------------------------------------------------------------------*/
#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000
+#define SUNI1x10GEXP_BITMSK_TXXG_HOSTPAUSE 0x2000
+#define SUNI1x10GEXP_BITMSK_TXXG_IPGT 0x1F80
#define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7
#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020
#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010
#define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008
#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004
#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002
+#define SUNI1x10GEXP_BITMSK_TXXG_SPRE 0x0001
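
/*
 * Editorial sketch, not part of the patch: composing a plausible TXXG
 * Configuration 1 value (register 0x3040) from the bits above --
 * transmitter enabled, CRC appended, short frames padded, and the
 * requested inter-packet gap written through the IPGT field.
 */
static inline unsigned int suni1x10gexp_example_txxg_cfg1(unsigned int ipgt)
{
	return SUNI1x10GEXP_BITMSK_TXXG_TXEN0 |
	       SUNI1x10GEXP_BITMSK_TXXG_CRCEN |
	       SUNI1x10GEXP_BITMSK_TXXG_PADEN |
	       ((ipgt << SUNI1x10GEXP_BITOFF_TXXG_IPGT) &
		SUNI1x10GEXP_BITMSK_TXXG_IPGT);
}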
+
+/*----------------------------------------------------------------------------
+ * Register 0x3041: TXXG Configuration Register 2
+ * Bit 7-0 TXXG_HDRSIZE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_HDRSIZE 0x00FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x3042: TXXG Configuration Register 3
+ * Bit 15 TXXG_FIFO_ERRE
+ * Bit 14 TXXG_FIFO_UDRE
+ * Bit 13 TXXG_MAX_LERRE
+ * Bit 12 TXXG_MIN_LERRE
+ * Bit 11 TXXG_XFERE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRE 0x8000
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRE 0x4000
+#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRE 0x2000
+#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRE 0x1000
+#define SUNI1x10GEXP_BITMSK_TXXG_XFERE 0x0800
+
+/*----------------------------------------------------------------------------
+ * Register 0x3043: TXXG Interrupt
+ * Bit 15 TXXG_FIFO_ERRI
+ * Bit 14 TXXG_FIFO_UDRI
+ * Bit 13 TXXG_MAX_LERRI
+ * Bit 12 TXXG_MIN_LERRI
+ * Bit 11 TXXG_XFERI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRI 0x8000
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRI 0x4000
+#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRI 0x2000
+#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRI 0x1000
+#define SUNI1x10GEXP_BITMSK_TXXG_XFERI 0x0800
+
+/*----------------------------------------------------------------------------
+ * Register 0x3044: TXXG Status Register
+ * Bit 1 TXXG_TXACTIVE
+ * Bit 0 TXXG_PAUSED
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TXACTIVE 0x0002
+#define SUNI1x10GEXP_BITMSK_TXXG_PAUSED 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3046: TXXG TX_MINFR - Transmit Min Frame Size Register
+ * Bit 7-0 TXXG_TX_MINFR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TX_MINFR 0x00FF
+#define SUNI1x10GEXP_BITOFF_TXXG_TX_MINFR 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3052: TXXG Pause Quantum Value Configuration Register
+ * Bit 7-0 TXXG_FC_PAUSE_QNTM
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_FC_PAUSE_QNTM 0x00FF
+#define SUNI1x10GEXP_BITOFF_TXXG_FC_PAUSE_QNTM 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3080: XTEF Control
+ * Bit 3-0 XTEF_FORCE_PARITY_ERR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_FORCE_PARITY_ERR 0x000F
+#define SUNI1x10GEXP_BITOFF_XTEF_FORCE_PARITY_ERR 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3084: XTEF Interrupt Event Register
+ * Bit 0 XTEF_LOST_SYNCI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3085: XTEF Interrupt Enable Register
+ * Bit 0 XTEF_LOST_SYNCE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3086: XTEF Visibility Register
+ * Bit 0 XTEF_LOST_SYNCV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCV 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C0: TXOAM OAM Configuration
+ * Bit 15 TXOAM_HEC_EN
+ * Bit 14 TXOAM_EMPTYCODE_EN
+ * Bit 13 TXOAM_FORCE_IDLE
+ * Bit 12 TXOAM_IGNORE_IDLE
+ * Bit 11-6 TXOAM_PX_OVERWRITE
+ * Bit 5-0 TXOAM_PX_SEL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_HEC_EN 0x8000
+#define SUNI1x10GEXP_BITMSK_TXOAM_EMPTYCODE_EN 0x4000
+#define SUNI1x10GEXP_BITMSK_TXOAM_FORCE_IDLE 0x2000
+#define SUNI1x10GEXP_BITMSK_TXOAM_IGNORE_IDLE 0x1000
+#define SUNI1x10GEXP_BITMSK_TXOAM_PX_OVERWRITE 0x0FC0
+#define SUNI1x10GEXP_BITOFF_TXOAM_PX_OVERWRITE 6
+#define SUNI1x10GEXP_BITMSK_TXOAM_PX_SEL 0x003F
+#define SUNI1x10GEXP_BITOFF_TXOAM_PX_SEL 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C1: TXOAM Mini-Packet Rate Configuration
+ * Bit 15 TXOAM_MINIDIS
+ * Bit 14 TXOAM_BUSY
+ * Bit 13 TXOAM_TRANS_EN
+ * Bit 10-0 TXOAM_MINIRATE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIDIS 0x8000
+#define SUNI1x10GEXP_BITMSK_TXOAM_BUSY 0x4000
+#define SUNI1x10GEXP_BITMSK_TXOAM_TRANS_EN 0x2000
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIRATE 0x07FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C2: TXOAM Mini-Packet Gap and FIFO Configuration
+ * Bit 13-10 TXOAM_FTHRESH
+ * Bit 9-6 TXOAM_MINIPOST
+ * Bit 5-0 TXOAM_MINIPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_FTHRESH 0x3C00
+#define SUNI1x10GEXP_BITOFF_TXOAM_FTHRESH 10
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPOST 0x03C0
+#define SUNI1x10GEXP_BITOFF_TXOAM_MINIPOST 6
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPRE 0x003F
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C6: TXOAM Interrupt Enable
+ * Bit 2 TXOAM_SOP_ERRE
+ * Bit 1 TXOAM_OFLE
+ * Bit 0 TXOAM_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRE 0x0004
+#define SUNI1x10GEXP_BITMSK_TXOAM_OFLE 0x0002
+#define SUNI1x10GEXP_BITMSK_TXOAM_ERRE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C7: TXOAM Interrupt Status
+ * Bit 2 TXOAM_SOP_ERRI
+ * Bit 1 TXOAM_OFLI
+ * Bit 0 TXOAM_ERRI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRI 0x0004
+#define SUNI1x10GEXP_BITMSK_TXOAM_OFLI 0x0002
+#define SUNI1x10GEXP_BITMSK_TXOAM_ERRI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x30CF: TXOAM Coset
+ * Bit 7-0 TXOAM_COSET
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_COSET 0x00FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x3200: EFLX Global Configuration
+ * Bit 15 EFLX_ERCU_EN
+ * Bit 7 EFLX_EN_EDSWT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_ERCU_EN 0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_EN_EDSWT 0x0080
+
+/*----------------------------------------------------------------------------
+ * Register 0x3201: EFLX ERCU Global Status
+ * Bit 13 EFLX_OVF_ERR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_OVF_ERR 0x2000
+
+/*----------------------------------------------------------------------------
+ * Register 0x3202: EFLX Indirect Channel Address
+ * Bit 15 EFLX_BUSY
+ * Bit 14 EFLX_RDWRB
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_BUSY 0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_RDWRB 0x4000
+
+/*----------------------------------------------------------------------------
+ * Register 0x3203: EFLX Indirect Logical FIFO Low Limit
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_LOLIM 0x03FF
+#define SUNI1x10GEXP_BITOFF_EFLX_LOLIM 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3204: EFLX Indirect Logical FIFO High Limit
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_HILIM 0x03FF
+#define SUNI1x10GEXP_BITOFF_EFLX_HILIM 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3205: EFLX Indirect Full/Almost-Full Status and Limit
+ * Bit 15 EFLX_FULL
+ * Bit 14 EFLX_AFULL
+ * Bit 13-0 EFLX_AFTH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_FULL 0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_AFULL 0x4000
+#define SUNI1x10GEXP_BITMSK_EFLX_AFTH 0x3FFF
+#define SUNI1x10GEXP_BITOFF_EFLX_AFTH 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3206: EFLX Indirect Empty/Almost-Empty Status and Limit
+ * Bit 15 EFLX_EMPTY
+ * Bit 14 EFLX_AEMPTY
+ * Bit 13-0 EFLX_AETH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_EMPTY 0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_AEMPTY 0x4000
+#define SUNI1x10GEXP_BITMSK_EFLX_AETH 0x3FFF
+#define SUNI1x10GEXP_BITOFF_EFLX_AETH 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3207: EFLX Indirect FIFO Cut-Through Threshold
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_CUT_THRU 0x3FFF
+#define SUNI1x10GEXP_BITOFF_EFLX_CUT_THRU 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x320C: EFLX FIFO Overflow Error Enable
+ * Bit 0 EFLX_OVFE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_OVFE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x320D: EFLX FIFO Overflow Error Indication
+ * Bit 0 EFLX_OVFI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_OVFI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3210: EFLX Channel Provision
+ * Bit 0 EFLX_PROV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_PROV 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3280: PL4IDU Configuration
+ * Bit 2 PL4IDU_SYNCH_ON_TRAIN
+ * Bit 1 PL4IDU_EN_PORTS
+ * Bit 0 PL4IDU_EN_DFWD
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IDU_SYNCH_ON_TRAIN 0x0004
+#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_PORTS 0x0002
+#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_DFWD 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3282: PL4IDU Interrupt Mask
+ * Bit 1 PL4IDU_DIP4E
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4E 0x0002
+
+/*----------------------------------------------------------------------------
+ * Register 0x3283: PL4IDU Interrupt
+ * Bit 1 PL4IDU_DIP4I
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4I 0x0002
#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
diff --git a/drivers/net/chelsio/tp.c b/drivers/net/chelsio/tp.c
new file mode 100644
index 000000000000..0ca0b6e19e43
--- /dev/null
+++ b/drivers/net/chelsio/tp.c
@@ -0,0 +1,178 @@
+/* $Date: 2006/02/07 04:21:54 $ $RCSfile: tp.c,v $ $Revision: 1.73 $ */
+#include "common.h"
+#include "regs.h"
+#include "tp.h"
+#ifdef CONFIG_CHELSIO_T1_1G
+#include "fpga_defs.h"
+#endif
+
+struct petp {
+ adapter_t *adapter;
+};
+
+/* Pause deadlock avoidance parameters */
+#define DROP_MSEC 16
+#define DROP_PKTS_CNT 1
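+/*
+ * tp_init() converts DROP_MSEC from milliseconds into TP core-clock ticks
+ * (DROP_MSEC * tp_clk / 1000) and programs it, together with DROP_PKTS_CNT,
+ * into A_TP_TX_DROP_CONFIG on multi-port T2 adapters.
+ */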
+
+static void tp_init(adapter_t * ap, const struct tp_params *p,
+ unsigned int tp_clk)
+{
+ if (t1_is_asic(ap)) {
+ u32 val;
+
+ val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
+ F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
+ if (!p->pm_size)
+ val |= F_OFFLOAD_DISABLE;
+ else
+ val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
+ F_TP_IN_ESPI_CHECK_TCP_CSUM;
+ writel(val, ap->regs + A_TP_IN_CONFIG);
+ writel(F_TP_OUT_CSPI_CPL |
+ F_TP_OUT_ESPI_ETHERNET |
+ F_TP_OUT_ESPI_GENERATE_IP_CSUM |
+ F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
+ ap->regs + A_TP_OUT_CONFIG);
+ writel(V_IP_TTL(64) |
+ F_PATH_MTU /* IP DF bit */ |
+ V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
+ V_SYN_COOKIE_PARAMETER(29),
+ ap->regs + A_TP_GLOBAL_CONFIG);
+ /*
+ * Enable pause frame deadlock prevention.
+ */
+ if (is_T2(ap) && ap->params.nports > 1) {
+ u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
+
+ writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
+ V_DROP_TICKS_CNT(drop_ticks) |
+ V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
+ ap->regs + A_TP_TX_DROP_CONFIG);
+ }
+
+ }
+}
+
+void t1_tp_destroy(struct petp *tp)
+{
+ kfree(tp);
+}
+
+struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p)
+{
+ struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+ if (!tp)
+ return NULL;
+
+ tp->adapter = adapter;
+
+ return tp;
+}
+
+void t1_tp_intr_enable(struct petp *tp)
+{
+ u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE);
+
+#ifdef CONFIG_CHELSIO_T1_1G
+ if (!t1_is_asic(tp->adapter)) {
+ /* FPGA */
+ writel(0xffffffff,
+ tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE);
+ writel(tp_intr | FPGA_PCIX_INTERRUPT_TP,
+ tp->adapter->regs + A_PL_ENABLE);
+ } else
+#endif
+ {
+ /* We don't use any TP interrupts */
+ writel(0, tp->adapter->regs + A_TP_INT_ENABLE);
+ writel(tp_intr | F_PL_INTR_TP,
+ tp->adapter->regs + A_PL_ENABLE);
+ }
+}
+
+void t1_tp_intr_disable(struct petp *tp)
+{
+ u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE);
+
+#ifdef CONFIG_CHELSIO_T1_1G
+ if (!t1_is_asic(tp->adapter)) {
+ /* FPGA */
+ writel(0, tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE);
+ writel(tp_intr & ~FPGA_PCIX_INTERRUPT_TP,
+ tp->adapter->regs + A_PL_ENABLE);
+ } else
+#endif
+ {
+ writel(0, tp->adapter->regs + A_TP_INT_ENABLE);
+ writel(tp_intr & ~F_PL_INTR_TP,
+ tp->adapter->regs + A_PL_ENABLE);
+ }
+}
+
+void t1_tp_intr_clear(struct petp *tp)
+{
+#ifdef CONFIG_CHELSIO_T1_1G
+ if (!t1_is_asic(tp->adapter)) {
+ writel(0xffffffff,
+ tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
+ writel(FPGA_PCIX_INTERRUPT_TP, tp->adapter->regs + A_PL_CAUSE);
+ return;
+ }
+#endif
+ writel(0xffffffff, tp->adapter->regs + A_TP_INT_CAUSE);
+ writel(F_PL_INTR_TP, tp->adapter->regs + A_PL_CAUSE);
+}
+
+int t1_tp_intr_handler(struct petp *tp)
+{
+ u32 cause;
+
+#ifdef CONFIG_CHELSIO_T1_1G
+ /* FPGA doesn't support TP interrupts. */
+ if (!t1_is_asic(tp->adapter))
+ return 1;
+#endif
+
+ cause = readl(tp->adapter->regs + A_TP_INT_CAUSE);
+ writel(cause, tp->adapter->regs + A_TP_INT_CAUSE);
+ return 0;
+}
+
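+/*
+ * Read-modify-write helper: set or clear a single checksum-offload enable
+ * bit in A_TP_GLOBAL_CONFIG.
+ */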
+static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable)
+{
+ u32 val = readl(tp->adapter->regs + A_TP_GLOBAL_CONFIG);
+
+ if (enable)
+ val |= csum_bit;
+ else
+ val &= ~csum_bit;
+ writel(val, tp->adapter->regs + A_TP_GLOBAL_CONFIG);
+}
+
+void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable)
+{
+ set_csum_offload(tp, F_IP_CSUM, enable);
+}
+
+void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable)
+{
+ set_csum_offload(tp, F_UDP_CSUM, enable);
+}
+
+void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable)
+{
+ set_csum_offload(tp, F_TCP_CSUM, enable);
+}
+
+/*
+ * Initialize TP state. tp_params contains initial settings for some TP
+ * parameters, particularly the one-time PM and CM settings.
+ */
+int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk)
+{
+ adapter_t *adapter = tp->adapter;
+
+ tp_init(adapter, p, tp_clk);
+ writel(F_TP_RESET, adapter->regs + A_TP_RESET);
+ return 0;
+}
diff --git a/drivers/net/chelsio/tp.h b/drivers/net/chelsio/tp.h
new file mode 100644
index 000000000000..32fc71e58913
--- /dev/null
+++ b/drivers/net/chelsio/tp.h
@@ -0,0 +1,73 @@
+/* $Date: 2005/03/07 23:59:05 $ $RCSfile: tp.h,v $ $Revision: 1.20 $ */
+#ifndef CHELSIO_TP_H
+#define CHELSIO_TP_H
+
+#include "common.h"
+
+#define TP_MAX_RX_COALESCING_SIZE 16224U
+
+struct tp_mib_statistics {
+
+ /* IP */
+ u32 ipInReceive_hi;
+ u32 ipInReceive_lo;
+ u32 ipInHdrErrors_hi;
+ u32 ipInHdrErrors_lo;
+ u32 ipInAddrErrors_hi;
+ u32 ipInAddrErrors_lo;
+ u32 ipInUnknownProtos_hi;
+ u32 ipInUnknownProtos_lo;
+ u32 ipInDiscards_hi;
+ u32 ipInDiscards_lo;
+ u32 ipInDelivers_hi;
+ u32 ipInDelivers_lo;
+ u32 ipOutRequests_hi;
+ u32 ipOutRequests_lo;
+ u32 ipOutDiscards_hi;
+ u32 ipOutDiscards_lo;
+ u32 ipOutNoRoutes_hi;
+ u32 ipOutNoRoutes_lo;
+ u32 ipReasmTimeout;
+ u32 ipReasmReqds;
+ u32 ipReasmOKs;
+ u32 ipReasmFails;
+
+ u32 reserved[8];
+
+ /* TCP */
+ u32 tcpActiveOpens;
+ u32 tcpPassiveOpens;
+ u32 tcpAttemptFails;
+ u32 tcpEstabResets;
+ u32 tcpOutRsts;
+ u32 tcpCurrEstab;
+ u32 tcpInSegs_hi;
+ u32 tcpInSegs_lo;
+ u32 tcpOutSegs_hi;
+ u32 tcpOutSegs_lo;
+ u32 tcpRetransSeg_hi;
+ u32 tcpRetransSeg_lo;
+ u32 tcpInErrs_hi;
+ u32 tcpInErrs_lo;
+ u32 tcpRtoMin;
+ u32 tcpRtoMax;
+};
+
+struct petp;
+struct tp_params;
+
+struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p);
+void t1_tp_destroy(struct petp *tp);
+
+void t1_tp_intr_disable(struct petp *tp);
+void t1_tp_intr_enable(struct petp *tp);
+void t1_tp_intr_clear(struct petp *tp);
+int t1_tp_intr_handler(struct petp *tp);
+
+void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
+void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable);
+void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
+void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
+int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
+int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
+#endif
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c
new file mode 100644
index 000000000000..85dc3b1dc309
--- /dev/null
+++ b/drivers/net/chelsio/vsc7326.c
@@ -0,0 +1,725 @@
+/* $Date: 2006/04/28 19:20:06 $ $RCSfile: vsc7326.c,v $ $Revision: 1.19 $ */
+
+/* Driver for Vitesse VSC7326 (Schaumburg) MAC */
+
+#include "gmac.h"
+#include "elmer0.h"
+#include "vsc7326_reg.h"
+
+/* Update fast changing statistics every 15 seconds */
+#define STATS_TICK_SECS 15
+/* 30 minutes for full statistics update */
+#define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS)
+
+#define MAX_MTU 9600
+
+/* The egress WM value 0x01a01fff should be used only when the
+ * interface is down (MAC port disabled). This is a workaround
+ * for disabling the T2/MAC flow-control. When the interface is
+ * enabled, the WM value should be set to 0x014a03F0.
+ */
+#define WM_DISABLE 0x01a01fff
+#define WM_ENABLE 0x014a03F0
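+/*
+ * WM_ENABLE is written by mac_enable() when a port is brought up; the
+ * per-port init table below, run by mac_reset()/mac_disable(), installs
+ * WM_DISABLE again.
+ */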
+
+struct init_table {
+ u32 addr;
+ u32 data;
+};
+
+struct _cmac_instance {
+ u32 index;
+ u32 ticks;
+};
+
+#define INITBLOCK_SLEEP 0xffffffff
+
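+/*
+ * Indirect register access: the MAC sits behind the TPI.  A read is kicked
+ * off by a TPI read at the target address (that first result is discarded),
+ * then REG_LOCAL_STATUS is polled until bit 0 signals completion and the
+ * value is fetched from REG_LOCAL_DATA.  Each 32-bit register is split into
+ * two 16-bit TPI locations: low half at (addr << 2) + 4, high half at
+ * addr << 2.
+ */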
+static void vsc_read(adapter_t *adapter, u32 addr, u32 *val)
+{
+ u32 status, vlo, vhi;
+ int i;
+
+ spin_lock_bh(&adapter->mac_lock);
+ t1_tpi_read(adapter, (addr << 2) + 4, &vlo);
+ i = 0;
+ do {
+ t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo);
+ t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi);
+ status = (vhi << 16) | vlo;
+ i++;
+ } while (((status & 1) == 0) && (i < 50));
+ if (i == 50)
+ CH_ERR("Invalid tpi read from MAC, breaking loop.\n");
+
+ t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo);
+ t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi);
+
+ *val = (vhi << 16) | vlo;
+
+ /* CH_ERR("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
+ ((addr&0xe000)>>13), ((addr&0x1e00)>>9),
+ ((addr&0x01fe)>>1), *val); */
+ spin_unlock_bh(&adapter->mac_lock);
+}
+
+static void vsc_write(adapter_t *adapter, u32 addr, u32 data)
+{
+ spin_lock_bh(&adapter->mac_lock);
+ t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF);
+ t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF);
+ /* CH_ERR("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
+ ((addr&0xe000)>>13), ((addr&0x1e00)>>9),
+ ((addr&0x01fe)>>1), data); */
+ spin_unlock_bh(&adapter->mac_lock);
+}
+
+/* Hard reset the MAC. This wipes out *all* configuration. */
+static void vsc7326_full_reset(adapter_t* adapter)
+{
+ u32 val;
+ u32 result = 0xffff;
+
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~1;
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(2);
+ val |= 0x1; /* Enable the MAC itself */
+ val |= 0x800; /* Turn off the red LED */
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ mdelay(1);
+ vsc_write(adapter, REG_SW_RESET, 0x80000001);
+ do {
+ mdelay(1);
+ vsc_read(adapter, REG_SW_RESET, &result);
+ } while (result != 0x0);
+}
+
+static struct init_table vsc7326_reset[] = {
+ { REG_IFACE_MODE, 0x00000000 },
+ { REG_CRC_CFG, 0x00000020 },
+ { REG_PLL_CLK_SPEED, 0x00050c00 },
+ { REG_PLL_CLK_SPEED, 0x00050c00 },
+ { REG_MSCH, 0x00002f14 },
+ { REG_SPI4_MISC, 0x00040409 },
+ { REG_SPI4_DESKEW, 0x00080000 },
+ { REG_SPI4_ING_SETUP2, 0x08080004 },
+ { REG_SPI4_ING_SETUP0, 0x04111004 },
+ { REG_SPI4_EGR_SETUP0, 0x80001a04 },
+ { REG_SPI4_ING_SETUP1, 0x02010000 },
+ { REG_AGE_INC(0), 0x00000000 },
+ { REG_AGE_INC(1), 0x00000000 },
+ { REG_ING_CONTROL, 0x0a200011 },
+ { REG_EGR_CONTROL, 0xa0010091 },
+};
+
+static struct init_table vsc7326_portinit[4][22] = {
+ { /* Port 0 */
+ /* FIFO setup */
+ { REG_DBG(0), 0x000004f0 },
+ { REG_HDX(0), 0x00073101 },
+ { REG_TEST(0,0), 0x00000022 },
+ { REG_TEST(1,0), 0x00000022 },
+ { REG_TOP_BOTTOM(0,0), 0x003f0000 },
+ { REG_TOP_BOTTOM(1,0), 0x00120000 },
+ { REG_HIGH_LOW_WM(0,0), 0x07460757 },
+ { REG_HIGH_LOW_WM(1,0), WM_DISABLE },
+ { REG_CT_THRHLD(0,0), 0x00000000 },
+ { REG_CT_THRHLD(1,0), 0x00000000 },
+ { REG_BUCKE(0), 0x0002ffff },
+ { REG_BUCKI(0), 0x0002ffff },
+ { REG_TEST(0,0), 0x00000020 },
+ { REG_TEST(1,0), 0x00000020 },
+ /* Port config */
+ { REG_MAX_LEN(0), 0x00002710 },
+ { REG_PORT_FAIL(0), 0x00000002 },
+ { REG_NORMALIZER(0), 0x00000a64 },
+ { REG_DENORM(0), 0x00000010 },
+ { REG_STICK_BIT(0), 0x03baa370 },
+ { REG_DEV_SETUP(0), 0x00000083 },
+ { REG_DEV_SETUP(0), 0x00000082 },
+ { REG_MODE_CFG(0), 0x0200259f },
+ },
+ { /* Port 1 */
+ /* FIFO setup */
+ { REG_DBG(1), 0x000004f0 },
+ { REG_HDX(1), 0x00073101 },
+ { REG_TEST(0,1), 0x00000022 },
+ { REG_TEST(1,1), 0x00000022 },
+ { REG_TOP_BOTTOM(0,1), 0x007e003f },
+ { REG_TOP_BOTTOM(1,1), 0x00240012 },
+ { REG_HIGH_LOW_WM(0,1), 0x07460757 },
+ { REG_HIGH_LOW_WM(1,1), WM_DISABLE },
+ { REG_CT_THRHLD(0,1), 0x00000000 },
+ { REG_CT_THRHLD(1,1), 0x00000000 },
+ { REG_BUCKE(1), 0x0002ffff },
+ { REG_BUCKI(1), 0x0002ffff },
+ { REG_TEST(0,1), 0x00000020 },
+ { REG_TEST(1,1), 0x00000020 },
+ /* Port config */
+ { REG_MAX_LEN(1), 0x00002710 },
+ { REG_PORT_FAIL(1), 0x00000002 },
+ { REG_NORMALIZER(1), 0x00000a64 },
+ { REG_DENORM(1), 0x00000010 },
+ { REG_STICK_BIT(1), 0x03baa370 },
+ { REG_DEV_SETUP(1), 0x00000083 },
+ { REG_DEV_SETUP(1), 0x00000082 },
+ { REG_MODE_CFG(1), 0x0200259f },
+ },
+ { /* Port 2 */
+ /* FIFO setup */
+ { REG_DBG(2), 0x000004f0 },
+ { REG_HDX(2), 0x00073101 },
+ { REG_TEST(0,2), 0x00000022 },
+ { REG_TEST(1,2), 0x00000022 },
+ { REG_TOP_BOTTOM(0,2), 0x00bd007e },
+ { REG_TOP_BOTTOM(1,2), 0x00360024 },
+ { REG_HIGH_LOW_WM(0,2), 0x07460757 },
+ { REG_HIGH_LOW_WM(1,2), WM_DISABLE },
+ { REG_CT_THRHLD(0,2), 0x00000000 },
+ { REG_CT_THRHLD(1,2), 0x00000000 },
+ { REG_BUCKE(2), 0x0002ffff },
+ { REG_BUCKI(2), 0x0002ffff },
+ { REG_TEST(0,2), 0x00000020 },
+ { REG_TEST(1,2), 0x00000020 },
+ /* Port config */
+ { REG_MAX_LEN(2), 0x00002710 },
+ { REG_PORT_FAIL(2), 0x00000002 },
+ { REG_NORMALIZER(2), 0x00000a64 },
+ { REG_DENORM(2), 0x00000010 },
+ { REG_STICK_BIT(2), 0x03baa370 },
+ { REG_DEV_SETUP(2), 0x00000083 },
+ { REG_DEV_SETUP(2), 0x00000082 },
+ { REG_MODE_CFG(2), 0x0200259f },
+ },
+ { /* Port 3 */
+ /* FIFO setup */
+ { REG_DBG(3), 0x000004f0 },
+ { REG_HDX(3), 0x00073101 },
+ { REG_TEST(0,3), 0x00000022 },
+ { REG_TEST(1,3), 0x00000022 },
+ { REG_TOP_BOTTOM(0,3), 0x00fc00bd },
+ { REG_TOP_BOTTOM(1,3), 0x00480036 },
+ { REG_HIGH_LOW_WM(0,3), 0x07460757 },
+ { REG_HIGH_LOW_WM(1,3), WM_DISABLE },
+ { REG_CT_THRHLD(0,3), 0x00000000 },
+ { REG_CT_THRHLD(1,3), 0x00000000 },
+ { REG_BUCKE(3), 0x0002ffff },
+ { REG_BUCKI(3), 0x0002ffff },
+ { REG_TEST(0,3), 0x00000020 },
+ { REG_TEST(1,3), 0x00000020 },
+ /* Port config */
+ { REG_MAX_LEN(3), 0x00002710 },
+ { REG_PORT_FAIL(3), 0x00000002 },
+ { REG_NORMALIZER(3), 0x00000a64 },
+ { REG_DENORM(3), 0x00000010 },
+ { REG_STICK_BIT(3), 0x03baa370 },
+ { REG_DEV_SETUP(3), 0x00000083 },
+ { REG_DEV_SETUP(3), 0x00000082 },
+ { REG_MODE_CFG(3), 0x0200259f },
+ },
+};
+
+static void run_table(adapter_t *adapter, struct init_table *ib, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (ib[i].addr == INITBLOCK_SLEEP) {
+ udelay(ib[i].data);
+ CH_ERR("sleep %d us\n", ib[i].data);
+ } else {
+ vsc_write(adapter, ib[i].addr, ib[i].data);
+ }
+ }
+}
+
+static int bist_rd(adapter_t *adapter, int moduleid, int address)
+{
+ int data = 0;
+ u32 result = 0;
+
+ if ((address != 0x0) && (address != 0x1) && (address != 0x2) &&
+ (address != 0xd) && (address != 0xe))
+ CH_ERR("No bist address: 0x%x\n", address);
+
+ data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) |
+ ((moduleid & 0xff) << 0));
+ vsc_write(adapter, REG_RAM_BIST_CMD, data);
+
+ udelay(10);
+
+ vsc_read(adapter, REG_RAM_BIST_RESULT, &result);
+ if ((result & (1 << 9)) != 0x0)
+ CH_ERR("Still in bist read: 0x%x\n", result);
+ else if ((result & (1 << 8)) != 0x0)
+ CH_ERR("bist read error: 0x%x\n", result);
+
+ return result & 0xff;
+}
+
+static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
+{
+ int data = 0;
+ u32 result = 0;
+
+ if ((address != 0x0) && (address != 0x1) && (address != 0x2) &&
+ (address != 0xd) && (address != 0xe))
+ CH_ERR("No bist address: 0x%x\n", address);
+
+ if (value > 255)
+ CH_ERR("Suspicious write out of range value: 0x%x\n", value);
+
+ data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) |
+ ((moduleid & 0xff) << 0));
+ vsc_write(adapter, REG_RAM_BIST_CMD, data);
+
+ udelay(5);
+
+ vsc_read(adapter, REG_RAM_BIST_CMD, &result);
+ if ((result & (1 << 27)) != 0x0)
+ CH_ERR("Still in bist write: 0x%x\n", result);
+ else if ((result & (1 << 26)) != 0x0)
+ CH_ERR("bist write error: 0x%x\n", result);
+
+ return 0;
+}
+
+static int run_bist(adapter_t *adapter, int moduleid)
+{
+ /* run bist */
+ (void) bist_wr(adapter, moduleid, 0x00, 0x02);
+ (void) bist_wr(adapter, moduleid, 0x01, 0x01);
+
+ return 0;
+}
+
+static int check_bist(adapter_t *adapter, int moduleid)
+{
+ int result = 0;
+ int column = 0;
+
+ /* check bist */
+ result = bist_rd(adapter, moduleid, 0x02);
+ column = ((bist_rd(adapter, moduleid, 0x0e) << 8) +
+ (bist_rd(adapter, moduleid, 0x0d)));
+ if ((result & 3) != 0x3)
+ CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n",
+ result, moduleid, column);
+ return 0;
+}
+
+static int enable_mem(adapter_t *adapter, int moduleid)
+{
+ /* enable mem */
+ (void) bist_wr(adapter, moduleid, 0x00, 0x00);
+ return 0;
+}
+
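+/*
+ * Memory built-in self test: disable all ports, put the RAMs into BIST mode,
+ * run and check BIST on modules 13, 14, 20 and 21, then re-enable the
+ * memories and the ports.
+ */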
+static int run_bist_all(adapter_t *adapter)
+{
+ int port = 0;
+ u32 val = 0;
+
+ vsc_write(adapter, REG_MEM_BIST, 0x5);
+ vsc_read(adapter, REG_MEM_BIST, &val);
+
+ for (port = 0; port < 12; port++)
+ vsc_write(adapter, REG_DEV_SETUP(port), 0x0);
+
+ udelay(300);
+ vsc_write(adapter, REG_SPI4_MISC, 0x00040409);
+ udelay(300);
+
+ (void) run_bist(adapter, 13);
+ (void) run_bist(adapter, 14);
+ (void) run_bist(adapter, 20);
+ (void) run_bist(adapter, 21);
+ mdelay(200);
+ (void) check_bist(adapter, 13);
+ (void) check_bist(adapter, 14);
+ (void) check_bist(adapter, 20);
+ (void) check_bist(adapter, 21);
+ udelay(100);
+ (void) enable_mem(adapter, 13);
+ (void) enable_mem(adapter, 14);
+ (void) enable_mem(adapter, 20);
+ (void) enable_mem(adapter, 21);
+ udelay(300);
+ vsc_write(adapter, REG_SPI4_MISC, 0x60040400);
+ udelay(300);
+ for (port = 0; port < 12; port++)
+ vsc_write(adapter, REG_DEV_SETUP(port), 0x1);
+ udelay(300);
+ vsc_write(adapter, REG_MEM_BIST, 0x0);
+ mdelay(10);
+ return 0;
+}
+
+static int mac_intr_handler(struct cmac *mac)
+{
+ return 0;
+}
+
+static int mac_intr_enable(struct cmac *mac)
+{
+ return 0;
+}
+
+static int mac_intr_disable(struct cmac *mac)
+{
+ return 0;
+}
+
+static int mac_intr_clear(struct cmac *mac)
+{
+ return 0;
+}
+
+/* Expect MAC address to be in network byte order. */
+static int mac_set_address(struct cmac* mac, u8 addr[6])
+{
+ u32 val;
+ int port = mac->instance->index;
+
+ vsc_write(mac->adapter, REG_MAC_LOW_ADDR(port),
+ (addr[3] << 16) | (addr[4] << 8) | addr[5]);
+ vsc_write(mac->adapter, REG_MAC_HIGH_ADDR(port),
+ (addr[0] << 16) | (addr[1] << 8) | addr[2]);
+
+ vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &val);
+ val &= ~0xf0000000;
+ vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, val | (port << 28));
+
+ vsc_write(mac->adapter, REG_ING_FFILT_MASK0,
+ 0xffff0000 | (addr[4] << 8) | addr[5]);
+ vsc_write(mac->adapter, REG_ING_FFILT_MASK1,
+ 0xffff0000 | (addr[2] << 8) | addr[3]);
+ vsc_write(mac->adapter, REG_ING_FFILT_MASK2,
+ 0xffff0000 | (addr[0] << 8) | addr[1]);
+ return 0;
+}
+
+static int mac_get_address(struct cmac *mac, u8 addr[6])
+{
+ u32 addr_lo, addr_hi;
+ int port = mac->instance->index;
+
+ vsc_read(mac->adapter, REG_MAC_LOW_ADDR(port), &addr_lo);
+ vsc_read(mac->adapter, REG_MAC_HIGH_ADDR(port), &addr_hi);
+
+ addr[0] = (u8) (addr_hi >> 16);
+ addr[1] = (u8) (addr_hi >> 8);
+ addr[2] = (u8) addr_hi;
+ addr[3] = (u8) (addr_lo >> 16);
+ addr[4] = (u8) (addr_lo >> 8);
+ addr[5] = (u8) addr_lo;
+ return 0;
+}
+
+/* This is intended to reset a port, not the whole MAC */
+static int mac_reset(struct cmac *mac)
+{
+ int index = mac->instance->index;
+
+ run_table(mac->adapter, vsc7326_portinit[index],
+ ARRAY_SIZE(vsc7326_portinit[index]));
+
+ return 0;
+}
+
+static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
+{
+ u32 v;
+ int port = mac->instance->index;
+
+ vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &v);
+ v |= 1 << 12;
+
+ if (t1_rx_mode_promisc(rm))
+ v &= ~(1 << (port + 16));
+ else
+ v |= 1 << (port + 16);
+
+ vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, v);
+ return 0;
+}
+
+static int mac_set_mtu(struct cmac *mac, int mtu)
+{
+ int port = mac->instance->index;
+
+ if (mtu > MAX_MTU)
+ return -EINVAL;
+
+ /* max_len includes header and FCS */
+ vsc_write(mac->adapter, REG_MAX_LEN(port), mtu + 14 + 4);
+ return 0;
+}
+
+static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex,
+ int fc)
+{
+ u32 v;
+ int enable, port = mac->instance->index;
+
+ if (speed >= 0 && speed != SPEED_10 && speed != SPEED_100 &&
+ speed != SPEED_1000)
+ return -1;
+ if (duplex > 0 && duplex != DUPLEX_FULL)
+ return -1;
+
+ if (speed >= 0) {
+ vsc_read(mac->adapter, REG_MODE_CFG(port), &v);
+ enable = v & 3; /* save tx/rx enables */
+ v &= ~0xf;
+ v |= 4; /* full duplex */
+ if (speed == SPEED_1000)
+ v |= 8; /* GigE */
+ enable |= v;
+ vsc_write(mac->adapter, REG_MODE_CFG(port), v);
+
+ if (speed == SPEED_1000)
+ v = 0x82;
+ else if (speed == SPEED_100)
+ v = 0x84;
+ else /* SPEED_10 */
+ v = 0x86;
+ vsc_write(mac->adapter, REG_DEV_SETUP(port), v | 1); /* reset */
+ vsc_write(mac->adapter, REG_DEV_SETUP(port), v);
+ vsc_read(mac->adapter, REG_DBG(port), &v);
+ v &= ~0xff00;
+ if (speed == SPEED_1000)
+ v |= 0x400;
+ else if (speed == SPEED_100)
+ v |= 0x2000;
+ else /* SPEED_10 */
+ v |= 0xff00;
+ vsc_write(mac->adapter, REG_DBG(port), v);
+
+ vsc_write(mac->adapter, REG_TX_IFG(port),
+ speed == SPEED_1000 ? 5 : 0x11);
+ if (duplex == DUPLEX_HALF)
+ enable = 0x0; /* 100 or 10 */
+ else if (speed == SPEED_1000)
+ enable = 0xc;
+ else /* SPEED_100 or 10 */
+ enable = 0x4;
+ enable |= 0x9 << 10; /* IFG1 */
+ enable |= 0x6 << 6; /* IFG2 */
+ enable |= 0x1 << 4; /* VLAN */
+ enable |= 0x3; /* RX/TX EN */
+ vsc_write(mac->adapter, REG_MODE_CFG(port), enable);
+
+ }
+
+ vsc_read(mac->adapter, REG_PAUSE_CFG(port), &v);
+ v &= 0xfff0ffff;
+ v |= 0x20000; /* xon/xoff */
+ if (fc & PAUSE_RX)
+ v |= 0x40000;
+ if (fc & PAUSE_TX)
+ v |= 0x80000;
+ if (fc == (PAUSE_RX | PAUSE_TX))
+ v |= 0x10000;
+ vsc_write(mac->adapter, REG_PAUSE_CFG(port), v);
+ return 0;
+}
+
+static int mac_enable(struct cmac *mac, int which)
+{
+ u32 val;
+ int port = mac->instance->index;
+
+ /* Write the correct WM value when the port is enabled. */
+ vsc_write(mac->adapter, REG_HIGH_LOW_WM(1,port), WM_ENABLE);
+
+ vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+ if (which & MAC_DIRECTION_RX)
+ val |= 0x2;
+ if (which & MAC_DIRECTION_TX)
+ val |= 1;
+ vsc_write(mac->adapter, REG_MODE_CFG(port), val);
+ return 0;
+}
+
+static int mac_disable(struct cmac *mac, int which)
+{
+ u32 val;
+ int i, port = mac->instance->index;
+
+ /* Reset the port; this also writes the correct WM value */
+ mac_reset(mac);
+
+ vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+ if (which & MAC_DIRECTION_RX)
+ val &= ~0x2;
+ if (which & MAC_DIRECTION_TX)
+ val &= ~0x1;
+ vsc_write(mac->adapter, REG_MODE_CFG(port), val);
+ vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+
+ /* Clear stats */
+ for (i = 0; i <= 0x3a; ++i)
+ vsc_write(mac->adapter, CRA(4, port, i), 0);
+
+ /* Clear software counters */
+ memset(&mac->stats, 0, sizeof(struct cmac_statistics));
+
+ return 0;
+}
+
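+/*
+ * Fold the current value of a 32-bit hardware RMON counter into the 64-bit
+ * software counter *stat, bumping the upper 32 bits whenever the hardware
+ * value has wrapped since the previous read.
+ */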
+static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat)
+{
+ u32 v, lo;
+
+ vsc_read(mac->adapter, addr, &v);
+ lo = *stat;
+ *stat = *stat - lo + v;
+
+ if (v == 0)
+ return;
+
+ if (v < lo)
+ *stat += (1ULL << 32);
+}
+
+static void port_stats_update(struct cmac *mac)
+{
+ int port = mac->instance->index;
+
+ /* Rx stats */
+ rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK);
+ rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad);
+ rmon_update(mac, REG_RX_UNICAST(port), &mac->stats.RxUnicastFramesOK);
+ rmon_update(mac, REG_RX_MULTICAST(port),
+ &mac->stats.RxMulticastFramesOK);
+ rmon_update(mac, REG_RX_BROADCAST(port),
+ &mac->stats.RxBroadcastFramesOK);
+ rmon_update(mac, REG_CRC(port), &mac->stats.RxFCSErrors);
+ rmon_update(mac, REG_RX_ALIGNMENT(port), &mac->stats.RxAlignErrors);
+ rmon_update(mac, REG_RX_OVERSIZE(port),
+ &mac->stats.RxFrameTooLongErrors);
+ rmon_update(mac, REG_RX_PAUSE(port), &mac->stats.RxPauseFrames);
+ rmon_update(mac, REG_RX_JABBERS(port), &mac->stats.RxJabberErrors);
+ rmon_update(mac, REG_RX_FRAGMENTS(port), &mac->stats.RxRuntErrors);
+ rmon_update(mac, REG_RX_UNDERSIZE(port), &mac->stats.RxRuntErrors);
+ rmon_update(mac, REG_RX_SYMBOL_CARRIER(port),
+ &mac->stats.RxSymbolErrors);
+ rmon_update(mac, REG_RX_SIZE_1519_TO_MAX(port),
+ &mac->stats.RxJumboFramesOK);
+
+ /* Tx stats (skip collision stats as we are full-duplex only) */
+ rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
+ rmon_update(mac, REG_TX_UNICAST(port), &mac->stats.TxUnicastFramesOK);
+ rmon_update(mac, REG_TX_MULTICAST(port),
+ &mac->stats.TxMulticastFramesOK);
+ rmon_update(mac, REG_TX_BROADCAST(port),
+ &mac->stats.TxBroadcastFramesOK);
+ rmon_update(mac, REG_TX_PAUSE(port), &mac->stats.TxPauseFrames);
+ rmon_update(mac, REG_TX_UNDERRUN(port), &mac->stats.TxUnderrun);
+ rmon_update(mac, REG_TX_SIZE_1519_TO_MAX(port),
+ &mac->stats.TxJumboFramesOK);
+}
+
+/*
+ * This function is called periodically to accumulate the current values of the
+ * RMON counters into the port statistics. Since the counters are only 32 bits,
+ * some of them can overflow in less than a minute at GigE speeds, so this
+ * function should be called every 30 seconds or so.
+ *
+ * To cut down on reading costs we update only the octet counters at each tick
+ * and do a full update at major ticks, which can be every 30 minutes or more.
+ */
+static const struct cmac_statistics *mac_update_statistics(struct cmac *mac,
+ int flag)
+{
+ if (flag == MAC_STATS_UPDATE_FULL ||
+ mac->instance->ticks >= MAJOR_UPDATE_TICKS) {
+ port_stats_update(mac);
+ mac->instance->ticks = 0;
+ } else {
+ int port = mac->instance->index;
+
+ rmon_update(mac, REG_RX_OK_BYTES(port),
+ &mac->stats.RxOctetsOK);
+ rmon_update(mac, REG_RX_BAD_BYTES(port),
+ &mac->stats.RxOctetsBad);
+ rmon_update(mac, REG_TX_OK_BYTES(port),
+ &mac->stats.TxOctetsOK);
+ mac->instance->ticks++;
+ }
+ return &mac->stats;
+}
+
+static void mac_destroy(struct cmac *mac)
+{
+ kfree(mac);
+}
+
+static struct cmac_ops vsc7326_ops = {
+ .destroy = mac_destroy,
+ .reset = mac_reset,
+ .interrupt_handler = mac_intr_handler,
+ .interrupt_enable = mac_intr_enable,
+ .interrupt_disable = mac_intr_disable,
+ .interrupt_clear = mac_intr_clear,
+ .enable = mac_enable,
+ .disable = mac_disable,
+ .set_mtu = mac_set_mtu,
+ .set_rx_mode = mac_set_rx_mode,
+ .set_speed_duplex_fc = mac_set_speed_duplex_fc,
+ .statistics_update = mac_update_statistics,
+ .macaddress_get = mac_get_address,
+ .macaddress_set = mac_set_address,
+};
+
+static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index)
+{
+ struct cmac *mac;
+ u32 val;
+ int i;
+
+ mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
+ if (!mac) return NULL;
+
+ mac->ops = &vsc7326_ops;
+ mac->instance = (cmac_instance *)(mac + 1);
+ mac->adapter = adapter;
+
+ mac->instance->index = index;
+ mac->instance->ticks = 0;
+
+ i = 0;
+ do {
+ u32 vhi, vlo;
+
+ vhi = vlo = 0;
+ t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo);
+ udelay(1);
+ t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi);
+ udelay(5);
+ val = (vhi << 16) | vlo;
+ } while ((++i < 10000) && (val == 0xffffffff));
+
+ return mac;
+}
+
+static int vsc7326_mac_reset(adapter_t *adapter)
+{
+ vsc7326_full_reset(adapter);
+ (void) run_bist_all(adapter);
+ run_table(adapter, vsc7326_reset, ARRAY_SIZE(vsc7326_reset));
+ return 0;
+}
+
+struct gmac t1_vsc7326_ops = {
+ .stats_update_period = STATS_TICK_SECS,
+ .create = vsc7326_mac_create,
+ .reset = vsc7326_mac_reset,
+};
diff --git a/drivers/net/chelsio/vsc7326_reg.h b/drivers/net/chelsio/vsc7326_reg.h
new file mode 100644
index 000000000000..491bcf75c4fb
--- /dev/null
+++ b/drivers/net/chelsio/vsc7326_reg.h
@@ -0,0 +1,286 @@
+/* $Date: 2006/04/28 19:20:17 $ $RCSfile: vsc7326_reg.h,v $ $Revision: 1.5 $ */
+#ifndef _VSC7321_REG_H_
+#define _VSC7321_REG_H_
+
+/* Register definitions for Vitesse VSC7321 (Meigs II) MAC
+ *
+ * Straight off the data sheet, VMDS-10038 Rev 2.0 and
+ * PD0011-01-14-Meigs-II 2002-12-12
+ */
+
+/* Just 'cause it's in here doesn't mean it's used. */
+
+#define CRA(blk,sub,adr) ((((blk) & 0x7) << 13) | (((sub) & 0xf) << 9) | (((adr) & 0xff) << 1))
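+/*
+ * CRA() packs a register coordinate into bits [15:13] (block), [12:9]
+ * (sub-block) and [8:1] (register address); vsc_read()/vsc_write() in
+ * vsc7326.c shift the result left by 2 to form the two TPI addresses of the
+ * 16-bit register halves.
+ */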
+
+/* System and CPU comm's registers */
+#define REG_CHIP_ID CRA(0x7,0xf,0x00) /* Chip ID */
+#define REG_BLADE_ID CRA(0x7,0xf,0x01) /* Blade ID */
+#define REG_SW_RESET CRA(0x7,0xf,0x02) /* Global Soft Reset */
+#define REG_MEM_BIST CRA(0x7,0xf,0x04) /* mem */
+#define REG_IFACE_MODE CRA(0x7,0xf,0x07) /* Interface mode */
+#define REG_MSCH CRA(0x7,0x2,0x06) /* CRC error count */
+#define REG_CRC_CNT CRA(0x7,0x2,0x0a) /* CRC error count */
+#define REG_CRC_CFG CRA(0x7,0x2,0x0b) /* CRC config */
+#define REG_SI_TRANSFER_SEL CRA(0x7,0xf,0x18) /* SI Transfer Select */
+#define REG_PLL_CLK_SPEED CRA(0x7,0xf,0x19) /* Clock Speed Selection */
+#define REG_SYS_CLK_SELECT CRA(0x7,0xf,0x1c) /* System Clock Select */
+#define REG_GPIO_CTRL CRA(0x7,0xf,0x1d) /* GPIO Control */
+#define REG_GPIO_OUT CRA(0x7,0xf,0x1e) /* GPIO Out */
+#define REG_GPIO_IN CRA(0x7,0xf,0x1f) /* GPIO In */
+#define REG_CPU_TRANSFER_SEL CRA(0x7,0xf,0x20) /* CPU Transfer Select */
+#define REG_LOCAL_DATA CRA(0x7,0xf,0xfe) /* Local CPU Data Register */
+#define REG_LOCAL_STATUS CRA(0x7,0xf,0xff) /* Local CPU Status Register */
+
+/* Aggregator registers */
+#define REG_AGGR_SETUP CRA(0x7,0x1,0x00) /* Aggregator Setup */
+#define REG_PMAP_TABLE CRA(0x7,0x1,0x01) /* Port map table */
+#define REG_MPLS_BIT0 CRA(0x7,0x1,0x08) /* MPLS bit0 position */
+#define REG_MPLS_BIT1 CRA(0x7,0x1,0x09) /* MPLS bit1 position */
+#define REG_MPLS_BIT2 CRA(0x7,0x1,0x0a) /* MPLS bit2 position */
+#define REG_MPLS_BIT3 CRA(0x7,0x1,0x0b) /* MPLS bit3 position */
+#define REG_MPLS_BITMASK CRA(0x7,0x1,0x0c) /* MPLS bit mask */
+#define REG_PRE_BIT0POS CRA(0x7,0x1,0x10) /* Preamble bit0 position */
+#define REG_PRE_BIT1POS CRA(0x7,0x1,0x11) /* Preamble bit1 position */
+#define REG_PRE_BIT2POS CRA(0x7,0x1,0x12) /* Preamble bit2 position */
+#define REG_PRE_BIT3POS CRA(0x7,0x1,0x13) /* Preamble bit3 position */
+#define REG_PRE_ERR_CNT CRA(0x7,0x1,0x14) /* Preamble parity error count */
+
+/* BIST registers */
+/*#define REG_RAM_BIST_CMD CRA(0x7,0x2,0x00)*/ /* RAM BIST Command Register */
+/*#define REG_RAM_BIST_RESULT CRA(0x7,0x2,0x01)*/ /* RAM BIST Read Status/Result */
+#define REG_RAM_BIST_CMD CRA(0x7,0x1,0x00) /* RAM BIST Command Register */
+#define REG_RAM_BIST_RESULT CRA(0x7,0x1,0x01) /* RAM BIST Read Status/Result */
+#define BIST_PORT_SELECT 0x00 /* BIST port select */
+#define BIST_COMMAND 0x01 /* BIST enable/disable */
+#define BIST_STATUS 0x02 /* BIST operation status */
+#define BIST_ERR_CNT_LSB 0x03 /* BIST error count lo 8b */
+#define BIST_ERR_CNT_MSB 0x04 /* BIST error count hi 8b */
+#define BIST_ERR_SEL_LSB 0x05 /* BIST error select lo 8b */
+#define BIST_ERR_SEL_MSB 0x06 /* BIST error select hi 8b */
+#define BIST_ERROR_STATE 0x07 /* BIST engine internal state */
+#define BIST_ERR_ADR0 0x08 /* BIST error address lo 8b */
+#define BIST_ERR_ADR1 0x09 /* BIST error address lomid 8b */
+#define BIST_ERR_ADR2 0x0a /* BIST error address himid 8b */
+#define BIST_ERR_ADR3 0x0b /* BIST error address hi 8b */
+
+/* FIFO registers
+ * ie = 0 for ingress, 1 for egress
+ * fn = FIFO number, 0-9
+ */
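+/* For example, REG_TEST(1,3) expands to CRA(0x2,0x1,0x03) = 0x4206, the
+ * mode & test register of egress FIFO 3.
+ */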
+#define REG_TEST(ie,fn) CRA(0x2,ie&1,0x00+fn) /* Mode & Test Register */
+#define REG_TOP_BOTTOM(ie,fn) CRA(0x2,ie&1,0x10+fn) /* FIFO Buffer Top & Bottom */
+#define REG_TAIL(ie,fn) CRA(0x2,ie&1,0x20+fn) /* FIFO Write Pointer */
+#define REG_HEAD(ie,fn) CRA(0x2,ie&1,0x30+fn) /* FIFO Read Pointer */
+#define REG_HIGH_LOW_WM(ie,fn) CRA(0x2,ie&1,0x40+fn) /* Flow Control Water Marks */
+#define REG_CT_THRHLD(ie,fn) CRA(0x2,ie&1,0x50+fn) /* Cut Through Threshold */
+#define REG_FIFO_DROP_CNT(ie,fn) CRA(0x2,ie&1,0x60+fn) /* Drop & CRC Error Counter */
+#define REG_DEBUG_BUF_CNT(ie,fn) CRA(0x2,ie&1,0x70+fn) /* Input Side Debug Counter */
+#define REG_BUCKI(fn) CRA(0x2,2,0x20+fn) /* Input Side Debug Counter */
+#define REG_BUCKE(fn) CRA(0x2,3,0x20+fn) /* Input Side Debug Counter */
+
+/* Traffic shaper buckets
+ * ie = 0 for ingress, 1 for egress
+ * bn = bucket number 0-10 (yes, 11 buckets)
+ */
+/* OK, this one's kinda ugly. Some hardware designers are perverse. */
+#define REG_TRAFFIC_SHAPER_BUCKET(ie,bn) CRA(0x2,ie&1,0x0a + (bn>7) | ((bn&7)<<4))
+#define REG_TRAFFIC_SHAPER_CONTROL(ie) CRA(0x2,ie&1,0x3b)
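+/* As written, the bucket macro maps buckets 0-7 to sub-addresses 0x0a-0x7a
+ * and buckets 8-10 to 0x0b, 0x1b and 0x2b (the "+ (bn>7)" term binds before
+ * the OR).
+ */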
+
+#define REG_SRAM_ADR(ie) CRA(0x2,ie&1,0x0e) /* FIFO SRAM address */
+#define REG_SRAM_WR_STRB(ie) CRA(0x2,ie&1,0x1e) /* FIFO SRAM write strobe */
+#define REG_SRAM_RD_STRB(ie) CRA(0x2,ie&1,0x2e) /* FIFO SRAM read strobe */
+#define REG_SRAM_DATA_0(ie) CRA(0x2,ie&1,0x3e) /* FIFO SRAM data lo 8b */
+#define REG_SRAM_DATA_1(ie) CRA(0x2,ie&1,0x4e) /* FIFO SRAM data lomid 8b */
+#define REG_SRAM_DATA_2(ie) CRA(0x2,ie&1,0x5e) /* FIFO SRAM data himid 8b */
+#define REG_SRAM_DATA_3(ie) CRA(0x2,ie&1,0x6e) /* FIFO SRAM data hi 8b */
+#define REG_SRAM_DATA_BLK_TYPE(ie) CRA(0x2,ie&1,0x7e) /* FIFO SRAM tag */
+/* REG_ING_CONTROL equals REG_CONTROL with ie = 0, likewise REG_EGR_CONTROL is ie = 1 */
+#define REG_CONTROL(ie) CRA(0x2,ie&1,0x0f) /* FIFO control */
+#define REG_ING_CONTROL CRA(0x2,0x0,0x0f) /* Ingress control (alias) */
+#define REG_EGR_CONTROL CRA(0x2,0x1,0x0f) /* Egress control (alias) */
+#define REG_AGE_TIMER(ie) CRA(0x2,ie&1,0x1f) /* Aging timer */
+#define REG_AGE_INC(ie) CRA(0x2,ie&1,0x2f) /* Aging increment */
+#define DEBUG_OUT(ie) CRA(0x2,ie&1,0x3f) /* Output debug counter control */
+#define DEBUG_CNT(ie) CRA(0x2,ie&1,0x4f) /* Output debug counter */
+
+/* SPI4 interface */
+#define REG_SPI4_MISC CRA(0x5,0x0,0x00) /* Misc Register */
+#define REG_SPI4_STATUS CRA(0x5,0x0,0x01) /* CML Status */
+#define REG_SPI4_ING_SETUP0 CRA(0x5,0x0,0x02) /* Ingress Status Channel Setup */
+#define REG_SPI4_ING_SETUP1 CRA(0x5,0x0,0x03) /* Ingress Data Training Setup */
+#define REG_SPI4_ING_SETUP2 CRA(0x5,0x0,0x04) /* Ingress Data Burst Size Setup */
+#define REG_SPI4_EGR_SETUP0 CRA(0x5,0x0,0x05) /* Egress Status Channel Setup */
+#define REG_SPI4_DBG_CNT(n) CRA(0x5,0x0,0x10+n) /* Debug counters 0-9 */
+#define REG_SPI4_DBG_SETUP CRA(0x5,0x0,0x1A) /* Debug counters setup */
+#define REG_SPI4_TEST CRA(0x5,0x0,0x20) /* Test Setup Register */
+#define REG_TPGEN_UP0 CRA(0x5,0x0,0x21) /* Test Pattern generator user pattern 0 */
+#define REG_TPGEN_UP1 CRA(0x5,0x0,0x22) /* Test Pattern generator user pattern 1 */
+#define REG_TPCHK_UP0 CRA(0x5,0x0,0x23) /* Test Pattern checker user pattern 0 */
+#define REG_TPCHK_UP1 CRA(0x5,0x0,0x24) /* Test Pattern checker user pattern 1 */
+#define REG_TPSAM_P0 CRA(0x5,0x0,0x25) /* Sampled pattern 0 */
+#define REG_TPSAM_P1 CRA(0x5,0x0,0x26) /* Sampled pattern 1 */
+#define REG_TPERR_CNT CRA(0x5,0x0,0x27) /* Pattern checker error counter */
+#define REG_SPI4_STICKY CRA(0x5,0x0,0x30) /* Sticky bits register */
+#define REG_SPI4_DBG_INH CRA(0x5,0x0,0x31) /* Core egress & ingress inhibit */
+#define REG_SPI4_DBG_STATUS CRA(0x5,0x0,0x32) /* Sampled ingress status */
+#define REG_SPI4_DBG_GRANT CRA(0x5,0x0,0x33) /* Ingress granted credit value */
+
+#define REG_SPI4_DESKEW CRA(0x5,0x0,0x43) /* SPI4 deskew */
+
+/* 10GbE MAC Block Registers */
+/* Note that those registers that are exactly the same for 10GbE as for
+ * tri-speed are only defined with the version that needs a port number.
+ * Pass 0xa in those cases.
+ *
+ * Also note that despite the presence of a MAC address register, this part
+ * does no ingress MAC address filtering. That register is used only for
+ * pause frame detection and generation.
+ */
+/* 10GbE specific, and different from tri-speed */
+#define REG_MISC_10G CRA(0x1,0xa,0x00) /* Misc 10GbE setup */
+#define REG_PAUSE_10G CRA(0x1,0xa,0x01) /* Pause register */
+#define REG_NORMALIZER_10G CRA(0x1,0xa,0x05) /* 10G normalizer */
+#define REG_STICKY_RX CRA(0x1,0xa,0x06) /* RX debug register */
+#define REG_DENORM_10G CRA(0x1,0xa,0x07) /* Denormalizer */
+#define REG_STICKY_TX CRA(0x1,0xa,0x08) /* TX sticky bits */
+#define REG_MAX_RXHIGH CRA(0x1,0xa,0x0a) /* XGMII lane 0-3 debug */
+#define REG_MAX_RXLOW CRA(0x1,0xa,0x0b) /* XGMII lane 4-7 debug */
+#define REG_MAC_TX_STICKY CRA(0x1,0xa,0x0c) /* MAC Tx state sticky debug */
+#define REG_MAC_TX_RUNNING CRA(0x1,0xa,0x0d) /* MAC Tx state running debug */
+#define REG_TX_ABORT_AGE CRA(0x1,0xa,0x14) /* Aged Tx frames discarded */
+#define REG_TX_ABORT_SHORT CRA(0x1,0xa,0x15) /* Short Tx frames discarded */
+#define REG_TX_ABORT_TAXI CRA(0x1,0xa,0x16) /* Taxi error frames discarded */
+#define REG_TX_ABORT_UNDERRUN CRA(0x1,0xa,0x17) /* Tx Underrun abort counter */
+#define REG_TX_DENORM_DISCARD CRA(0x1,0xa,0x18) /* Tx denormalizer discards */
+#define REG_XAUI_STAT_A CRA(0x1,0xa,0x20) /* XAUI status A */
+#define REG_XAUI_STAT_B CRA(0x1,0xa,0x21) /* XAUI status B */
+#define REG_XAUI_STAT_C CRA(0x1,0xa,0x22) /* XAUI status C */
+#define REG_XAUI_CONF_A CRA(0x1,0xa,0x23) /* XAUI configuration A */
+#define REG_XAUI_CONF_B CRA(0x1,0xa,0x24) /* XAUI configuration B */
+#define REG_XAUI_CODE_GRP_CNT CRA(0x1,0xa,0x25) /* XAUI code group error count */
+#define REG_XAUI_CONF_TEST_A CRA(0x1,0xa,0x26) /* XAUI test register A */
+#define REG_PDERRCNT CRA(0x1,0xa,0x27) /* XAUI test register B */
+
+/* pn = port number 0-9 for tri-speed, 10 for 10GbE */
+/* Both tri-speed and 10GbE */
+#define REG_MAX_LEN(pn) CRA(0x1,pn,0x02) /* Max length */
+#define REG_MAC_HIGH_ADDR(pn) CRA(0x1,pn,0x03) /* Upper 24 bits of MAC addr */
+#define REG_MAC_LOW_ADDR(pn) CRA(0x1,pn,0x04) /* Lower 24 bits of MAC addr */
+
+/* tri-speed only
+ * pn = port number, 0-9
+ */
+#define REG_MODE_CFG(pn) CRA(0x1,pn,0x00) /* Mode configuration */
+#define REG_PAUSE_CFG(pn) CRA(0x1,pn,0x01) /* Pause configuration */
+#define REG_NORMALIZER(pn) CRA(0x1,pn,0x05) /* Normalizer */
+#define REG_TBI_STATUS(pn) CRA(0x1,pn,0x06) /* TBI status */
+#define REG_PCS_STATUS_DBG(pn) CRA(0x1,pn,0x07) /* PCS status debug */
+#define REG_PCS_CTRL(pn) CRA(0x1,pn,0x08) /* PCS control */
+#define REG_TBI_CONFIG(pn) CRA(0x1,pn,0x09) /* TBI configuration */
+#define REG_STICK_BIT(pn) CRA(0x1,pn,0x0a) /* Sticky bits */
+#define REG_DEV_SETUP(pn) CRA(0x1,pn,0x0b) /* MAC clock/reset setup */
+#define REG_DROP_CNT(pn) CRA(0x1,pn,0x0c) /* Drop counter */
+#define REG_PORT_POS(pn) CRA(0x1,pn,0x0d) /* Preamble port position */
+#define REG_PORT_FAIL(pn) CRA(0x1,pn,0x0e) /* Port fail */
+#define REG_SERDES_CONF(pn) CRA(0x1,pn,0x0f) /* SerDes configuration */
+#define REG_SERDES_TEST(pn) CRA(0x1,pn,0x10) /* SerDes test */
+#define REG_SERDES_STAT(pn) CRA(0x1,pn,0x11) /* SerDes status */
+#define REG_SERDES_COM_CNT(pn) CRA(0x1,pn,0x12) /* SerDes comma counter */
+#define REG_DENORM(pn) CRA(0x1,pn,0x15) /* Frame denormalization */
+#define REG_DBG(pn) CRA(0x1,pn,0x16) /* Device 1G debug */
+#define REG_TX_IFG(pn) CRA(0x1,pn,0x18) /* Tx IFG config */
+#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */
+
+/* Statistics */
+/* pn = port number, 0-a, a = 10GbE */
+#define REG_RX_IN_BYTES(pn) CRA(0x4,pn,0x00) /* # Rx in octets */
+#define REG_RX_SYMBOL_CARRIER(pn) CRA(0x4,pn,0x01) /* Frames w/ symbol errors */
+#define REG_RX_PAUSE(pn) CRA(0x4,pn,0x02) /* # pause frames received */
+#define REG_RX_UNSUP_OPCODE(pn) CRA(0x4,pn,0x03) /* # control frames with unsupported opcode */
+#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,0x04) /* # octets in good frames */
+#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,0x05) /* # octets in bad frames */
+#define REG_RX_UNICAST(pn) CRA(0x4,pn,0x06) /* # good unicast frames */
+#define REG_RX_MULTICAST(pn) CRA(0x4,pn,0x07) /* # good multicast frames */
+#define REG_RX_BROADCAST(pn) CRA(0x4,pn,0x08) /* # good broadcast frames */
+#define REG_CRC(pn) CRA(0x4,pn,0x09) /* # frames w/ bad CRC only */
+#define REG_RX_ALIGNMENT(pn) CRA(0x4,pn,0x0a) /* # frames w/ alignment err */
+#define REG_RX_UNDERSIZE(pn) CRA(0x4,pn,0x0b) /* # frames undersize */
+#define REG_RX_FRAGMENTS(pn) CRA(0x4,pn,0x0c) /* # frames undersize w/ crc err */
+#define REG_RX_IN_RANGE_LENGTH_ERROR(pn) CRA(0x4,pn,0x0d) /* # frames with length error */
+#define REG_RX_OUT_OF_RANGE_ERROR(pn) CRA(0x4,pn,0x0e) /* # frames with illegal length field */
+#define REG_RX_OVERSIZE(pn) CRA(0x4,pn,0x0f) /* # frames oversize */
+#define REG_RX_JABBERS(pn) CRA(0x4,pn,0x10) /* # frames oversize w/ crc err */
+#define REG_RX_SIZE_64(pn) CRA(0x4,pn,0x11) /* # frames 64 octets long */
+#define REG_RX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x12) /* # frames 65-127 octets */
+#define REG_RX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x13) /* # frames 128-255 */
+#define REG_RX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x14) /* # frames 256-511 */
+#define REG_RX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x15) /* # frames 512-1023 */
+#define REG_RX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x16) /* # frames 1024-1518 */
+#define REG_RX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x17) /* # frames 1519-max */
+
+#define REG_TX_OUT_BYTES(pn) CRA(0x4,pn,0x18) /* # octets tx */
+#define REG_TX_PAUSE(pn) CRA(0x4,pn,0x19) /* # pause frames sent */
+#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,0x1a) /* # octets tx OK */
+#define REG_TX_UNICAST(pn) CRA(0x4,pn,0x1b) /* # frames unicast */
+#define REG_TX_MULTICAST(pn) CRA(0x4,pn,0x1c) /* # frames multicast */
+#define REG_TX_BROADCAST(pn) CRA(0x4,pn,0x1d) /* # frames broadcast */
+#define REG_TX_MULTIPLE_COLL(pn) CRA(0x4,pn,0x1e) /* # frames tx after multiple collisions */
+#define REG_TX_LATE_COLL(pn) CRA(0x4,pn,0x1f) /* # late collisions detected */
+#define REG_TX_XCOLL(pn) CRA(0x4,pn,0x20) /* # frames lost, excessive collisions */
+#define REG_TX_DEFER(pn) CRA(0x4,pn,0x21) /* # frames deferred on first tx attempt */
+#define REG_TX_XDEFER(pn) CRA(0x4,pn,0x22) /* # frames excessively deferred */
+#define REG_TX_CSENSE(pn) CRA(0x4,pn,0x23) /* carrier sense errors at frame end */
+#define REG_TX_SIZE_64(pn) CRA(0x4,pn,0x24) /* # frames 64 octets long */
+#define REG_TX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x25) /* # frames 65-127 octets */
+#define REG_TX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x26) /* # frames 128-255 */
+#define REG_TX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x27) /* # frames 256-511 */
+#define REG_TX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x28) /* # frames 512-1023 */
+#define REG_TX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x29) /* # frames 1024-1518 */
+#define REG_TX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x2a) /* # frames 1519-max */
+#define REG_TX_SINGLE_COLL(pn) CRA(0x4,pn,0x2b) /* # frames tx after single collision */
+#define REG_TX_BACKOFF2(pn) CRA(0x4,pn,0x2c) /* # frames tx ok after 2 backoffs/collisions */
+#define REG_TX_BACKOFF3(pn) CRA(0x4,pn,0x2d) /* after 3 backoffs/collisions */
+#define REG_TX_BACKOFF4(pn) CRA(0x4,pn,0x2e) /* after 4 */
+#define REG_TX_BACKOFF5(pn) CRA(0x4,pn,0x2f) /* after 5 */
+#define REG_TX_BACKOFF6(pn) CRA(0x4,pn,0x30) /* after 6 */
+#define REG_TX_BACKOFF7(pn) CRA(0x4,pn,0x31) /* after 7 */
+#define REG_TX_BACKOFF8(pn) CRA(0x4,pn,0x32) /* after 8 */
+#define REG_TX_BACKOFF9(pn) CRA(0x4,pn,0x33) /* after 9 */
+#define REG_TX_BACKOFF10(pn) CRA(0x4,pn,0x34) /* after 10 */
+#define REG_TX_BACKOFF11(pn) CRA(0x4,pn,0x35) /* after 11 */
+#define REG_TX_BACKOFF12(pn) CRA(0x4,pn,0x36) /* after 12 */
+#define REG_TX_BACKOFF13(pn) CRA(0x4,pn,0x37) /* after 13 */
+#define REG_TX_BACKOFF14(pn) CRA(0x4,pn,0x38) /* after 14 */
+#define REG_TX_BACKOFF15(pn) CRA(0x4,pn,0x39) /* after 15 */
+#define REG_TX_UNDERRUN(pn) CRA(0x4,pn,0x3a) /* # frames dropped from underrun */
+#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */
+#define REG_RX_IPG_SHRINK(pn) CRA(0x4,pn,0x3c) /* # of IPG shrinks detected */
+
+#define REG_STAT_STICKY1G(pn) CRA(0x4,pn,0x3e) /* tri-speed sticky bits */
+#define REG_STAT_STICKY10G CRA(0x4,0xa,0x3e) /* 10GbE sticky bits */
+#define REG_STAT_INIT(pn) CRA(0x4,pn,0x3f) /* Clear all statistics */
+
+/* MII-Management Block registers */
+/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If
+ * we hooked up to the one with separate directions, the middle 0x0 needs to
+ * change to 0x1. And the current errata states that MII-M 1 doesn't work.
+ */
+
+#define REG_MIIM_STATUS CRA(0x3,0x0,0x00) /* MII-M Status */
+#define REG_MIIM_CMD CRA(0x3,0x0,0x01) /* MII-M Command */
+#define REG_MIIM_DATA CRA(0x3,0x0,0x02) /* MII-M Data */
+#define REG_MIIM_PRESCALE CRA(0x3,0x0,0x03) /* MII-M MDC Prescale */
+
+#define REG_ING_FFILT_UM_EN CRA(0x2, 0, 0xd)
+#define REG_ING_FFILT_BE_EN CRA(0x2, 0, 0x1d)
+#define REG_ING_FFILT_VAL0 CRA(0x2, 0, 0x2d)
+#define REG_ING_FFILT_VAL1 CRA(0x2, 0, 0x3d)
+#define REG_ING_FFILT_MASK0 CRA(0x2, 0, 0x4d)
+#define REG_ING_FFILT_MASK1 CRA(0x2, 0, 0x5d)
+#define REG_ING_FFILT_MASK2 CRA(0x2, 0, 0x6d)
+#define REG_ING_FFILT_ETYPE CRA(0x2, 0, 0x7d)
+
+
+/* Whew. */
+
+#endif
diff --git a/drivers/net/chelsio/vsc8244.c b/drivers/net/chelsio/vsc8244.c
new file mode 100644
index 000000000000..c493e783d459
--- /dev/null
+++ b/drivers/net/chelsio/vsc8244.c
@@ -0,0 +1,368 @@
+/*
+ * This file is part of the Chelsio T2 Ethernet driver.
+ *
+ * Copyright (C) 2005 Chelsio Communications. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+#include "common.h"
+#include "cphy.h"
+#include "elmer0.h"
+
+#ifndef ADVERTISE_PAUSE_CAP
+# define ADVERTISE_PAUSE_CAP 0x400
+#endif
+#ifndef ADVERTISE_PAUSE_ASYM
+# define ADVERTISE_PAUSE_ASYM 0x800
+#endif
+
+/* Gigabit MII registers */
+#ifndef MII_CTRL1000
+# define MII_CTRL1000 9
+#endif
+
+#ifndef ADVERTISE_1000FULL
+# define ADVERTISE_1000FULL 0x200
+# define ADVERTISE_1000HALF 0x100
+#endif
+
+/* VSC8244 PHY specific registers. */
+enum {
+ VSC8244_INTR_ENABLE = 25,
+ VSC8244_INTR_STATUS = 26,
+ VSC8244_AUX_CTRL_STAT = 28,
+};
+
+enum {
+ VSC_INTR_RX_ERR = 1 << 0,
+ VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
+ VSC_INTR_CABLE = 1 << 2, /* cable impairment */
+ VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
+ VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
+ VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
+ VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
+ VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
+ VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
+ VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
+ VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
+ VSC_INTR_LINK_CHG = 1 << 13, /* link change */
+ VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
+};
+
+#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
+ VSC_INTR_NEG_DONE)
+#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
+ VSC_INTR_ENABLE)
+
+/* PHY specific auxiliary control & status register fields */
+#define S_ACSR_ACTIPHY_TMR 0
+#define M_ACSR_ACTIPHY_TMR 0x3
+#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
+
+#define S_ACSR_SPEED 3
+#define M_ACSR_SPEED 0x3
+#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
+
+#define S_ACSR_DUPLEX 5
+#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
+
+#define S_ACSR_ACTIPHY 6
+#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
+
+/*
+ * Reset the PHY. This PHY completes reset immediately so we never wait.
+ */
+static int vsc8244_reset(struct cphy *cphy, int wait)
+{
+ int err;
+ unsigned int ctl;
+
+ err = simple_mdio_read(cphy, MII_BMCR, &ctl);
+ if (err)
+ return err;
+
+ ctl &= ~BMCR_PDOWN;
+ ctl |= BMCR_RESET;
+ return simple_mdio_write(cphy, MII_BMCR, ctl);
+}
+
+static int vsc8244_intr_enable(struct cphy *cphy)
+{
+ simple_mdio_write(cphy, VSC8244_INTR_ENABLE, INTR_MASK);
+
+ /* Enable interrupts through Elmer */
+ if (t1_is_asic(cphy->adapter)) {
+ u32 elmer;
+
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+ elmer |= ELMER0_GP_BIT1;
+ if (is_T2(cphy->adapter)) {
+ elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
+ }
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+ }
+
+ return 0;
+}
+
+static int vsc8244_intr_disable(struct cphy *cphy)
+{
+ simple_mdio_write(cphy, VSC8244_INTR_ENABLE, 0);
+
+ if (t1_is_asic(cphy->adapter)) {
+ u32 elmer;
+
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+ elmer &= ~ELMER0_GP_BIT1;
+ if (is_T2(cphy->adapter)) {
+ elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
+ }
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+ }
+
+ return 0;
+}
+
+static int vsc8244_intr_clear(struct cphy *cphy)
+{
+ u32 val;
+ u32 elmer;
+
+ /* Clear PHY interrupts by reading the interrupt status register. */
+ simple_mdio_read(cphy, VSC8244_INTR_STATUS, &val);
+
+ if (t1_is_asic(cphy->adapter)) {
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+ elmer |= ELMER0_GP_BIT1;
+ if (is_T2(cphy->adapter)) {
+ elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
+ }
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+ }
+
+ return 0;
+}
+
+/*
+ * Force the PHY speed and duplex. This also disables auto-negotiation, except
+ * for 1Gb/s, where auto-negotiation is mandatory.
+ */
+static int vsc8244_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ int err;
+ unsigned int ctl;
+
+ err = simple_mdio_read(phy, MII_BMCR, &ctl);
+ if (err)
+ return err;
+
+ if (speed >= 0) {
+ ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+ if (speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+ else if (speed == SPEED_1000)
+ ctl |= BMCR_SPEED1000;
+ }
+ if (duplex >= 0) {
+ ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
+ if (duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ }
+ if (ctl & BMCR_SPEED1000) /* auto-negotiation required for 1Gb/s */
+ ctl |= BMCR_ANENABLE;
+ return simple_mdio_write(phy, MII_BMCR, ctl);
+}
+
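+/* Read-modify-write helper: set the given bits in an MDIO register. */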
+int t1_mdio_set_bits(struct cphy *phy, int mmd, int reg, unsigned int bits)
+{
+ int ret;
+ unsigned int val;
+
+ ret = mdio_read(phy, mmd, reg, &val);
+ if (!ret)
+ ret = mdio_write(phy, mmd, reg, val | bits);
+ return ret;
+}
+
+static int vsc8244_autoneg_enable(struct cphy *cphy)
+{
+ return t1_mdio_set_bits(cphy, 0, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+static int vsc8244_autoneg_restart(struct cphy *cphy)
+{
+ return t1_mdio_set_bits(cphy, 0, MII_BMCR, BMCR_ANRESTART);
+}
+
+static int vsc8244_advertise(struct cphy *phy, unsigned int advertise_map)
+{
+ int err;
+ unsigned int val = 0;
+
+ err = simple_mdio_read(phy, MII_CTRL1000, &val);
+ if (err)
+ return err;
+
+ val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+ if (advertise_map & ADVERTISED_1000baseT_Half)
+ val |= ADVERTISE_1000HALF;
+ if (advertise_map & ADVERTISED_1000baseT_Full)
+ val |= ADVERTISE_1000FULL;
+
+ err = simple_mdio_write(phy, MII_CTRL1000, val);
+ if (err)
+ return err;
+
+ val = 1;
+ if (advertise_map & ADVERTISED_10baseT_Half)
+ val |= ADVERTISE_10HALF;
+ if (advertise_map & ADVERTISED_10baseT_Full)
+ val |= ADVERTISE_10FULL;
+ if (advertise_map & ADVERTISED_100baseT_Half)
+ val |= ADVERTISE_100HALF;
+ if (advertise_map & ADVERTISED_100baseT_Full)
+ val |= ADVERTISE_100FULL;
+ if (advertise_map & ADVERTISED_PAUSE)
+ val |= ADVERTISE_PAUSE_CAP;
+ if (advertise_map & ADVERTISED_ASYM_PAUSE)
+ val |= ADVERTISE_PAUSE_ASYM;
+ return simple_mdio_write(phy, MII_ADVERTISE, val);
+}
+
+static int vsc8244_get_link_status(struct cphy *cphy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ unsigned int bmcr, status, lpa, adv;
+ int err, sp = -1, dplx = -1, pause = 0;
+
+ err = simple_mdio_read(cphy, MII_BMCR, &bmcr);
+ if (!err)
+ err = simple_mdio_read(cphy, MII_BMSR, &status);
+ if (err)
+ return err;
+
+ if (link_ok) {
+ /*
+ * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+ * once more to get the current link state.
+ */
+ if (!(status & BMSR_LSTATUS))
+ err = simple_mdio_read(cphy, MII_BMSR, &status);
+ if (err)
+ return err;
+ *link_ok = (status & BMSR_LSTATUS) != 0;
+ }
+ if (!(bmcr & BMCR_ANENABLE)) {
+ dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ if (bmcr & BMCR_SPEED1000)
+ sp = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ sp = SPEED_100;
+ else
+ sp = SPEED_10;
+ } else if (status & BMSR_ANEGCOMPLETE) {
+ err = simple_mdio_read(cphy, VSC8244_AUX_CTRL_STAT, &status);
+ if (err)
+ return err;
+
+ dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+ sp = G_ACSR_SPEED(status);
+ if (sp == 0)
+ sp = SPEED_10;
+ else if (sp == 1)
+ sp = SPEED_100;
+ else
+ sp = SPEED_1000;
+
+ if (fc && dplx == DUPLEX_FULL) {
+ err = simple_mdio_read(cphy, MII_LPA, &lpa);
+ if (!err)
+ err = simple_mdio_read(cphy, MII_ADVERTISE,
+ &adv);
+ if (err)
+ return err;
+
+ if (lpa & adv & ADVERTISE_PAUSE_CAP)
+ pause = PAUSE_RX | PAUSE_TX;
+ else if ((lpa & ADVERTISE_PAUSE_CAP) &&
+ (lpa & ADVERTISE_PAUSE_ASYM) &&
+ (adv & ADVERTISE_PAUSE_ASYM))
+ pause = PAUSE_TX;
+ else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
+ (adv & ADVERTISE_PAUSE_CAP))
+ pause = PAUSE_RX;
+ }
+ }
+ if (speed)
+ *speed = sp;
+ if (duplex)
+ *duplex = dplx;
+ if (fc)
+ *fc = pause;
+ return 0;
+}
+
+static int vsc8244_intr_handler(struct cphy *cphy)
+{
+ unsigned int cause;
+ int err, cphy_cause = 0;
+
+ err = simple_mdio_read(cphy, VSC8244_INTR_STATUS, &cause);
+ if (err)
+ return err;
+
+ cause &= INTR_MASK;
+ if (cause & CFG_CHG_INTR_MASK)
+ cphy_cause |= cphy_cause_link_change;
+ if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
+ cphy_cause |= cphy_cause_fifo_error;
+ return cphy_cause;
+}
+
+static void vsc8244_destroy(struct cphy *cphy)
+{
+ kfree(cphy);
+}
+
+static struct cphy_ops vsc8244_ops = {
+ .destroy = vsc8244_destroy,
+ .reset = vsc8244_reset,
+ .interrupt_enable = vsc8244_intr_enable,
+ .interrupt_disable = vsc8244_intr_disable,
+ .interrupt_clear = vsc8244_intr_clear,
+ .interrupt_handler = vsc8244_intr_handler,
+ .autoneg_enable = vsc8244_autoneg_enable,
+ .autoneg_restart = vsc8244_autoneg_restart,
+ .advertise = vsc8244_advertise,
+ .set_speed_duplex = vsc8244_set_speed_duplex,
+ .get_link_status = vsc8244_get_link_status
+};
+
+static struct cphy *vsc8244_phy_create(adapter_t *adapter, int phy_addr,
+ struct mdio_ops *mdio_ops)
+{
+ struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
+
+ if (!cphy)
+ return NULL;
+
+ cphy_init(cphy, adapter, phy_addr, &vsc8244_ops, mdio_ops);
+
+ return cphy;
+}
+
+
+static int vsc8244_phy_reset(adapter_t* adapter)
+{
+ return 0;
+}
+
+struct gphy t1_vsc8244_ops = {
+ vsc8244_phy_create,
+ vsc8244_phy_reset
+};
+
+
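For reference, a minimal standalone sketch (not part of the patch) of the IEEE 802.3 pause resolution that vsc8244_get_link_status() performs from the local advertisement (adv) and link-partner ability (lpa) words; it assumes the standard <linux/mii.h> ADVERTISE_* bits and the driver's PAUSE_RX/PAUSE_TX flags, with fallback values guessed for the sketch only:

#include <linux/mii.h>

#ifndef PAUSE_RX		/* driver flags; values assumed for the sketch */
# define PAUSE_RX 0x1
# define PAUSE_TX 0x2
#endif

/* Symmetric pause when both sides advertise PAUSE; otherwise resolve an
 * asymmetric direction from the PAUSE/ASYM_PAUSE combination each side
 * advertised, exactly as the driver code above does. */
static unsigned int resolve_pause(unsigned int adv, unsigned int lpa)
{
	if (lpa & adv & ADVERTISE_PAUSE_CAP)
		return PAUSE_RX | PAUSE_TX;
	if ((lpa & ADVERTISE_PAUSE_CAP) && (lpa & ADVERTISE_PAUSE_ASYM) &&
	    (adv & ADVERTISE_PAUSE_ASYM))
		return PAUSE_TX;
	if ((lpa & ADVERTISE_PAUSE_ASYM) && (adv & ADVERTISE_PAUSE_CAP))
		return PAUSE_RX;
	return 0;
}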
diff --git a/drivers/net/chelsio/vsc8244_reg.h b/drivers/net/chelsio/vsc8244_reg.h
new file mode 100644
index 000000000000..d3c1829055cb
--- /dev/null
+++ b/drivers/net/chelsio/vsc8244_reg.h
@@ -0,0 +1,172 @@
+/* $Date: 2005/11/23 16:28:53 $ $RCSfile: vsc8244_reg.h,v $ $Revision: 1.1 $ */
+#ifndef CHELSIO_VSC8244_REG_H
+#define CHELSIO_VSC8244_REG_H
+
+#ifndef BMCR_SPEED1000
+# define BMCR_SPEED1000 0x40
+#endif
+
+#ifndef ADVERTISE_PAUSE
+# define ADVERTISE_PAUSE 0x400
+#endif
+#ifndef ADVERTISE_PAUSE_ASYM
+# define ADVERTISE_PAUSE_ASYM 0x800
+#endif
+
+/* Gigabit MII registers */
+#define MII_GBMR 1 /* 1000Base-T mode register */
+#define MII_GBCR 9 /* 1000Base-T control register */
+#define MII_GBSR 10 /* 1000Base-T status register */
+
+/* 1000Base-T control register fields */
+#define GBCR_ADV_1000HALF 0x100
+#define GBCR_ADV_1000FULL 0x200
+#define GBCR_PREFER_MASTER 0x400
+#define GBCR_MANUAL_AS_MASTER 0x800
+#define GBCR_MANUAL_CONFIG_ENABLE 0x1000
+
+/* 1000Base-T status register fields */
+#define GBSR_LP_1000HALF 0x400
+#define GBSR_LP_1000FULL 0x800
+#define GBSR_REMOTE_OK 0x1000
+#define GBSR_LOCAL_OK 0x2000
+#define GBSR_LOCAL_MASTER 0x4000
+#define GBSR_MASTER_FAULT 0x8000
+
+/* Vitesse PHY interrupt status bits. */
+#if 0
+#define VSC8244_INTR_JABBER 0x0001
+#define VSC8244_INTR_POLARITY_CHNG 0x0002
+#define VSC8244_INTR_ENG_DETECT_CHNG 0x0010
+#define VSC8244_INTR_DOWNSHIFT 0x0020
+#define VSC8244_INTR_MDI_XOVER_CHNG 0x0040
+#define VSC8244_INTR_FIFO_OVER_UNDER 0x0080
+#define VSC8244_INTR_FALSE_CARRIER 0x0100
+#define VSC8244_INTR_SYMBOL_ERROR 0x0200
+#define VSC8244_INTR_LINK_CHNG 0x0400
+#define VSC8244_INTR_AUTONEG_DONE 0x0800
+#define VSC8244_INTR_PAGE_RECV 0x1000
+#define VSC8244_INTR_DUPLEX_CHNG 0x2000
+#define VSC8244_INTR_SPEED_CHNG 0x4000
+#define VSC8244_INTR_AUTONEG_ERR 0x8000
+#else
+//#define VSC8244_INTR_JABBER 0x0001
+//#define VSC8244_INTR_POLARITY_CHNG 0x0002
+//#define VSC8244_INTR_BIT2 0x0004
+//#define VSC8244_INTR_BIT3 0x0008
+#define VSC8244_INTR_RX_ERR 0x0001
+#define VSC8244_INTR_MASTER_SLAVE 0x0002
+#define VSC8244_INTR_CABLE_IMPAIRED 0x0004
+#define VSC8244_INTR_FALSE_CARRIER 0x0008
+//#define VSC8244_INTR_ENG_DETECT_CHNG 0x0010
+//#define VSC8244_INTR_DOWNSHIFT 0x0020
+//#define VSC8244_INTR_MDI_XOVER_CHNG 0x0040
+//#define VSC8244_INTR_FIFO_OVER_UNDER 0x0080
+#define VSC8244_INTR_BIT4 0x0010
+#define VSC8244_INTR_FIFO_RX 0x0020
+#define VSC8244_INTR_FIFO_OVER_UNDER 0x0040
+#define VSC8244_INTR_LOCK_LOST 0x0080
+//#define VSC8244_INTR_FALSE_CARRIER 0x0100
+//#define VSC8244_INTR_SYMBOL_ERROR 0x0200
+//#define VSC8244_INTR_LINK_CHNG 0x0400
+//#define VSC8244_INTR_AUTONEG_DONE 0x0800
+#define VSC8244_INTR_SYMBOL_ERROR 0x0100
+#define VSC8244_INTR_ENG_DETECT_CHNG 0x0200
+#define VSC8244_INTR_AUTONEG_DONE 0x0400
+#define VSC8244_INTR_AUTONEG_ERR 0x0800
+//#define VSC8244_INTR_PAGE_RECV 0x1000
+//#define VSC8244_INTR_DUPLEX_CHNG 0x2000
+//#define VSC8244_INTR_SPEED_CHNG 0x4000
+//#define VSC8244_INTR_AUTONEG_ERR 0x8000
+#define VSC8244_INTR_DUPLEX_CHNG 0x1000
+#define VSC8244_INTR_LINK_CHNG 0x2000
+#define VSC8244_INTR_SPEED_CHNG 0x4000
+#define VSC8244_INTR_STATUS 0x8000
+#endif
+
+
+/* Vitesse PHY specific registers. */
+#define VSC8244_SPECIFIC_CNTRL_REGISTER 16
+#define VSC8244_SPECIFIC_STATUS_REGISTER 0x1c
+#define VSC8244_INTERRUPT_ENABLE_REGISTER 0x19
+#define VSC8244_INTERRUPT_STATUS_REGISTER 0x1a
+#define VSC8244_EXT_PHY_SPECIFIC_CNTRL_REGISTER 20
+#define VSC8244_RECV_ERR_CNTR_REGISTER 21
+#define VSC8244_RES_REGISTER 22
+#define VSC8244_GLOBAL_STATUS_REGISTER 23
+#define VSC8244_LED_CONTROL_REGISTER 24
+#define VSC8244_MANUAL_LED_OVERRIDE_REGISTER 25
+#define VSC8244_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER 26
+#define VSC8244_EXT_PHY_SPECIFIC_STATUS_REGISTER 27
+#define VSC8244_VIRTUAL_CABLE_TESTER_REGISTER 28
+#define VSC8244_EXTENDED_ADDR_REGISTER 29
+#define VSC8244_EXTENDED_REGISTER 30
+
+/* PHY specific control register fields */
+#define S_PSCR_MDI_XOVER_MODE 5
+#define M_PSCR_MDI_XOVER_MODE 0x3
+#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE)
+#define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE)
+
+/* Extended PHY specific control register fields */
+#define S_DOWNSHIFT_ENABLE 8
+#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE)
+
+#define S_DOWNSHIFT_CNT 9
+#define M_DOWNSHIFT_CNT 0x7
+#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT)
+#define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT)
+
+/* PHY specific status register fields */
+#define S_PSSR_JABBER 0
+#define V_PSSR_JABBER (1 << S_PSSR_JABBER)
+
+#define S_PSSR_POLARITY 1
+#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY)
+
+#define S_PSSR_RX_PAUSE 2
+#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE)
+
+#define S_PSSR_TX_PAUSE 3
+#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE)
+
+#define S_PSSR_ENERGY_DETECT 4
+#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT)
+
+#define S_PSSR_DOWNSHIFT_STATUS 5
+#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS)
+
+#define S_PSSR_MDI 6
+#define V_PSSR_MDI (1 << S_PSSR_MDI)
+
+#define S_PSSR_CABLE_LEN 7
+#define M_PSSR_CABLE_LEN 0x7
+#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN)
+#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN)
+
+//#define S_PSSR_LINK 10
+//#define S_PSSR_LINK 13
+#define S_PSSR_LINK 2
+#define V_PSSR_LINK (1 << S_PSSR_LINK)
+
+//#define S_PSSR_STATUS_RESOLVED 11
+//#define S_PSSR_STATUS_RESOLVED 10
+#define S_PSSR_STATUS_RESOLVED 15
+#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED)
+
+#define S_PSSR_PAGE_RECEIVED 12
+#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED)
+
+//#define S_PSSR_DUPLEX 13
+//#define S_PSSR_DUPLEX 12
+#define S_PSSR_DUPLEX 5
+#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX)
+
+//#define S_PSSR_SPEED 14
+//#define S_PSSR_SPEED 14
+#define S_PSSR_SPEED 3
+#define M_PSSR_SPEED 0x3
+#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED)
+#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED)
+
+#endif
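A note on the naming convention this header shares with the other Chelsio register headers in the series: S_FOO is a field's bit offset, M_FOO its unshifted width mask, V_FOO(x) places a value into the field, and G_FOO(reg) extracts it. A minimal sketch of the pattern with a hypothetical EXAMPLE field:

/* hypothetical 2-bit field starting at bit 3 */
#define S_EXAMPLE	3
#define M_EXAMPLE	0x3
#define V_EXAMPLE(x)	((x) << S_EXAMPLE)
#define G_EXAMPLE(x)	(((x) >> S_EXAMPLE) & M_EXAMPLE)

/* usage mirrors the driver code above, e.g. G_ACSR_SPEED(status) pulls
 * the two-bit speed code out of the auxiliary control/status register */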
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 8f514cc0debd..dc3ab3b5c8cb 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -192,6 +192,7 @@
* 04 Aug 2003 macro Converted to the DMA API.
* 14 Aug 2004 macro Fix device names reported.
* 14 Jun 2005 macro Use irqreturn_t.
+ * 23 Oct 2006 macro Big-endian host support.
*/
/* Include files */
@@ -218,8 +219,8 @@
/* Version information string should be updated prior to each new release! */
#define DRV_NAME "defxx"
-#define DRV_VERSION "v1.08"
-#define DRV_RELDATE "2005/06/14"
+#define DRV_VERSION "v1.09"
+#define DRV_RELDATE "2006/10/23"
static char version[] __devinitdata =
DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
@@ -859,6 +860,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
print_name);
return(DFX_K_FAILURE);
}
+ data = cpu_to_le32(data);
memcpy(&bp->factory_mac_addr[0], &data, sizeof(u32));
if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
@@ -867,6 +869,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
print_name);
return(DFX_K_FAILURE);
}
+ data = cpu_to_le32(data);
memcpy(&bp->factory_mac_addr[4], &data, sizeof(u16));
/*
@@ -1085,27 +1088,23 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
}
/*
- * Set base address of Descriptor Block and bring adapter to DMA_AVAILABLE state
+ * Set the base address of Descriptor Block and bring adapter
+ * to DMA_AVAILABLE state.
*
- * Note: We also set the literal and data swapping requirements in this
- * command. Since this driver presently runs on Intel platforms
- * which are Little Endian, we'll tell the adapter to byte swap
- * data only. This code will need to change when we support
- * Big Endian systems (eg. PowerPC).
+ * Note: We also set the literal and data swapping requirements
+ * in this command.
*
- * Assumption: 32-bit physical address of descriptor block is 8Kbyte
- * aligned. That is, bits 0-12 of the address must be zero.
+ * Assumption: 32-bit physical address of descriptor block
+ * is 8Kbyte aligned.
*/
-
- if (dfx_hw_port_ctrl_req(bp,
- PI_PCTRL_M_INIT,
- (u32) (bp->descr_block_phys | PI_PDATA_A_INIT_M_BSWAP_DATA),
- 0,
- NULL) != DFX_K_SUCCESS)
- {
- printk("%s: Could not set descriptor block address!\n", bp->dev->name);
- return(DFX_K_FAILURE);
- }
+ if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
+ (u32)(bp->descr_block_phys |
+ PI_PDATA_A_INIT_M_BSWAP_INIT),
+ 0, NULL) != DFX_K_SUCCESS) {
+ printk("%s: Could not set descriptor block address!\n",
+ bp->dev->name);
+ return DFX_K_FAILURE;
+ }
/* Set transmit flush timeout value */
diff --git a/drivers/net/defxx.h b/drivers/net/defxx.h
index 8b1e9a11ca21..2ce8f97253eb 100644
--- a/drivers/net/defxx.h
+++ b/drivers/net/defxx.h
@@ -25,6 +25,7 @@
* macros to DEFXX.C.
* 12-Sep-96 LVS Removed packet request header pointers.
* 04 Aug 2003 macro Converted to the DMA API.
+ * 23 Oct 2006 macro Big-endian host support.
*/
#ifndef _DEFXX_H_
@@ -1344,7 +1345,7 @@ typedef struct
/* Register definition structures are defined for both big and little endian systems */
-#ifndef BIG_ENDIAN
+#ifndef __BIG_ENDIAN
/* Little endian format of Type 1 Producer register */
@@ -1402,7 +1403,11 @@ typedef union
} index;
} PI_TYPE_2_CONSUMER;
-#else
+/* Define swapping required by DMA transfers. */
+#define PI_PDATA_A_INIT_M_BSWAP_INIT \
+ (PI_PDATA_A_INIT_M_BSWAP_DATA)
+
+#else /* __BIG_ENDIAN */
/* Big endian format of Type 1 Producer register */
@@ -1460,7 +1465,11 @@ typedef union
} index;
} PI_TYPE_2_CONSUMER;
-#endif /* #ifndef BIG_ENDIAN */
+/* Define swapping required by DMA transfers. */
+#define PI_PDATA_A_INIT_M_BSWAP_INIT \
+ (PI_PDATA_A_INIT_M_BSWAP_DATA | PI_PDATA_A_INIT_M_BSWAP_LITERAL)
+
+#endif /* __BIG_ENDIAN */
/* Define EISA controller register offsets */
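The defxx changes above exist to make the driver work on big-endian hosts; the property they rely on (a hedged sketch, not from the patch) is that cpu_to_le32() followed by memcpy() lays a 32-bit value out least-significant byte first on any host, so the factory MAC address bytes land in the same order on little- and big-endian machines:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* store the low 32 bits of val as bytes, LSB first; cpu_to_le32() is a
 * no-op on little-endian hosts and a swap on big-endian ones, so the
 * resulting byte order is identical on both */
static void store_le32_bytes(u8 *dst, u32 val)
{
	__le32 le = cpu_to_le32(val);

	memcpy(dst, &le, sizeof(le));
}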
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index f87f6e3dc721..5113eef755b9 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1252,24 +1252,22 @@ static void set_multicast_list(struct net_device *dev)
struct depca_private *lp = (struct depca_private *) dev->priv;
u_long ioaddr = dev->base_addr;
- if (dev) {
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
+ netif_stop_queue(dev);
+ while (lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
- if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */
- lp->init_block.mode |= PROM;
- } else {
- SetMulticastFilter(dev);
- lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */
- }
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */
+ lp->init_block.mode |= PROM;
+ } else {
+ SetMulticastFilter(dev);
+ lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */
}
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ netif_start_queue(dev); /* Unlock the TX ring */
}
/*
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 7ecce438d258..f091042b146e 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -59,6 +59,9 @@
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/ip.h>
+#ifdef NETIF_F_TSO6
+#include <linux/ipv6.h>
+#endif
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/pkt_sched.h>
@@ -254,6 +257,17 @@ struct e1000_adapter {
spinlock_t tx_queue_lock;
#endif
atomic_t irq_sem;
+ unsigned int detect_link;
+ unsigned int total_tx_bytes;
+ unsigned int total_tx_packets;
+ unsigned int total_rx_bytes;
+ unsigned int total_rx_packets;
+ /* Interrupt Throttle Rate */
+ uint32_t itr;
+ uint32_t itr_setting;
+ uint16_t tx_itr;
+ uint16_t rx_itr;
+
struct work_struct reset_task;
uint8_t fc_autoneg;
@@ -262,6 +276,7 @@ struct e1000_adapter {
/* TX */
struct e1000_tx_ring *tx_ring; /* One per active queue */
+ unsigned int restart_queue;
unsigned long tx_queue_len;
uint32_t txd_cmd;
uint32_t tx_int_delay;
@@ -310,8 +325,6 @@ struct e1000_adapter {
uint64_t gorcl_old;
uint16_t rx_ps_bsize0;
- /* Interrupt Throttle Rate */
- uint32_t itr;
/* OS defined structs */
struct net_device *netdev;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c564adbd669b..da459f7177c6 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -85,6 +85,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+ { "tx_restart_queue", E1000_STAT(restart_queue) },
{ "rx_long_length_errors", E1000_STAT(stats.roc) },
{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
@@ -133,9 +134,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if (hw->autoneg == 1) {
ecmd->advertising |= ADVERTISED_Autoneg;
-
/* the e1000 autoneg seems to match ethtool nicely */
-
ecmd->advertising |= hw->autoneg_advertised;
}
@@ -285,7 +284,7 @@ e1000_set_pauseparam(struct net_device *netdev,
e1000_reset(adapter);
} else
retval = ((hw->media_type == e1000_media_type_fiber) ?
- e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw));
clear_bit(__E1000_RESETTING, &adapter->flags);
return retval;
@@ -350,6 +349,13 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
else
netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+ if (data)
+ netdev->features |= NETIF_F_TSO6;
+ else
+ netdev->features &= ~NETIF_F_TSO6;
+#endif
+
DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->tso_force = TRUE;
return 0;
@@ -774,7 +780,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
/* The status register is Read Only, so a write should fail.
* Some bits that get toggled are ignored.
*/
- switch (adapter->hw.mac_type) {
+ switch (adapter->hw.mac_type) {
/* there are several bits on newer hardware that are r/w */
case e1000_82571:
case e1000_82572:
@@ -802,12 +808,14 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
}
/* restore previous status */
E1000_WRITE_REG(&adapter->hw, STATUS, before);
+
if (adapter->hw.mac_type != e1000_ich8lan) {
REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
}
+
REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
@@ -820,8 +828,9 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
+
before = (adapter->hw.mac_type == e1000_ich8lan ?
- 0x06C3B33E : 0x06DFB3FE);
+ 0x06C3B33E : 0x06DFB3FE);
REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
@@ -834,10 +843,10 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
value = (adapter->hw.mac_type == e1000_ich8lan ?
- E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
+ E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
for (i = 0; i < value; i++) {
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
- 0xFFFFFFFF);
+ 0xFFFFFFFF);
}
} else {
@@ -883,8 +892,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
}
static irqreturn_t
-e1000_test_intr(int irq,
- void *data)
+e1000_test_intr(int irq, void *data)
{
struct net_device *netdev = (struct net_device *) data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -905,11 +913,11 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
/* NOTE: we don't test MSI interrupts here, yet */
/* Hook up test interrupt handler just for this test */
- if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED,
- netdev->name, netdev))
+ if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+ netdev))
shared_int = FALSE;
else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
*data = 1;
return -1;
}
@@ -925,6 +933,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
if (adapter->hw.mac_type == e1000_ich8lan && i == 8)
continue;
+
/* Interrupt to test */
mask = 1 << i;
@@ -1674,7 +1683,7 @@ e1000_diag_test(struct net_device *netdev,
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- /* Offline tests aren't run; pass by default */
+ /* Online tests aren't run; pass by default */
data[0] = 0;
data[1] = 0;
data[2] = 0;
@@ -1717,6 +1726,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol
retval = 0;
break;
case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
/* quad port adapters only support WoL on port A */
if (!adapter->quad_port_a) {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 796c4f7d4260..3655d902b0bd 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -385,6 +385,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82571EB_FIBER:
case E1000_DEV_ID_82571EB_SERDES:
case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
hw->mac_type = e1000_82571;
break;
case E1000_DEV_ID_82572EI_COPPER:
@@ -408,6 +409,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_ICH8_IGP_AMT:
case E1000_DEV_ID_ICH8_IGP_C:
case E1000_DEV_ID_ICH8_IFE:
+ case E1000_DEV_ID_ICH8_IFE_GT:
+ case E1000_DEV_ID_ICH8_IFE_G:
case E1000_DEV_ID_ICH8_IGP_M:
hw->mac_type = e1000_ich8lan;
break;
@@ -2367,6 +2370,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* Need to reset the PHY or these changes will be ignored */
mii_ctrl_reg |= MII_CR_RESET;
+
/* Disable MDI-X support for 10/100 */
} else if (hw->phy_type == e1000_phy_ife) {
ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
@@ -2379,6 +2383,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
if (ret_val)
return ret_val;
+
} else {
/* Clear Auto-Crossover to force MDI manually. IGP requires MDI
* forced whenever speed or duplex are forced.
@@ -3940,14 +3945,15 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw)
E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
- /* Write VR power-down enable */
+ /* Write VR power-down enable - bits 9:8 should be 10b */
e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
- e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data |
- IGP3_VR_CTRL_MODE_SHUT);
+ phy_data |= (1 << 9);
+ phy_data &= ~(1 << 8);
+ e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data);
/* Read it back and test */
e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
- if ((phy_data & IGP3_VR_CTRL_MODE_SHUT) || retry)
+ if (((phy_data & IGP3_VR_CTRL_MODE_MASK) == IGP3_VR_CTRL_MODE_SHUT) || retry)
break;
/* Issue PHY reset and repeat at most one more time */
@@ -4549,7 +4555,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
case e1000_ich8lan:
{
int32_t i = 0;
- uint32_t flash_size = E1000_READ_ICH8_REG(hw, ICH8_FLASH_GFPREG);
+ uint32_t flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
eeprom->type = e1000_eeprom_ich8;
eeprom->use_eerd = FALSE;
@@ -4565,12 +4571,14 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
}
}
- hw->flash_base_addr = (flash_size & ICH8_GFPREG_BASE_MASK) *
- ICH8_FLASH_SECTOR_SIZE;
+ hw->flash_base_addr = (flash_size & ICH_GFPREG_BASE_MASK) *
+ ICH_FLASH_SECTOR_SIZE;
+
+ hw->flash_bank_size = ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
+ hw->flash_bank_size -= (flash_size & ICH_GFPREG_BASE_MASK);
+
+ hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
- hw->flash_bank_size = ((flash_size >> 16) & ICH8_GFPREG_BASE_MASK) + 1;
- hw->flash_bank_size -= (flash_size & ICH8_GFPREG_BASE_MASK);
- hw->flash_bank_size *= ICH8_FLASH_SECTOR_SIZE;
hw->flash_bank_size /= 2 * sizeof(uint16_t);
break;
@@ -5620,8 +5628,8 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
* signature is valid. We want to do this after the write
* has completed so that we don't mark the segment valid
* while the write is still in progress */
- if (i == E1000_ICH8_NVM_SIG_WORD)
- high_byte = E1000_ICH8_NVM_SIG_MASK | high_byte;
+ if (i == E1000_ICH_NVM_SIG_WORD)
+ high_byte = E1000_ICH_NVM_SIG_MASK | high_byte;
error = e1000_verify_write_ich8_byte(hw,
(i << 1) + new_bank_offset + 1, high_byte);
@@ -5643,18 +5651,18 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
* erase as well since these bits are 11 to start with
* and we need to change bit 14 to 0b */
e1000_read_ich8_byte(hw,
- E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+ E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
&high_byte);
high_byte &= 0xBF;
error = e1000_verify_write_ich8_byte(hw,
- E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
+ E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
/* And invalidate the previously valid segment by setting
* its signature word (0x13) high_byte to 0b. This can be
* done without an erase because flash erase sets all bits
* to 1's. We can write 1's to 0's without an erase */
if (error == E1000_SUCCESS) {
error = e1000_verify_write_ich8_byte(hw,
- E1000_ICH8_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
+ E1000_ICH_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
}
/* Clear the now not used entry in the cache */
@@ -5841,6 +5849,7 @@ e1000_mta_set(struct e1000_hw *hw,
hash_reg = (hash_value >> 5) & 0x7F;
if (hw->mac_type == e1000_ich8lan)
hash_reg &= 0x1F;
+
hash_bit = hash_value & 0x1F;
mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
@@ -6026,6 +6035,7 @@ e1000_id_led_init(struct e1000_hw * hw)
else
eeprom_data = ID_LED_DEFAULT;
}
+
for (i = 0; i < 4; i++) {
temp = (eeprom_data >> (i << 2)) & led_mask;
switch (temp) {
@@ -8486,7 +8496,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
DEBUGFUNC("e1000_ich8_cycle_init");
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
/* May be check the Flash Des Valid bit in Hw status */
if (hsfsts.hsf_status.fldesvalid == 0) {
@@ -8499,7 +8509,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
hsfsts.hsf_status.flcerr = 1;
hsfsts.hsf_status.dael = 1;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
/* Either we should have a hardware SPI cycle in progress bit to check
* against, in order to start a new cycle or FDONE bit should be changed
@@ -8514,13 +8524,13 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
/* There is no cycle running at present, so we can start a cycle */
/* Begin by setting Flash Cycle Done. */
hsfsts.hsf_status.flcdone = 1;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
error = E1000_SUCCESS;
} else {
/* otherwise poll for sometime so the current cycle has a chance
* to end before giving up. */
- for (i = 0; i < ICH8_FLASH_COMMAND_TIMEOUT; i++) {
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcinprog == 0) {
error = E1000_SUCCESS;
break;
@@ -8531,7 +8541,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
/* Successful in waiting for previous cycle to timeout,
* now set the Flash Cycle Done. */
hsfsts.hsf_status.flcdone = 1;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
} else {
DEBUGOUT("Flash controller busy, cannot get access");
}
@@ -8553,13 +8563,13 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
uint32_t i = 0;
/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
- hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcgo = 1;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
/* wait till FDONE bit is set to 1 */
do {
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcdone == 1)
break;
udelay(1);
@@ -8593,10 +8603,10 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
DEBUGFUNC("e1000_read_ich8_data");
if (size < 1 || size > 2 || data == 0x0 ||
- index > ICH8_FLASH_LINEAR_ADDR_MASK)
+ index > ICH_FLASH_LINEAR_ADDR_MASK)
return error;
- flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+ flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
hw->flash_base_addr;
do {
@@ -8606,25 +8616,25 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
if (error != E1000_SUCCESS)
break;
- hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
hsflctl.hsf_ctrl.fldbcount = size - 1;
- hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
/* Write the last 24 bits of index into Flash Linear address field in
* Flash Address */
/* TODO: TBD maybe check the index against the size of flash */
- E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
- error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+ error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
/* Check if FCERR is set to 1, if set to 1, clear it and try the whole
* sequence a few more times, else read in (shift in) the Flash Data0,
* the order is least significant byte first msb to lsb */
if (error == E1000_SUCCESS) {
- flash_data = E1000_READ_ICH8_REG(hw, ICH8_FLASH_FDATA0);
+ flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
if (size == 1) {
*data = (uint8_t)(flash_data & 0x000000FF);
} else if (size == 2) {
@@ -8634,9 +8644,9 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
} else {
/* If we've gotten here, then things are probably completely hosed,
* but if the error condition is detected, it won't hurt to give
- * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+ * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
*/
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcerr == 1) {
/* Repeat for some time before giving up. */
continue;
@@ -8645,7 +8655,7 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
break;
}
}
- } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
return error;
}
@@ -8672,10 +8682,10 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
DEBUGFUNC("e1000_write_ich8_data");
if (size < 1 || size > 2 || data > size * 0xff ||
- index > ICH8_FLASH_LINEAR_ADDR_MASK)
+ index > ICH_FLASH_LINEAR_ADDR_MASK)
return error;
- flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+ flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
hw->flash_base_addr;
do {
@@ -8685,34 +8695,34 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
if (error != E1000_SUCCESS)
break;
- hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
hsflctl.hsf_ctrl.fldbcount = size -1;
- hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_WRITE;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
/* Write the last 24 bits of index into Flash Linear address field in
* Flash Address */
- E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
if (size == 1)
flash_data = (uint32_t)data & 0x00FF;
else
flash_data = (uint32_t)data;
- E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FDATA0, flash_data);
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
/* check if FCERR is set to 1 , if set to 1, clear it and try the whole
* sequence a few more times else done */
- error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+ error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
if (error == E1000_SUCCESS) {
break;
} else {
/* If we're here, then things are most likely completely hosed,
* but if the error condition is detected, it won't hurt to give
- * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+ * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
*/
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcerr == 1) {
/* Repeat for some time before giving up. */
continue;
@@ -8721,7 +8731,7 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
break;
}
}
- } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
return error;
}
@@ -8840,7 +8850,7 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
int32_t j = 0;
int32_t error_flag = 0;
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
/* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
/* 00: The Hw sector is 256 bytes, hence we need to erase 16
@@ -8853,19 +8863,14 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
* 11: The Hw sector size is 64K bytes */
if (hsfsts.hsf_status.berasesz == 0x0) {
/* Hw sector size 256 */
- sub_sector_size = ICH8_FLASH_SEG_SIZE_256;
- bank_size = ICH8_FLASH_SECTOR_SIZE;
- iteration = ICH8_FLASH_SECTOR_SIZE / ICH8_FLASH_SEG_SIZE_256;
+ sub_sector_size = ICH_FLASH_SEG_SIZE_256;
+ bank_size = ICH_FLASH_SECTOR_SIZE;
+ iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256;
} else if (hsfsts.hsf_status.berasesz == 0x1) {
- bank_size = ICH8_FLASH_SEG_SIZE_4K;
- iteration = 1;
- } else if (hw->mac_type != e1000_ich8lan &&
- hsfsts.hsf_status.berasesz == 0x2) {
- /* 8K erase size invalid for ICH8 - added in for ICH9 */
- bank_size = ICH9_FLASH_SEG_SIZE_8K;
+ bank_size = ICH_FLASH_SEG_SIZE_4K;
iteration = 1;
} else if (hsfsts.hsf_status.berasesz == 0x3) {
- bank_size = ICH8_FLASH_SEG_SIZE_64K;
+ bank_size = ICH_FLASH_SEG_SIZE_64K;
iteration = 1;
} else {
return error;
@@ -8883,9 +8888,9 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
/* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
* Control */
- hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
- hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_ERASE;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
/* Write the last 24 bits of an index within the block into Flash
* Linear address field in Flash Address. This probably needs to
@@ -8893,17 +8898,17 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
* the software bank size (4, 8 or 64 KBytes) */
flash_linear_address = bank * bank_size + j * sub_sector_size;
flash_linear_address += hw->flash_base_addr;
- flash_linear_address &= ICH8_FLASH_LINEAR_ADDR_MASK;
+ flash_linear_address &= ICH_FLASH_LINEAR_ADDR_MASK;
- E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
- error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_ERASE_TIMEOUT);
+ error = e1000_ich8_flash_cycle(hw, ICH_FLASH_ERASE_TIMEOUT);
/* Check if FCERR is set to 1. If 1, clear it and try the whole
* sequence a few more times else Done */
if (error == E1000_SUCCESS) {
break;
} else {
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcerr == 1) {
/* repeat for some time before giving up */
continue;
@@ -8912,7 +8917,7 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
break;
}
}
- } while ((count < ICH8_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
+ } while ((count < ICH_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
if (error_flag == 1)
break;
}
@@ -9013,5 +9018,3 @@ e1000_init_lcd_from_nvm(struct e1000_hw *hw)
return E1000_SUCCESS;
}
-
-
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 449a60303e07..3321fb13bfa9 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -128,11 +128,13 @@ typedef enum {
/* PCI bus widths */
typedef enum {
e1000_bus_width_unknown = 0,
+ /* These PCIe values should literally match the possible return values
+ * from config space */
+ e1000_bus_width_pciex_1 = 1,
+ e1000_bus_width_pciex_2 = 2,
+ e1000_bus_width_pciex_4 = 4,
e1000_bus_width_32,
e1000_bus_width_64,
- e1000_bus_width_pciex_1,
- e1000_bus_width_pciex_2,
- e1000_bus_width_pciex_4,
e1000_bus_width_reserved
} e1000_bus_width;
@@ -326,6 +328,7 @@ int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
int32_t e1000_phy_reset(struct e1000_hw *hw);
int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
+
void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
/* EEPROM Functions */
@@ -390,7 +393,6 @@ int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
uint16_t length);
boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
-
int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw);
int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
@@ -473,6 +475,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82571EB_FIBER 0x105F
#define E1000_DEV_ID_82571EB_SERDES 0x1060
#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC
#define E1000_DEV_ID_82572EI_COPPER 0x107D
#define E1000_DEV_ID_82572EI_FIBER 0x107E
#define E1000_DEV_ID_82572EI_SERDES 0x107F
@@ -490,6 +493,8 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
#define E1000_DEV_ID_ICH8_IGP_C 0x104B
#define E1000_DEV_ID_ICH8_IFE 0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
#define E1000_DEV_ID_ICH8_IGP_M 0x104D
@@ -576,6 +581,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
* E1000_RAR_ENTRIES - 1 multicast addresses.
*/
#define E1000_RAR_ENTRIES 15
+
#define E1000_RAR_ENTRIES_ICH8LAN 6
#define MIN_NUMBER_OF_DESCRIPTORS 8
@@ -1335,9 +1341,9 @@ struct e1000_hw_stats {
uint64_t gotch;
uint64_t rnbc;
uint64_t ruc;
+ uint64_t rfc;
uint64_t roc;
uint64_t rlerrc;
- uint64_t rfc;
uint64_t rjc;
uint64_t mgprc;
uint64_t mgpdc;
@@ -1577,8 +1583,8 @@ struct e1000_hw {
#define E1000_HICR_FW_RESET 0xC0
#define E1000_SHADOW_RAM_WORDS 2048
-#define E1000_ICH8_NVM_SIG_WORD 0x13
-#define E1000_ICH8_NVM_SIG_MASK 0xC0
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC0
/* EEPROM Read */
#define E1000_EERD_START 0x00000001 /* Start Read */
@@ -3172,6 +3178,7 @@ struct e1000_host_command_info {
#define IGP3_VR_CTRL \
PHY_REG(776, 18) /* Voltage regulator control register */
#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */
+#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */
#define IGP3_CAPABILITY \
PHY_REG(776, 19) /* IGP3 Capability Register */
@@ -3256,41 +3263,40 @@ struct e1000_host_command_info {
#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
-#define ICH8_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
-#define ICH8_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
-#define ICH8_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
-#define ICH8_FLASH_SEG_SIZE_256 256
-#define ICH8_FLASH_SEG_SIZE_4K 4096
-#define ICH9_FLASH_SEG_SIZE_8K 8192
-#define ICH8_FLASH_SEG_SIZE_64K 65536
-
-#define ICH8_CYCLE_READ 0x0
-#define ICH8_CYCLE_RESERVED 0x1
-#define ICH8_CYCLE_WRITE 0x2
-#define ICH8_CYCLE_ERASE 0x3
-
-#define ICH8_FLASH_GFPREG 0x0000
-#define ICH8_FLASH_HSFSTS 0x0004
-#define ICH8_FLASH_HSFCTL 0x0006
-#define ICH8_FLASH_FADDR 0x0008
-#define ICH8_FLASH_FDATA0 0x0010
-#define ICH8_FLASH_FRACC 0x0050
-#define ICH8_FLASH_FREG0 0x0054
-#define ICH8_FLASH_FREG1 0x0058
-#define ICH8_FLASH_FREG2 0x005C
-#define ICH8_FLASH_FREG3 0x0060
-#define ICH8_FLASH_FPR0 0x0074
-#define ICH8_FLASH_FPR1 0x0078
-#define ICH8_FLASH_SSFSTS 0x0090
-#define ICH8_FLASH_SSFCTL 0x0092
-#define ICH8_FLASH_PREOP 0x0094
-#define ICH8_FLASH_OPTYPE 0x0096
-#define ICH8_FLASH_OPMENU 0x0098
-
-#define ICH8_FLASH_REG_MAPSIZE 0x00A0
-#define ICH8_FLASH_SECTOR_SIZE 4096
-#define ICH8_GFPREG_BASE_MASK 0x1FFF
-#define ICH8_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
+#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define ICH_CYCLE_READ 0x0
+#define ICH_CYCLE_RESERVED 0x1
+#define ICH_CYCLE_WRITE 0x2
+#define ICH_CYCLE_ERASE 0x3
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_FRACC 0x0050
+#define ICH_FLASH_FREG0 0x0054
+#define ICH_FLASH_FREG1 0x0058
+#define ICH_FLASH_FREG2 0x005C
+#define ICH_FLASH_FREG3 0x0060
+#define ICH_FLASH_FPR0 0x0074
+#define ICH_FLASH_FPR1 0x0078
+#define ICH_FLASH_SSFSTS 0x0090
+#define ICH_FLASH_SSFCTL 0x0092
+#define ICH_FLASH_PREOP 0x0094
+#define ICH_FLASH_OPTYPE 0x0096
+#define ICH_FLASH_OPMENU 0x0098
+
+#define ICH_FLASH_REG_MAPSIZE 0x00A0
+#define ICH_FLASH_SECTOR_SIZE 4096
+#define ICH_GFPREG_BASE_MASK 0x1FFF
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
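The adaptive interrupt throttling added to e1000_main.c below programs the ITR register from a target interrupt rate. A short editorial sketch of the arithmetic, assuming the register's 256 ns granularity:

#include <linux/types.h>

/* ITR holds the minimum inter-interrupt interval in 256 ns units, so a
 * target of ints_per_sec interrupts/second becomes
 *   1000000000 / (ints_per_sec * 256)
 * e.g. 20000 ints/s -> ~195 register counts, i.e. roughly 50 us between
 * interrupts, matching the "50 usec aka 20000 ints/s" and "hwitr = ~200"
 * comments in the code below. */
static u32 itr_register_value(u32 ints_per_sec)
{
	return 1000000000 / (ints_per_sec * 256);
}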
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 726ec5e88ab2..7a0828869ecf 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -27,6 +27,7 @@
*******************************************************************************/
#include "e1000.h"
+#include <net/ip6_checksum.h>
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -35,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "7.2.9-k4"DRIVERNAPI
+#define DRV_VERSION "7.3.15-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -103,6 +104,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x10B9),
INTEL_E1000_ETHERNET_DEVICE(0x10BA),
INTEL_E1000_ETHERNET_DEVICE(0x10BB),
+ INTEL_E1000_ETHERNET_DEVICE(0x10BC),
+ INTEL_E1000_ETHERNET_DEVICE(0x10C4),
+ INTEL_E1000_ETHERNET_DEVICE(0x10C5),
/* required last entry */
{0,}
};
@@ -154,6 +158,9 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
+#ifdef CONFIG_PCI_MSI
+static irqreturn_t e1000_intr_msi(int irq, void *data);
+#endif
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
@@ -285,7 +292,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
flags = IRQF_SHARED;
#ifdef CONFIG_PCI_MSI
- if (adapter->hw.mac_type > e1000_82547_rev_2) {
+ if (adapter->hw.mac_type >= e1000_82571) {
adapter->have_msi = TRUE;
if ((err = pci_enable_msi(adapter->pdev))) {
DPRINTK(PROBE, ERR,
@@ -293,8 +300,14 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
adapter->have_msi = FALSE;
}
}
- if (adapter->have_msi)
+ if (adapter->have_msi) {
flags &= ~IRQF_SHARED;
+ err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
+ netdev->name, netdev);
+ if (err)
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate interrupt Error: %d\n", err);
+ } else
#endif
if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
netdev->name, netdev)))
@@ -375,7 +388,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
* e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that the
* driver is no longer loaded. For AMT version (only with 82573) i
- * of the f/w this means that the netowrk i/f is closed.
+ * of the f/w this means that the network i/f is closed.
*
**/
@@ -416,7 +429,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
* e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that
* the driver is loaded. For AMT version (only with 82573)
- * of the f/w this means that the netowrk i/f is open.
+ * of the f/w this means that the network i/f is open.
*
**/
@@ -426,6 +439,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
uint32_t ctrl_ext;
uint32_t swsm;
uint32_t extcnf;
+
/* Let firmware know the driver has taken over */
switch (adapter->hw.mac_type) {
case e1000_82571:
@@ -601,9 +615,6 @@ void
e1000_reset(struct e1000_adapter *adapter)
{
uint32_t pba, manc;
-#ifdef DISABLE_MULR
- uint32_t tctl;
-#endif
uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
/* Repartition Pba for greater than 9k mtu
@@ -670,12 +681,7 @@ e1000_reset(struct e1000_adapter *adapter)
e1000_reset_hw(&adapter->hw);
if (adapter->hw.mac_type >= e1000_82544)
E1000_WRITE_REG(&adapter->hw, WUC, 0);
-#ifdef DISABLE_MULR
- /* disable Multiple Reads in Transmit Control Register for debugging */
- tctl = E1000_READ_REG(hw, TCTL);
- E1000_WRITE_REG(hw, TCTL, tctl & ~E1000_TCTL_MULR);
-#endif
if (e1000_init_hw(&adapter->hw))
DPRINTK(PROBE, ERR, "Hardware Error\n");
e1000_update_mng_vlan(adapter);
@@ -851,9 +857,9 @@ e1000_probe(struct pci_dev *pdev,
(adapter->hw.mac_type != e1000_82547))
netdev->features |= NETIF_F_TSO;
-#ifdef NETIF_F_TSO_IPV6
+#ifdef NETIF_F_TSO6
if (adapter->hw.mac_type > e1000_82547_rev_2)
- netdev->features |= NETIF_F_TSO_IPV6;
+ netdev->features |= NETIF_F_TSO6;
#endif
#endif
if (pci_using_dac)
@@ -968,6 +974,7 @@ e1000_probe(struct pci_dev *pdev,
break;
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
@@ -1279,12 +1286,10 @@ e1000_open(struct net_device *netdev)
return -EBUSY;
/* allocate transmit descriptors */
-
if ((err = e1000_setup_all_tx_resources(adapter)))
goto err_setup_tx;
/* allocate receive descriptors */
-
if ((err = e1000_setup_all_rx_resources(adapter)))
goto err_setup_rx;
@@ -1569,6 +1574,8 @@ e1000_configure_tx(struct e1000_adapter *adapter)
if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
tarc = E1000_READ_REG(hw, TARC0);
+ /* set the speed mode bit; we'll clear it later if we're not
+ * at gigabit link */
tarc |= (1 << 21);
E1000_WRITE_REG(hw, TARC0, tarc);
} else if (hw->mac_type == e1000_80003es2lan) {
@@ -1583,8 +1590,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
e1000_config_collision_dist(hw);
/* Setup Transmit Descriptor Settings for eop descriptor */
- adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
- E1000_TXD_CMD_IFCS;
+ adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+ /* only set IDE if we are delaying interrupts using the timers */
+ if (adapter->tx_int_delay)
+ adapter->txd_cmd |= E1000_TXD_CMD_IDE;
if (hw->mac_type < e1000_82543)
adapter->txd_cmd |= E1000_TXD_CMD_RPS;
@@ -1821,8 +1831,11 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
/* Configure extra packet-split registers */
rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
- /* disable IPv6 packet split support */
- rfctl |= E1000_RFCTL_IPV6_DIS;
+ /* disable packet split support for IPv6 extension headers,
+ * because some malformed IPv6 headers can hang the RX */
+ rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
+ E1000_RFCTL_NEW_IPV6_EXT_DIS);
+
E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
rctl |= E1000_RCTL_DTYP_PS;
@@ -1885,7 +1898,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
if (hw->mac_type >= e1000_82540) {
E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
- if (adapter->itr > 1)
+ if (adapter->itr_setting != 0)
E1000_WRITE_REG(hw, ITR,
1000000000 / (adapter->itr * 256));
}
@@ -1895,11 +1908,11 @@ e1000_configure_rx(struct e1000_adapter *adapter)
/* Reset delay timers after every interrupt */
ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
#ifdef CONFIG_E1000_NAPI
- /* Auto-Mask interrupts upon ICR read. */
+ /* Auto-Mask interrupts upon ICR access */
ctrl_ext |= E1000_CTRL_EXT_IAME;
+ E1000_WRITE_REG(hw, IAM, 0xffffffff);
#endif
E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
- E1000_WRITE_REG(hw, IAM, ~0);
E1000_WRITE_FLUSH(hw);
}
@@ -1938,6 +1951,12 @@ e1000_configure_rx(struct e1000_adapter *adapter)
E1000_WRITE_REG(hw, RXCSUM, rxcsum);
}
+ /* enable early receives on 82573; only takes effect when the total
+ * frame size is over 2048 bytes, i.e. for jumbo frames */
+#define E1000_ERT_2048 0x100
+ if (hw->mac_type == e1000_82573)
+ E1000_WRITE_REG(hw, ERT, E1000_ERT_2048);
+
/* Enable Receives */
E1000_WRITE_REG(hw, RCTL, rctl);
}
@@ -1991,10 +2010,13 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
buffer_info->dma,
buffer_info->length,
PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
}
- if (buffer_info->skb)
+ if (buffer_info->skb) {
dev_kfree_skb_any(buffer_info->skb);
- memset(buffer_info, 0, sizeof(struct e1000_buffer));
+ buffer_info->skb = NULL;
+ }
+ /* buffer_info must be completely set up in the transmit path */
}
/**
@@ -2418,6 +2440,7 @@ e1000_watchdog(unsigned long data)
DPRINTK(LINK, INFO,
"Gigabit has been disabled, downgrading speed\n");
}
+
if (adapter->hw.mac_type == e1000_82573) {
e1000_enable_tx_pkt_filtering(&adapter->hw);
if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
@@ -2462,13 +2485,12 @@ e1000_watchdog(unsigned long data)
if ((adapter->hw.mac_type == e1000_82571 ||
adapter->hw.mac_type == e1000_82572) &&
txb2b == 0) {
-#define SPEED_MODE_BIT (1 << 21)
uint32_t tarc0;
tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
- tarc0 &= ~SPEED_MODE_BIT;
+ tarc0 &= ~(1 << 21);
E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
}
-
+
#ifdef NETIF_F_TSO
/* disable TSO for pcie and 10/100 speeds, to avoid
* some hardware issues */
@@ -2480,9 +2502,15 @@ e1000_watchdog(unsigned long data)
DPRINTK(PROBE,INFO,
"10/100 speed: disabling TSO\n");
netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+ netdev->features &= ~NETIF_F_TSO6;
+#endif
break;
case SPEED_1000:
netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+ netdev->features |= NETIF_F_TSO6;
+#endif
break;
default:
/* oops */
@@ -2549,19 +2577,6 @@ e1000_watchdog(unsigned long data)
}
}
- /* Dynamic mode for Interrupt Throttle Rate (ITR) */
- if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
- /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
- * asymmetrical Tx or Rx gets ITR=8000; everyone
- * else is between 2000-8000. */
- uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
- uint32_t dif = (adapter->gotcl > adapter->gorcl ?
- adapter->gotcl - adapter->gorcl :
- adapter->gorcl - adapter->gotcl) / 10000;
- uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
- E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
- }
-
/* Cause software interrupt to ensure rx ring is cleaned */
E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
@@ -2577,6 +2592,135 @@ e1000_watchdog(unsigned long data)
mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Stores a new ITR value based on packet and byte counts during the
+ * last interrupt.  The advantage of per-interrupt computation is faster
+ * updates and a more accurate ITR for the current traffic pattern.
+ * Constants in this function were computed based on theoretical maximum
+ * wire speed; thresholds were set based on testing data as well as
+ * attempting to minimize response time while increasing bulk throughput.
+ * This functionality is controlled by the InterruptThrottleRate module
+ * parameter (see e1000_param.c).
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+ uint16_t itr_setting,
+ int packets,
+ int bytes)
+{
+ unsigned int retval = itr_setting;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (unlikely(hw->mac_type < e1000_82540))
+ goto update_itr_done;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+
+ switch (itr_setting) {
+ case lowest_latency:
+ if ((packets < 5) && (bytes > 512))
+ retval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ if ((packets < 10) ||
+ ((bytes/packets) > 1200))
+ retval = bulk_latency;
+ else if (packets > 35)
+ retval = lowest_latency;
+ } else if (packets <= 2 && bytes < 512)
+ retval = lowest_latency;
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ retval = low_latency;
+ } else {
+ if (bytes < 6000)
+ retval = low_latency;
+ }
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ uint16_t current_itr;
+ uint32_t new_itr = adapter->itr;
+
+ if (unlikely(hw->mac_type < e1000_82540))
+ return;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ if (unlikely(adapter->link_speed != SPEED_1000)) {
+ current_itr = 0;
+ new_itr = 4000;
+ goto set_itr_now;
+ }
+
+ adapter->tx_itr = e1000_update_itr(adapter,
+ adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
+ adapter->rx_itr = e1000_update_itr(adapter,
+ adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
+
+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+ /* conservative mode eliminates the lowest_latency setting */
+ if (current_itr == lowest_latency && (adapter->itr_setting == 3))
+ current_itr = low_latency;
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 70000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ new_itr = 4000;
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != adapter->itr) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing */
+ new_itr = new_itr > adapter->itr ?
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
+ adapter->itr = new_itr;
+ E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
+ }
+
+ return;
+}
+
#define E1000_TX_FLAGS_CSUM 0x00000001
#define E1000_TX_FLAGS_VLAN 0x00000002
#define E1000_TX_FLAGS_TSO 0x00000004
@@ -2617,7 +2761,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb->h.raw - skb->data - 1;
-#ifdef NETIF_F_TSO_IPV6
+#ifdef NETIF_F_TSO6
} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb->nh.ipv6h->payload_len = 0;
skb->h.th->check =
@@ -2653,6 +2797,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
context_desc->cmd_and_length = cpu_to_le32(cmd_length);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
if (++i == tx_ring->count) i = 0;
tx_ring->next_to_use = i;
@@ -2687,6 +2832,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
if (unlikely(++i == tx_ring->count)) i = 0;
tx_ring->next_to_use = i;
@@ -2755,6 +2901,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
size,
PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
len -= size;
offset += size;
@@ -2794,6 +2941,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
size,
PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
len -= size;
offset += size;
@@ -2859,6 +3007,9 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tdt);
+ /* we need this if more than one processor can write to our tail
+ * at a time; it synchronizes IO on IA64/Altix systems */
+ mmiowb();
}
/**
@@ -2952,6 +3103,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
/* A reprieve! */
netif_start_queue(netdev);
+ ++adapter->restart_queue;
return 0;
}
@@ -3010,9 +3162,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
- /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
- * points to just header, pull a few bytes of payload from
- * frags into skb->data */
+ /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+ * points to just header, pull a few bytes of payload from
+ * frags into skb->data */
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
switch (adapter->hw.mac_type) {
@@ -3316,12 +3468,12 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.roc += E1000_READ_REG(hw, ROC);
if (adapter->hw.mac_type != e1000_ich8lan) {
- adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
- adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
- adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
- adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
- adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
- adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
+ adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
+ adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
+ adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
+ adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
+ adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
+ adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
}
adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
@@ -3352,12 +3504,12 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.tpr += E1000_READ_REG(hw, TPR);
if (adapter->hw.mac_type != e1000_ich8lan) {
- adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
- adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
- adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
- adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
- adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
- adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
+ adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
+ adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
+ adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
+ adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
+ adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
+ adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
}
adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
@@ -3383,18 +3535,17 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
if (adapter->hw.mac_type != e1000_ich8lan) {
- adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
- adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
- adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
- adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
- adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
- adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
- adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+ adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
+ adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
+ adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
+ adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
+ adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
+ adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
+ adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
}
}
/* Fill out the OS statistics structure */
-
adapter->net_stats.rx_packets = adapter->stats.gprc;
adapter->net_stats.tx_packets = adapter->stats.gptc;
adapter->net_stats.rx_bytes = adapter->stats.gorcl;
@@ -3426,7 +3577,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
/* Tx Dropped needs to be maintained elsewhere */
/* Phy Stats */
-
if (hw->media_type == e1000_media_type_copper) {
if ((adapter->link_speed == SPEED_1000) &&
(!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
@@ -3442,6 +3592,95 @@ e1000_update_stats(struct e1000_adapter *adapter)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
+#ifdef CONFIG_PCI_MSI
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static
+irqreturn_t e1000_intr_msi(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+#ifndef CONFIG_E1000_NAPI
+ int i;
+#endif
+
+ /* this code avoids the read of ICR but has to get 1000 interrupts
+ * at every link change event before it will notice the change */
+ if (++adapter->detect_link >= 1000) {
+ uint32_t icr = E1000_READ_REG(hw, ICR);
+#ifdef CONFIG_E1000_NAPI
+ /* reading ICR disables interrupts using IAM, so keep up with our
+ * enable/disable accounting */
+ atomic_inc(&adapter->irq_sem);
+#endif
+ adapter->detect_link = 0;
+ if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) &&
+ (icr & E1000_ICR_INT_ASSERTED)) {
+ hw->get_link_status = 1;
+ /* 80003ES2LAN workaround--
+ * For packet buffer work-around on link down event;
+ * disable receives here in the ISR and
+ * reset adapter in watchdog
+ */
+ if (netif_carrier_ok(netdev) &&
+ (adapter->hw.mac_type == e1000_80003es2lan)) {
+ /* disable receives */
+ uint32_t rctl = E1000_READ_REG(hw, RCTL);
+ E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ mod_timer(&adapter->watchdog_timer,
+ jiffies + 1);
+ }
+ } else {
+ E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ |
+ E1000_ICR_LSC)));
+ /* bummer we have to flush here, but things break otherwise as
+ * some event appears to be lost or delayed and throughput
+ * drops. In almost all tests this flush is unnecessary */
+ E1000_WRITE_FLUSH(hw);
+#ifdef CONFIG_E1000_NAPI
+ /* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
+ * masked. No need for the IMC write, but it does mean we
+ * should account for it ASAP. */
+ atomic_inc(&adapter->irq_sem);
+#endif
+ }
+
+#ifdef CONFIG_E1000_NAPI
+ if (likely(netif_rx_schedule_prep(netdev))) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __netif_rx_schedule(netdev);
+ } else
+ e1000_irq_enable(adapter);
+#else
+ adapter->total_tx_bytes = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_packets = 0;
+
+ for (i = 0; i < E1000_MAX_INTR; i++)
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+ break;
+
+ if (likely(adapter->itr_setting & 3))
+ e1000_set_itr(adapter);
+#endif
+
+ return IRQ_HANDLED;
+}
+#endif
/**
* e1000_intr - Interrupt Handler
@@ -3458,7 +3697,17 @@ e1000_intr(int irq, void *data)
uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
#ifndef CONFIG_E1000_NAPI
int i;
-#else
+#endif
+ if (unlikely(!icr))
+ return IRQ_NONE; /* Not our interrupt */
+
+#ifdef CONFIG_E1000_NAPI
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt */
+ if (unlikely(hw->mac_type >= e1000_82571 &&
+ !(icr & E1000_ICR_INT_ASSERTED)))
+ return IRQ_NONE;
+
/* Interrupt Auto-Mask...upon reading ICR,
* interrupts are masked. No need for the
* IMC write, but it does mean we should
@@ -3467,14 +3716,6 @@ e1000_intr(int irq, void *data)
atomic_inc(&adapter->irq_sem);
#endif
- if (unlikely(!icr)) {
-#ifdef CONFIG_E1000_NAPI
- if (hw->mac_type >= e1000_82571)
- e1000_irq_enable(adapter);
-#endif
- return IRQ_NONE; /* Not our interrupt */
- }
-
if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
hw->get_link_status = 1;
/* 80003ES2LAN workaround--
@@ -3495,13 +3736,20 @@ e1000_intr(int irq, void *data)
#ifdef CONFIG_E1000_NAPI
if (unlikely(hw->mac_type < e1000_82571)) {
+ /* disable interrupts, without the synchronize_irq bit */
atomic_inc(&adapter->irq_sem);
E1000_WRITE_REG(hw, IMC, ~0);
E1000_WRITE_FLUSH(hw);
}
- if (likely(netif_rx_schedule_prep(netdev)))
+ if (likely(netif_rx_schedule_prep(netdev))) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
__netif_rx_schedule(netdev);
- else
+ } else
+ /* this really should not happen! if it does it is basically a
+ * bug, but not a hard error, so enable ints and continue */
e1000_irq_enable(adapter);
#else
/* Writing IMC and IMS is needed for 82547.
@@ -3519,16 +3767,23 @@ e1000_intr(int irq, void *data)
E1000_WRITE_REG(hw, IMC, ~0);
}
+ adapter->total_tx_bytes = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_packets = 0;
+
for (i = 0; i < E1000_MAX_INTR; i++)
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
break;
+ if (likely(adapter->itr_setting & 3))
+ e1000_set_itr(adapter);
+
if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
e1000_irq_enable(adapter);
#endif
-
return IRQ_HANDLED;
}
@@ -3572,6 +3827,8 @@ e1000_clean(struct net_device *poll_dev, int *budget)
if ((!tx_cleaned && (work_done == 0)) ||
!netif_running(poll_dev)) {
quit_polling:
+ if (likely(adapter->itr_setting & 3))
+ e1000_set_itr(adapter);
netif_rx_complete(poll_dev);
e1000_irq_enable(adapter);
return 0;
@@ -3598,6 +3855,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
unsigned int count = 0;
#endif
boolean_t cleaned = FALSE;
+ unsigned int total_tx_bytes=0, total_tx_packets=0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
@@ -3609,13 +3867,19 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
+ if (cleaned) {
+ /* this packet count is wrong for TSO but has a
+ * tendency to make dynamic ITR change more
+ * towards bulk */
+ total_tx_packets++;
+ total_tx_bytes += buffer_info->skb->len;
+ }
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
- memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
+ tx_desc->upper.data = 0;
if (unlikely(++i == tx_ring->count)) i = 0;
}
-
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
#ifdef CONFIG_E1000_NAPI
@@ -3634,8 +3898,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
* sees the new next_to_clean.
*/
smp_mb();
- if (netif_queue_stopped(netdev))
+ if (netif_queue_stopped(netdev)) {
netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
}
if (adapter->detect_tx_hung) {
@@ -3673,6 +3939,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
netif_stop_queue(netdev);
}
}
+ adapter->total_tx_bytes += total_tx_bytes;
+ adapter->total_tx_packets += total_tx_packets;
return cleaned;
}
@@ -3752,6 +4020,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
unsigned int i;
int cleaned_count = 0;
boolean_t cleaned = FALSE;
+ unsigned int total_rx_bytes=0, total_rx_packets=0;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -3760,6 +4029,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
while (rx_desc->status & E1000_RXD_STAT_DD) {
struct sk_buff *skb;
u8 status;
+
#ifdef CONFIG_E1000_NAPI
if (*work_done >= work_to_do)
break;
@@ -3817,6 +4087,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
* done after the TBI_ACCEPT workaround above */
length -= 4;
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += length;
+ total_rx_packets++;
+
/* code added for copybreak, this should improve
* performance for small packets with large amounts
* of reassembly being done in the stack */
@@ -3832,12 +4106,11 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
/* save the skb in buffer_info as good */
buffer_info->skb = skb;
skb = new_skb;
- skb_put(skb, length);
}
- } else
- skb_put(skb, length);
-
+ /* else just continue with the old one */
+ }
/* end copybreak code */
+ skb_put(skb, length);
/* Receive Checksum Offload */
e1000_rx_checksum(adapter,
@@ -3886,6 +4159,8 @@ next_desc:
if (cleaned_count)
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ adapter->total_rx_packets += total_rx_packets;
+ adapter->total_rx_bytes += total_rx_bytes;
return cleaned;
}
@@ -3915,6 +4190,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
uint32_t length, staterr;
int cleaned_count = 0;
boolean_t cleaned = FALSE;
+ unsigned int total_rx_bytes=0, total_rx_packets=0;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
@@ -3999,7 +4275,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
goto copydone;
} /* if */
}
-
+
for (j = 0; j < adapter->rx_ps_pages; j++) {
if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j])))
break;
@@ -4019,6 +4295,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
pskb_trim(skb, skb->len - 4);
copydone:
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
e1000_rx_checksum(adapter, staterr,
le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
skb->protocol = eth_type_trans(skb, netdev);
@@ -4067,6 +4346,8 @@ next_desc:
if (cleaned_count)
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ adapter->total_rx_packets += total_rx_packets;
+ adapter->total_rx_bytes += total_rx_bytes;
return cleaned;
}
@@ -4234,7 +4515,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
skb = netdev_alloc_skb(netdev,
- adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+ adapter->rx_ps_bsize0 + NET_IP_ALIGN);
if (unlikely(!skb)) {
adapter->alloc_rx_buff_failed++;
@@ -4511,7 +4792,6 @@ e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
return E1000_SUCCESS;
}
-
void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
{
@@ -4534,12 +4814,12 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
if (adapter->hw.mac_type != e1000_ich8lan) {
- /* enable VLAN receive filtering */
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
- rctl |= E1000_RCTL_VFE;
- rctl &= ~E1000_RCTL_CFIEN;
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
- e1000_update_mng_vlan(adapter);
+ /* enable VLAN receive filtering */
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl |= E1000_RCTL_VFE;
+ rctl &= ~E1000_RCTL_CFIEN;
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ e1000_update_mng_vlan(adapter);
}
} else {
/* disable VLAN tag insert/strip */
@@ -4548,14 +4828,16 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
if (adapter->hw.mac_type != e1000_ich8lan) {
- /* disable VLAN filtering */
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
- rctl &= ~E1000_RCTL_VFE;
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
- if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
- e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- }
+ /* disable VLAN filtering */
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl &= ~E1000_RCTL_VFE;
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ if (adapter->mng_vlan_id !=
+ (uint16_t)E1000_MNG_VLAN_NONE) {
+ e1000_vlan_rx_kill_vid(netdev,
+ adapter->mng_vlan_id);
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ }
}
}
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index a464cb290621..18afc0c25dac 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -107,17 +107,16 @@ typedef enum {
#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
-#define E1000_WRITE_ICH8_REG(a, reg, value) ( \
+#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \
writel((value), ((a)->flash_address + reg)))
-#define E1000_READ_ICH8_REG(a, reg) ( \
+#define E1000_READ_ICH_FLASH_REG(a, reg) ( \
readl((a)->flash_address + reg))
-#define E1000_WRITE_ICH8_REG16(a, reg, value) ( \
+#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \
writew((value), ((a)->flash_address + reg)))
-#define E1000_READ_ICH8_REG16(a, reg) ( \
+#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \
readw((a)->flash_address + reg))
-
#endif /* _E1000_OSDEP_H_ */
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 9c3c1acefccc..cbfcd7f2889f 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -44,16 +44,6 @@
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
-/* Module Parameters are always initialized to -1, so that the driver
- * can tell the difference between no user specified value or the
- * user asking for the default value.
- * The true default values are loaded in when e1000_check_options is called.
- *
- * This is a GCC extension to ANSI C.
- * See the item "Labeled Elements in Initializers" in the section
- * "Extensions to the C Language Family" of the GCC documentation.
- */
-
#define E1000_PARAM(X, desc) \
static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static int num_##X = 0; \
@@ -67,7 +57,6 @@
*
* Default Value: 256
*/
-
E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
/* Receive Descriptor Count
@@ -77,7 +66,6 @@ E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
*
* Default Value: 256
*/
-
E1000_PARAM(RxDescriptors, "Number of receive descriptors");
/* User Specified Speed Override
@@ -90,7 +78,6 @@ E1000_PARAM(RxDescriptors, "Number of receive descriptors");
*
* Default Value: 0
*/
-
E1000_PARAM(Speed, "Speed setting");
/* User Specified Duplex Override
@@ -102,7 +89,6 @@ E1000_PARAM(Speed, "Speed setting");
*
* Default Value: 0
*/
-
E1000_PARAM(Duplex, "Duplex setting");
/* Auto-negotiation Advertisement Override
@@ -119,8 +105,9 @@ E1000_PARAM(Duplex, "Duplex setting");
*
* Default Value: 0x2F (copper); 0x20 (fiber)
*/
-
E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
+#define AUTONEG_ADV_DEFAULT 0x2F
+#define AUTONEG_ADV_MASK 0x2F
/* User Specified Flow Control Override
*
@@ -132,8 +119,8 @@ E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
*
* Default Value: Read flow control settings from the EEPROM
*/
-
E1000_PARAM(FlowControl, "Flow Control setting");
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
/* XsumRX - Receive Checksum Offload Enable/Disable
*
@@ -144,53 +131,54 @@ E1000_PARAM(FlowControl, "Flow Control setting");
*
* Default Value: 1
*/
-
E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
/* Transmit Interrupt Delay in units of 1.024 microseconds
+ * Tx interrupt delay typically needs to be set to something non-zero
*
* Valid Range: 0-65535
- *
- * Default Value: 64
*/
-
E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV 8
+#define MAX_TXDELAY 0xFFFF
+#define MIN_TXDELAY 0
/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
- *
- * Default Value: 0
*/
-
E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV 32
+#define MAX_TXABSDELAY 0xFFFF
+#define MIN_TXABSDELAY 0
/* Receive Interrupt Delay in units of 1.024 microseconds
+ * hardware will likely hang if you set this to anything but zero.
*
* Valid Range: 0-65535
- *
- * Default Value: 0
*/
-
E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define DEFAULT_RDTR 0
+#define MAX_RXDELAY 0xFFFF
+#define MIN_RXDELAY 0
/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
- *
- * Default Value: 128
*/
-
E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define DEFAULT_RADV 8
+#define MAX_RXABSDELAY 0xFFFF
+#define MIN_RXABSDELAY 0
/* Interrupt Throttle Rate (interrupts/sec)
*
- * Valid Range: 100-100000 (0=off, 1=dynamic)
- *
- * Default Value: 8000
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
*/
-
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR 3
+#define MAX_ITR 100000
+#define MIN_ITR 100
/* Enable Smart Power Down of the PHY
*
@@ -198,7 +186,6 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
*
* Default Value: 0 (disabled)
*/
-
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
/* Enable Kumeran Lock Loss workaround
@@ -207,33 +194,8 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
*
* Default Value: 1 (enabled)
*/
-
E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
-#define AUTONEG_ADV_DEFAULT 0x2F
-#define AUTONEG_ADV_MASK 0x2F
-#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
-
-#define DEFAULT_RDTR 0
-#define MAX_RXDELAY 0xFFFF
-#define MIN_RXDELAY 0
-
-#define DEFAULT_RADV 128
-#define MAX_RXABSDELAY 0xFFFF
-#define MIN_RXABSDELAY 0
-
-#define DEFAULT_TIDV 64
-#define MAX_TXDELAY 0xFFFF
-#define MIN_TXDELAY 0
-
-#define DEFAULT_TADV 64
-#define MAX_TXABSDELAY 0xFFFF
-#define MIN_TXABSDELAY 0
-
-#define DEFAULT_ITR 8000
-#define MAX_ITR 100000
-#define MIN_ITR 100
-
struct e1000_option {
enum { enable_option, range_option, list_option } type;
char *name;
@@ -510,15 +472,27 @@ e1000_check_options(struct e1000_adapter *adapter)
break;
case 1:
DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
- opt.name);
+ opt.name);
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
+ break;
+ case 3:
+ DPRINTK(PROBE, INFO,
+ "%s set to dynamic conservative mode\n",
+ opt.name);
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
break;
default:
e1000_validate_option(&adapter->itr, &opt,
- adapter);
+ adapter);
+ /* save the setting, because the dynamic bits change itr */
+ adapter->itr_setting = adapter->itr;
break;
}
} else {
- adapter->itr = opt.def;
+ adapter->itr_setting = opt.def;
+ adapter->itr = 20000;
}
}
{ /* Smart Power Down */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index c5ed635bce36..439f41338291 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -110,6 +110,8 @@
* 0.55: 22 Mar 2006: Add flow control (pause frame).
* 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
* 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
+ * 0.58: 30 Oct 2006: Added support for sideband management unit.
+ * 0.59: 30 Oct 2006: Added support for recoverable error.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -126,7 +128,7 @@
#else
#define DRIVERNAPI
#endif
-#define FORCEDETH_VERSION "0.57"
+#define FORCEDETH_VERSION "0.59"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@@ -174,11 +176,12 @@
#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
#define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */
#define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */
+#define DEV_HAS_MGMT_UNIT 0x1000 /* device supports management unit */
enum {
NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
-#define NVREG_IRQSTAT_MASK 0x1ff
+#define NVREG_IRQSTAT_MASK 0x81ff
NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
@@ -189,15 +192,16 @@ enum {
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
+#define NVREG_IRQ_RECOVER_ERROR 0x8000
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0040
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
-#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
+#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
- NVREG_IRQ_TX_FORCED))
+ NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3
@@ -222,6 +226,15 @@ enum {
#define NVREG_MAC_RESET_ASSERT 0x0F3
NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
+#define NVREG_XMITCTL_MGMT_ST 0x40000000
+#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
+#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
+#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
+#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
+#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
+#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
+#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
+#define NVREG_XMITCTL_HOST_LOADED 0x00004000
NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01
@@ -304,8 +317,8 @@ enum {
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK 0x000f
#define NVREG_MIISTAT_MASK2 0x000f
- NvRegUnknownSetupReg4 = 0x184,
-#define NVREG_UNKSETUP4_VAL 8
+ NvRegMIIMask = 0x184,
+#define NVREG_MII_LINKCHANGE 0x0008
NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
@@ -707,6 +720,7 @@ struct fe_priv {
unsigned int phy_model;
u16 gigabit;
int intr_test;
+ int recover_error;
/* General data: RO fields */
dma_addr_t ring_addr;
@@ -719,6 +733,7 @@ struct fe_priv {
u32 driver_data;
u32 register_size;
int rx_csum;
+ u32 mac_in_use;
void __iomem *base;
@@ -2443,6 +2458,23 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
dev->name, events);
}
+ if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
+ spin_lock(&np->lock);
+ /* disable interrupts on the nic */
+ if (!(np->msi_flags & NV_MSI_X_ENABLED))
+ writel(0, base + NvRegIrqMask);
+ else
+ writel(np->irqmask, base + NvRegIrqMask);
+ pci_push(base);
+
+ if (!np->in_shutdown) {
+ np->nic_poll_irq = np->irqmask;
+ np->recover_error = 1;
+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+ }
+ spin_unlock(&np->lock);
+ break;
+ }
#ifdef CONFIG_FORCEDETH_NAPI
if (events & NVREG_IRQ_RX_ALL) {
netif_rx_schedule(dev);
@@ -2673,6 +2705,20 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
spin_unlock_irqrestore(&np->lock, flags);
np->link_timeout = jiffies + LINK_TIMEOUT;
}
+ if (events & NVREG_IRQ_RECOVER_ERROR) {
+ spin_lock_irq(&np->lock);
+ /* disable interrupts on the nic */
+ writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
+ pci_push(base);
+
+ if (!np->in_shutdown) {
+ np->nic_poll_irq |= NVREG_IRQ_OTHER;
+ np->recover_error = 1;
+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+ }
+ spin_unlock_irq(&np->lock);
+ break;
+ }
if (events & (NVREG_IRQ_UNKNOWN)) {
printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
dev->name, events);
@@ -2902,6 +2948,42 @@ static void nv_do_nic_poll(unsigned long data)
}
np->nic_poll_irq = 0;
+ if (np->recover_error) {
+ np->recover_error = 0;
+ printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
+ if (netif_running(dev)) {
+ netif_tx_lock_bh(dev);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ nv_txrx_reset(dev);
+ /* drain rx queue */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+ /* reinit driver view of the rx queue */
+ set_bufsize(dev);
+ if (nv_init_ring(dev)) {
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ }
+ /* reinit nic view of the rx queue */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(base);
+
+ /* restart rx engine */
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ spin_unlock(&np->lock);
+ netif_tx_unlock_bh(dev);
+ }
+ }
+
/* FIXME: Do we need synchronize_irq(dev->irq) here? */
writel(mask, base + NvRegIrqMask);
@@ -4030,6 +4112,54 @@ static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
/* nothing to do */
};
+/* The mgmt unit and driver use a semaphore to access the phy during init */
+static int nv_mgmt_acquire_sema(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ int i;
+ u32 tx_ctrl, mgmt_sema;
+
+ for (i = 0; i < 10; i++) {
+ mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
+ if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
+ break;
+ msleep(500);
+ }
+
+ if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
+ return 0;
+
+ for (i = 0; i < 2; i++) {
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+ tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
+
+ /* verify that semaphore was acquired */
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+ if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
+ ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
+ return 1;
+ else
+ udelay(50);
+ }
+
+ return 0;
+}
+
+/* Indicate to mgmt unit whether driver is loaded or not */
+static void nv_mgmt_driver_loaded(struct net_device *dev, int loaded)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ u32 tx_ctrl;
+
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+ if (loaded)
+ tx_ctrl |= NVREG_XMITCTL_HOST_LOADED;
+ else
+ tx_ctrl &= ~NVREG_XMITCTL_HOST_LOADED;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
+}
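+
Taken together, the two helpers above implement a simple ownership handshake over bitfields in NvRegTransmitterControl: the driver may only claim the host semaphore while the management-unit semaphore reads as free. A stand-alone simulation of that claim step (a fake register variable stands in for the real MMIO read/write; sketch only):

	#include <stdio.h>

	#define MGMT_SEMA_MASK	0x00000f00	/* NVREG_XMITCTL_MGMT_SEMA_MASK */
	#define MGMT_SEMA_FREE	0x0
	#define HOST_SEMA_MASK	0x0000f000	/* NVREG_XMITCTL_HOST_SEMA_MASK */
	#define HOST_SEMA_ACQ	0x0000f000

	static unsigned int fake_tx_ctrl;	/* stands in for NvRegTransmitterControl */

	static int acquire(void)
	{
		if ((fake_tx_ctrl & MGMT_SEMA_MASK) != MGMT_SEMA_FREE)
			return 0;		/* management unit still owns the PHY */
		fake_tx_ctrl |= HOST_SEMA_ACQ;	/* claim the host semaphore */
		return (fake_tx_ctrl & HOST_SEMA_MASK) == HOST_SEMA_ACQ;
	}

	int main(void)
	{
		printf("acquired: %d\n", acquire());	/* prints 1: sema was free */
		return 0;
	}
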
+
static int nv_open(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
@@ -4085,7 +4215,7 @@ static int nv_open(struct net_device *dev)
NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
- writel(0, base + NvRegUnknownSetupReg4);
+ writel(0, base + NvRegMIIMask);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
@@ -4111,7 +4241,7 @@ static int nv_open(struct net_device *dev)
writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
base + NvRegAdapterControl);
writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
- writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
+ writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
if (np->wolenabled)
writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
@@ -4230,6 +4360,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
u8 __iomem *base;
int err, i;
u32 powerstate, txreg;
+ u32 phystate_orig = 0, phystate;
+ int phyinitialized = 0;
dev = alloc_etherdev(sizeof(struct fe_priv));
err = -ENOMEM;
@@ -4514,6 +4646,48 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->need_linktimer = 0;
}
+ /* clear phy state and temporarily halt phy interrupts */
+ writel(0, base + NvRegMIIMask);
+ phystate = readl(base + NvRegAdapterControl);
+ if (phystate & NVREG_ADAPTCTL_RUNNING) {
+ phystate_orig = 1;
+ phystate &= ~NVREG_ADAPTCTL_RUNNING;
+ writel(phystate, base + NvRegAdapterControl);
+ }
+ writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+
+ if (id->driver_data & DEV_HAS_MGMT_UNIT) {
+ writel(0x1, base + 0x204); pci_push(base);
+ msleep(500);
+ /* management unit running on the mac? */
+ np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
+ if (np->mac_in_use) {
+ u32 mgmt_sync;
+ /* management unit setup the phy already? */
+ mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
+ if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) {
+ if (!nv_mgmt_acquire_sema(dev)) {
+ for (i = 0; i < 5000; i++) {
+ msleep(1);
+ mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
+ if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY)
+ continue;
+ if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT)
+ phyinitialized = 1;
+ break;
+ }
+ } else {
+ /* we need to init the phy */
+ }
+ } else if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) {
+ /* phy is inited by SMU */
+ phyinitialized = 1;
+ } else {
+ /* we need to init the phy */
+ }
+ }
+ }
+
/* find a suitable phy */
for (i = 1; i <= 32; i++) {
int id1, id2;
@@ -4545,8 +4719,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
goto out_error;
}
- /* reset it */
- phy_init(dev);
+ if (!phyinitialized) {
+ /* reset it */
+ phy_init(dev);
+ }
+
+ if (id->driver_data & DEV_HAS_MGMT_UNIT) {
+ nv_mgmt_driver_loaded(dev, 1);
+ }
/* set default link speed settings */
np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
@@ -4565,6 +4745,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
return 0;
out_error:
+ if (phystate_orig)
+ writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
+ if (np->mac_in_use)
+ nv_mgmt_driver_loaded(dev, 0);
pci_set_drvdata(pci_dev, NULL);
out_freering:
free_rings(dev);
@@ -4594,6 +4778,9 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
writel(np->orig_mac[0], base + NvRegMacAddrA);
writel(np->orig_mac[1], base + NvRegMacAddrB);
+ if (np->mac_in_use)
+ nv_mgmt_driver_loaded(dev, 0);
+
/* free all structures */
free_rings(dev);
iounmap(get_hwbase(dev));
@@ -4603,6 +4790,50 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
pci_set_drvdata(pci_dev, NULL);
}
+#ifdef CONFIG_PM
+static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ goto out;
+
+ netif_device_detach(dev);
+
+ // Gross.
+ nv_close(dev);
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+out:
+ return 0;
+}
+
+static int nv_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ int rc = 0;
+
+ if (!netif_running(dev))
+ goto out;
+
+ netif_device_attach(dev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ rc = nv_open(dev);
+out:
+ return rc;
+}
+#else
+#define nv_suspend NULL
+#define nv_resume NULL
+#endif /* CONFIG_PM */
+
static struct pci_device_id pci_tbl[] = {
{ /* nForce Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
@@ -4658,43 +4889,59 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP67 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP67 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP67 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP67 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{0,},
};
@@ -4704,9 +4951,10 @@ static struct pci_driver driver = {
.id_table = pci_tbl,
.probe = nv_probe,
.remove = __devexit_p(nv_remove),
+ .suspend = nv_suspend,
+ .resume = nv_resume,
};
-
static int __init init_nic(void)
{
printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index cb3958704a87..889d3a13e95e 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -779,7 +779,8 @@ static int fs_init_phy(struct net_device *dev)
fep->oldspeed = 0;
fep->oldduplex = -1;
if(fep->fpi->bus_id)
- phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0);
+ phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
else {
printk("No phy bus ID specified in BSP code\n");
return -EINVAL;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index a06d8d1aaceb..baa35144134c 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -9,7 +9,7 @@
* Author: Andy Fleming
* Maintainer: Kumar Gala
*
- * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -133,6 +133,9 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget);
#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void gfar_netpoll(struct net_device *dev);
+#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -260,6 +263,9 @@ static int gfar_probe(struct platform_device *pdev)
dev->poll = gfar_poll;
dev->weight = GFAR_DEV_WEIGHT;
#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = gfar_netpoll;
+#endif
dev->stop = gfar_close;
dev->get_stats = gfar_get_stats;
dev->change_mtu = gfar_change_mtu;
@@ -392,6 +398,38 @@ static int gfar_remove(struct platform_device *pdev)
}
+/* Reads the controller's registers to determine what interface
+ * connects it to the PHY.
+ */
+static phy_interface_t gfar_get_interface(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ u32 ecntrl = gfar_read(&priv->regs->ecntrl);
+
+ if (ecntrl & ECNTRL_SGMII_MODE)
+ return PHY_INTERFACE_MODE_SGMII;
+
+ if (ecntrl & ECNTRL_TBI_MODE) {
+ if (ecntrl & ECNTRL_REDUCED_MODE)
+ return PHY_INTERFACE_MODE_RTBI;
+ else
+ return PHY_INTERFACE_MODE_TBI;
+ }
+
+ if (ecntrl & ECNTRL_REDUCED_MODE) {
+ if (ecntrl & ECNTRL_REDUCED_MII_MODE)
+ return PHY_INTERFACE_MODE_RMII;
+ else
+ return PHY_INTERFACE_MODE_RGMII;
+ }
+
+ if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+ return PHY_INTERFACE_MODE_GMII;
+
+ return PHY_INTERFACE_MODE_MII;
+}
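+
In other words, ECNTRL is decoded in priority order: SGMII, then TBI/RTBI, then the reduced-pin variants, with GMII/MII as the fallback. A tiny stand-alone check of the reduced-pin branch (bit values copied from the gianfar.h hunk later in this patch; the harness is illustrative):

	#include <stdio.h>

	#define ECNTRL_REDUCED_MODE	0x00000010
	#define ECNTRL_REDUCED_MII_MODE	0x00000004

	int main(void)
	{
		unsigned int ecntrl = ECNTRL_REDUCED_MODE;	/* RGMII wiring, RMII bit clear */

		printf("%s\n", (ecntrl & ECNTRL_REDUCED_MII_MODE) ? "RMII" : "RGMII");
		return 0;
	}
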
+
+
/* Initializes driver's PHY state, and attaches to the PHY.
* Returns 0 on success.
*/
@@ -403,6 +441,7 @@ static int init_phy(struct net_device *dev)
SUPPORTED_1000baseT_Full : 0;
struct phy_device *phydev;
char phy_id[BUS_ID_SIZE];
+ phy_interface_t interface;
priv->oldlink = 0;
priv->oldspeed = 0;
@@ -410,7 +449,9 @@ static int init_phy(struct net_device *dev)
snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);
- phydev = phy_connect(dev, phy_id, &adjust_link, 0);
+ interface = gfar_get_interface(dev);
+
+ phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);
if (IS_ERR(phydev)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
@@ -1536,6 +1577,33 @@ static int gfar_poll(struct net_device *dev, int *budget)
}
#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void gfar_netpoll(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* If the device has multiple interrupts, run tx/rx */
+ if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ disable_irq(priv->interruptTransmit);
+ disable_irq(priv->interruptReceive);
+ disable_irq(priv->interruptError);
+ gfar_interrupt(priv->interruptTransmit, dev);
+ enable_irq(priv->interruptError);
+ enable_irq(priv->interruptReceive);
+ enable_irq(priv->interruptTransmit);
+ } else {
+ disable_irq(priv->interruptTransmit);
+ gfar_interrupt(priv->interruptTransmit, dev);
+ enable_irq(priv->interruptTransmit);
+ }
+}
+#endif
+
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id)
{
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 9e81a50cf2be..39e9e321fcbc 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -160,7 +160,10 @@ extern const char gfar_driver_version[];
#define ECNTRL_INIT_SETTINGS 0x00001000
#define ECNTRL_TBI_MODE 0x00000020
+#define ECNTRL_REDUCED_MODE 0x00000010
#define ECNTRL_R100 0x00000008
+#define ECNTRL_REDUCED_MII_MODE 0x00000004
+#define ECNTRL_SGMII_MODE 0x00000002
#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 91326ea3e12b..f970bfbb9db2 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -31,7 +31,16 @@
#include <asm/amigahw.h>
#include <linux/zorro.h>
-#include "8390.h"
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+#define ei_inb(port) in_8(port)
+#define ei_outb(val,port) out_8(port,val)
+#define ei_inb_p(port) in_8(port)
+#define ei_outb_p(val,port) out_8(port,val)
+
+static const char version[] =
+ "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include "lib8390.c"
#define NE_EN0_DCFG (0x0e*2)
@@ -100,7 +109,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
};
- dev = alloc_ei_netdev();
+ dev = ____alloc_ei_netdev(0);
if (!dev)
return -ENOMEM;
SET_MODULE_OWNER(dev);
@@ -117,7 +126,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
dev->irq = IRQ_AMIGA_PORTS;
/* Install the Interrupt handler */
- if (request_irq(IRQ_AMIGA_PORTS, ei_interrupt, IRQF_SHARED, "Hydra Ethernet",
+ if (request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, "Hydra Ethernet",
dev)) {
free_netdev(dev);
return -EAGAIN;
@@ -139,10 +148,10 @@ static int __devinit hydra_init(struct zorro_dev *z)
dev->open = &hydra_open;
dev->stop = &hydra_close;
#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = ei_poll;
+ dev->poll_controller = __ei_poll;
#endif
- NS8390_init(dev, 0);
+ __NS8390_init(dev, 0);
err = register_netdev(dev);
if (err) {
@@ -164,7 +173,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
static int hydra_open(struct net_device *dev)
{
- ei_open(dev);
+ __ei_open(dev);
return 0;
}
@@ -172,7 +181,7 @@ static int hydra_close(struct net_device *dev)
{
if (ei_debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
- ei_close(dev);
+ __ei_close(dev);
return 0;
}
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
new file mode 100644
index 000000000000..e726c06b8dc6
--- /dev/null
+++ b/drivers/net/lib8390.c
@@ -0,0 +1,1097 @@
+/* 8390.c: A general NS8390 ethernet driver core for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ This is the chip-specific code for many 8390-based ethernet adaptors.
+ This is not a complete driver, it must be combined with board-specific
+ code such as ne.c, wd.c, 3c503.c, etc.
+
+ Seeing how at least eight drivers use this code (not counting the
+ PCMCIA ones), it is easy to break some card by what seems like
+ a simple innocent change. Please contact me or Donald if you think
+ you have found something that needs changing. -- PG
+
+
+ Changelog:
+
+ Paul Gortmaker : remove set_bit lock, other cleanups.
+ Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
+ ei_block_input() for eth_io_copy_and_sum().
+ Paul Gortmaker : exchange static int ei_pingpong for a #define,
+ also add better Tx error handling.
+ Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
+ Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
+ Paul Gortmaker : tweak ANK's above multicast changes a bit.
+ Paul Gortmaker : update packet statistics for v2.1.x
+ Alan Cox : support arbitrary stupid port mappings on the
+ 68K Macintosh. Support >16bit I/O spaces
+ Paul Gortmaker : add kmod support for auto-loading of the 8390
+ module by all drivers that require it.
+ Alan Cox : Spinlocking work, added 'BUG_83C690'
+ Paul Gortmaker : Separate out Tx timeout code from Tx path.
+ Paul Gortmaker : Remove old unused single Tx buffer code.
+ Hayato Fujiwara : Add m32r support.
+ Paul Gortmaker : use skb_padto() instead of stack scratch area
+
+ Sources:
+ The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
+
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#define NS8390_CORE
+#include "8390.h"
+
+#define BUG_83C690
+
+/* These are the operational function interfaces to board-specific
+ routines.
+ void reset_8390(struct net_device *dev)
+ Resets the board associated with DEV, including a hardware reset of
+ the 8390. This is only called when there is a transmit timeout, and
+ it is always followed by 8390_init().
+ void block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+ Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
+ "page" value uses the 8390's 256-byte pages.
+ void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
+ Read the 4 byte, page aligned 8390 header. *If* there is a
+ subsequent read, it will be of the rest of the packet.
+ void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+ Read COUNT bytes from the packet buffer into the skb data area. Start
+ reading from RING_OFFSET, the address as the 8390 sees it. This will always
+ follow the read of the 8390 header.
+*/
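+
A minimal sketch of how a board driver supplies those four routines; the ei_device field names match the #defines just below, while the board_* functions are hypothetical placeholders (kernel headers and the board routines themselves are assumed, so this is an outline, not compilable as-is):

	/* Sketch only: a board driver installs its hardware-access hooks
	 * in the ei_device private area before registering the netdev.
	 * board_reset/board_output/board_input/board_get_hdr are
	 * hypothetical stand-ins for the real board-specific routines. */
	static void example_install_hooks(struct net_device *dev)
	{
		struct ei_device *ei_local = netdev_priv(dev);

		ei_local->reset_8390   = board_reset;
		ei_local->block_output = board_output;
		ei_local->block_input  = board_input;
		ei_local->get_8390_hdr = board_get_hdr;
	}
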
+#define ei_reset_8390 (ei_local->reset_8390)
+#define ei_block_output (ei_local->block_output)
+#define ei_block_input (ei_local->block_input)
+#define ei_get_8390_hdr (ei_local->get_8390_hdr)
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef ei_debug
+int ei_debug = 1;
+#endif
+
+/* Index to functions. */
+static void ei_tx_intr(struct net_device *dev);
+static void ei_tx_err(struct net_device *dev);
+static void ei_tx_timeout(struct net_device *dev);
+static void ei_receive(struct net_device *dev);
+static void ei_rx_overrun(struct net_device *dev);
+
+/* Routines generic to NS8390-based boards. */
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page);
+static void set_multicast_list(struct net_device *dev);
+static void do_set_multicast_list(struct net_device *dev);
+static void __NS8390_init(struct net_device *dev, int startp);
+
+/*
+ * SMP and the 8390 setup.
+ *
+ * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
+ * a page register that controls bank and packet buffer access. We guard
+ * this with ei_local->page_lock. Nobody should assume or set the page other
+ * than zero when the lock is not held. Lock holders must restore page 0
+ * before unlocking. Even pure readers must take the lock to protect in
+ * page 0.
+ *
+ * To make life difficult the chip can also be very slow. We therefore can't
+ * just use spinlocks. For the longer lockups we disable the irq the device
+ * sits on and hold the lock. We must hold the lock because there is a dual
+ * processor case other than interrupts (get stats/set multicast list in
+ * parallel with each other and transmit).
+ *
+ * Note: in theory we can just disable the irq on the card _but_ there is
+ * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
+ * enter lock, take the queued irq. So we waddle instead of flying.
+ *
+ * Finally by special arrangement for the purpose of being generally
+ * annoying the transmit function is called bh atomic. That places
+ * restrictions on the user context callers as disable_irq won't save
+ * them.
+ */
+
+
+
+/**
+ * ei_open - Open/initialize the board.
+ * @dev: network device to initialize
+ *
+ * This routine goes all-out, setting everything
+ * up anew at each open, even though many of these registers should only
+ * need to be set once at boot.
+ */
+static int __ei_open(struct net_device *dev)
+{
+ unsigned long flags;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+
+ /* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
+ wrapper that does e.g. media check & then calls ei_tx_timeout. */
+ if (dev->tx_timeout == NULL)
+ dev->tx_timeout = ei_tx_timeout;
+ if (dev->watchdog_timeo <= 0)
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /*
+ * Grab the page lock so we own the register set, then call
+ * the init function.
+ */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ __NS8390_init(dev, 1);
+ /* Set the flag before we drop the lock. That way the IRQ arrives
+ after it's set and we get no silly warnings */
+ netif_start_queue(dev);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ ei_local->irqlock = 0;
+ return 0;
+}
+
+/**
+ * ei_close - shut down network device
+ * @dev: network device to close
+ *
+ * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
+ */
+static int __ei_close(struct net_device *dev)
+{
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned long flags;
+
+ /*
+ * Hold the page lock during close
+ */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ __NS8390_init(dev, 0);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+/**
+ * ei_tx_timeout - handle transmit time out condition
+ * @dev: network device which has apparently fallen asleep
+ *
+ * Called by kernel when device never acknowledges a transmit has
+ * completed (or failed) - i.e. never posted a Tx related interrupt.
+ */
+
+static void ei_tx_timeout(struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ unsigned long flags;
+
+#if defined(CONFIG_M32R) && defined(CONFIG_SMP)
+ unsigned long icucr;
+
+ local_irq_save(flags);
+ icucr = inl(M32R_ICU_CR1_PORTL);
+ icucr |= M32R_ICUCR_ISMOD11;
+ outl(icucr, M32R_ICU_CR1_PORTL);
+ local_irq_restore(flags);
+#endif
+ ei_local->stat.tx_errors++;
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ txsr = ei_inb(e8390_base+EN0_TSR);
+ isr = ei_inb(e8390_base+EN0_ISR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
+
+ if (!isr && !ei_local->stat.tx_packets)
+ {
+ /* The 8390 probably hasn't gotten on the cable yet. */
+ ei_local->interface_num ^= 1; /* Try a different xcvr. */
+ }
+
+ /* Ugly but a reset can be slow, yet must be protected */
+
+ disable_irq_nosync_lockdep(dev->irq);
+ spin_lock(&ei_local->page_lock);
+
+ /* Try to restart the card. Perhaps the user has fixed something. */
+ ei_reset_8390(dev);
+ __NS8390_init(dev, 1);
+
+ spin_unlock(&ei_local->page_lock);
+ enable_irq_lockdep(dev->irq);
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_start_xmit - begin packet transmission
+ * @skb: packet to be sent
+ * @dev: network device to which packet is sent
+ *
+ * Sends a packet to an 8390 network device.
+ */
+
+static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int send_length = skb->len, output_page;
+ unsigned long flags;
+ char buf[ETH_ZLEN];
+ char *data = skb->data;
+
+ if (skb->len < ETH_ZLEN) {
+ memset(buf, 0, ETH_ZLEN); /* more efficient than doing just the needed bits */
+ memcpy(buf, data, skb->len);
+ send_length = ETH_ZLEN;
+ data = buf;
+ }
+
+ /* Mask interrupts from the ethercard.
+ SMP: We have to grab the lock here otherwise the IRQ handler
+ on another CPU can flip the register window and race the IRQ mask set.
+ Without the lock we end up trashing the mcast filter instead of
+ disabling irqs */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ ei_outb_p(0x00, e8390_base + EN0_IMR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+
+ /*
+ * Slow phase with lock held.
+ */
+
+ disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
+
+ spin_lock(&ei_local->page_lock);
+
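+ /* Flag the lock so __ei_interrupt() treats any interrupt that sneaks in while the 8390 is masked as not ours */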
+ ei_local->irqlock = 1;
+
+ /*
+ * We have two Tx slots available for use. Find the first free
+ * slot, and then perform some sanity checks. With two Tx bufs,
+ * you get very close to transmitting back-to-back packets. With
+ * only one Tx buf, the transmitter sits idle while you reload the
+ * card, leaving a substantial gap between each transmitted packet.
+ */
+
+ if (ei_local->tx1 == 0)
+ {
+ output_page = ei_local->tx_start_page;
+ ei_local->tx1 = send_length;
+ if (ei_debug && ei_local->tx2 > 0)
+ printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
+ }
+ else if (ei_local->tx2 == 0)
+ {
+ output_page = ei_local->tx_start_page + TX_PAGES/2;
+ ei_local->tx2 = send_length;
+ if (ei_debug && ei_local->tx1 > 0)
+ printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
+ }
+ else
+ { /* We should never get here. */
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ ei_local->irqlock = 0;
+ netif_stop_queue(dev);
+ ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ spin_unlock(&ei_local->page_lock);
+ enable_irq_lockdep_irqrestore(dev->irq, &flags);
+ ei_local->stat.tx_errors++;
+ return 1;
+ }
+
+ /*
+ * Okay, now upload the packet and trigger a send if the transmitter
+ * isn't already sending. If it is busy, the interrupt handler will
+ * trigger the send later, upon receiving a Tx done interrupt.
+ */
+
+ ei_block_output(dev, send_length, data, output_page);
+
+ if (! ei_local->txing)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, output_page);
+ dev->trans_start = jiffies;
+ if (output_page == ei_local->tx_start_page)
+ {
+ ei_local->tx1 = -1;
+ ei_local->lasttx = -1;
+ }
+ else
+ {
+ ei_local->tx2 = -1;
+ ei_local->lasttx = -2;
+ }
+ }
+ else ei_local->txqueue++;
+
+ if (ei_local->tx1 && ei_local->tx2)
+ netif_stop_queue(dev);
+ else
+ netif_start_queue(dev);
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ spin_unlock(&ei_local->page_lock);
+ enable_irq_lockdep_irqrestore(dev->irq, &flags);
+
+ dev_kfree_skb(skb);
+ ei_local->stat.tx_bytes += send_length;
+
+ return 0;
+}
+
+/**
+ * ei_interrupt - handle the interrupts from an 8390
+ * @irq: interrupt number
+ * @dev_id: a pointer to the net_device
+ *
+ * Handle the ether interface interrupts. We pull packets from
+ * the 8390 via the card specific functions and fire them at the networking
+ * stack. We also handle transmit completions and wake the transmit path if
+ * necessary. We also update the counters and do other housekeeping as
+ * needed.
+ */
+
+static irqreturn_t __ei_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ unsigned long e8390_base = dev->base_addr;
+ int interrupts, nr_serviced = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ /*
+ * Protect the irq test too.
+ */
+
+ spin_lock(&ei_local->page_lock);
+
+ if (ei_local->irqlock)
+ {
+#if 1 /* This might just be an interrupt for a PCI device sharing this line */
+ /* The "irqlock" check is only for testing. */
+ printk(ei_local->irqlock
+ ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
+ : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+ dev->name, ei_inb_p(e8390_base + EN0_ISR),
+ ei_inb_p(e8390_base + EN0_IMR));
+#endif
+ spin_unlock(&ei_local->page_lock);
+ return IRQ_NONE;
+ }
+
+ /* Change to page 0 and read the intr status reg. */
+ ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+ if (ei_debug > 3)
+ printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
+ ei_inb_p(e8390_base + EN0_ISR));
+
+ /* !!Assumption!! -- we stay in page 0. Don't break this. */
+ while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0
+ && ++nr_serviced < MAX_SERVICE)
+ {
+ if (!netif_running(dev)) {
+ printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
+ /* rmk - acknowledge the interrupts */
+ ei_outb_p(interrupts, e8390_base + EN0_ISR);
+ interrupts = 0;
+ break;
+ }
+ if (interrupts & ENISR_OVER)
+ ei_rx_overrun(dev);
+ else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
+ {
+ /* Got a good (?) packet. */
+ ei_receive(dev);
+ }
+ /* Push the next to-transmit packet through. */
+ if (interrupts & ENISR_TX)
+ ei_tx_intr(dev);
+ else if (interrupts & ENISR_TX_ERR)
+ ei_tx_err(dev);
+
+ if (interrupts & ENISR_COUNTERS)
+ {
+ ei_local->stat.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2);
+ ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
+ }
+
+ /* Ignore any RDC interrupts that make it back to here. */
+ if (interrupts & ENISR_RDC)
+ {
+ ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);
+ }
+
+ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+ }
+
+ if (interrupts && ei_debug)
+ {
+ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+ if (nr_serviced >= MAX_SERVICE)
+ {
+ /* 0xFF is valid for a card removal */
+ if(interrupts!=0xFF)
+ printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
+ dev->name, interrupts);
+ ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
+ } else {
+ printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
+ ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
+ }
+ }
+ spin_unlock(&ei_local->page_lock);
+ return IRQ_RETVAL(nr_serviced > 0);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void __ei_poll(struct net_device *dev)
+{
+ disable_irq_lockdep(dev->irq);
+ __ei_interrupt(dev->irq, dev);
+ enable_irq_lockdep(dev->irq);
+}
+#endif
+
+/**
+ * ei_tx_err - handle transmitter error
+ * @dev: network device which threw the exception
+ *
+ * A transmitter error has happened. Most likely excess collisions (which
+ * is a fairly normal condition). If the error is one where the Tx will
+ * have been aborted, we try and send another one right away, instead of
+ * letting the failed packet sit and collect dust in the Tx buffer. This
+ * is a much better solution as it avoids kernel based Tx timeouts, and
+ * an unnecessary card reset.
+ *
+ * Called with lock held.
+ */
+
+static void ei_tx_err(struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
+ unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
+
+#ifdef VERBOSE_ERROR_DUMP
+ printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
+ if (txsr & ENTSR_ABT)
+ printk("excess-collisions ");
+ if (txsr & ENTSR_ND)
+ printk("non-deferral ");
+ if (txsr & ENTSR_CRS)
+ printk("lost-carrier ");
+ if (txsr & ENTSR_FU)
+ printk("FIFO-underrun ");
+ if (txsr & ENTSR_CDH)
+ printk("lost-heartbeat ");
+ printk("\n");
+#endif
+
+ ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
+
+ if (tx_was_aborted)
+ ei_tx_intr(dev);
+ else
+ {
+ ei_local->stat.tx_errors++;
+ if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
+ if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
+ if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++;
+ }
+}
+
+/**
+ * ei_tx_intr - transmit interrupt handler
+ * @dev: network device for which tx intr is handled
+ *
+ * We have finished a transmit: check for errors and then trigger the next
+ * packet to be sent. Called with lock held.
+ */
+
+static void ei_tx_intr(struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int status = ei_inb(e8390_base + EN0_TSR);
+
+ ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
+
+ /*
+ * There are two Tx buffers, see which one finished, and trigger
+ * the send of another one if it exists.
+ */
+ ei_local->txqueue--;
+
+ if (ei_local->tx1 < 0)
+ {
+ if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
+ printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx1);
+ ei_local->tx1 = 0;
+ if (ei_local->tx2 > 0)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
+ dev->trans_start = jiffies;
+ ei_local->tx2 = -1;
+ ei_local->lasttx = 2;
+ }
+ else ei_local->lasttx = 20, ei_local->txing = 0;
+ }
+ else if (ei_local->tx2 < 0)
+ {
+ if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
+ printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx2);
+ ei_local->tx2 = 0;
+ if (ei_local->tx1 > 0)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ ei_local->tx1 = -1;
+ ei_local->lasttx = 1;
+ }
+ else
+ ei_local->lasttx = 10, ei_local->txing = 0;
+ }
+// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
+// dev->name, ei_local->lasttx);
+
+ /* Minimize Tx latency: update the statistics after we restart TXing. */
+ if (status & ENTSR_COL)
+ ei_local->stat.collisions++;
+ if (status & ENTSR_PTX)
+ ei_local->stat.tx_packets++;
+ else
+ {
+ ei_local->stat.tx_errors++;
+ if (status & ENTSR_ABT)
+ {
+ ei_local->stat.tx_aborted_errors++;
+ ei_local->stat.collisions += 16;
+ }
+ if (status & ENTSR_CRS)
+ ei_local->stat.tx_carrier_errors++;
+ if (status & ENTSR_FU)
+ ei_local->stat.tx_fifo_errors++;
+ if (status & ENTSR_CDH)
+ ei_local->stat.tx_heartbeat_errors++;
+ if (status & ENTSR_OWC)
+ ei_local->stat.tx_window_errors++;
+ }
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_receive - receive some packets
+ * @dev: network device with which receive will be run
+ *
+ * We have a good packet(s), get it/them out of the buffers.
+ * Called with lock held.
+ */
+
+static void ei_receive(struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned char rxing_page, this_frame, next_frame;
+ unsigned short current_offset;
+ int rx_pkt_count = 0;
+ struct e8390_pkt_hdr rx_frame;
+ int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
+
+ while (++rx_pkt_count < 10)
+ {
+ int pkt_len, pkt_stat;
+
+ /* Get the rx page (incoming packet pointer). */
+ ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
+ rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
+ ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+
+ /* Remove one frame from the ring. Boundary is always a page behind. */
+ this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
+ if (this_frame >= ei_local->stop_page)
+ this_frame = ei_local->rx_start_page;
+
+ /* Someday we'll omit the previous check, iff we never get this message.
+ (There is at least one clone that is claimed to have a problem.)
+
+ Keep quiet if it looks like a card removal. One problem here
+ is that some clones crash in roughly the same way.
+ */
+ if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
+ printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
+ dev->name, this_frame, ei_local->current_page);
+
+ if (this_frame == rxing_page) /* Read all the frames? */
+ break; /* Done for now */
+
+ current_offset = this_frame << 8;
+ ei_get_8390_hdr(dev, &rx_frame, this_frame);
+
+ pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
+ pkt_stat = rx_frame.status;
+
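+ /* Estimate how many 256-byte rx pages the 4-byte header plus data occupy */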
+ next_frame = this_frame + 1 + ((pkt_len+4)>>8);
+
+ /* Check for the bogosity warned about in the 3c503 book: the status byte
+ is never written. This happened a lot during testing! This code should be
+ cleaned up someday. */
+ if (rx_frame.next != next_frame
+ && rx_frame.next != next_frame + 1
+ && rx_frame.next != next_frame - num_rx_pages
+ && rx_frame.next != next_frame + 1 - num_rx_pages) {
+ ei_local->current_page = rxing_page;
+ ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
+ ei_local->stat.rx_errors++;
+ continue;
+ }
+
+ if (pkt_len < 60 || pkt_len > 1518)
+ {
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
+ dev->name, rx_frame.count, rx_frame.status,
+ rx_frame.next);
+ ei_local->stat.rx_errors++;
+ ei_local->stat.rx_length_errors++;
+ }
+ else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
+ {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ ei_local->stat.rx_dropped++;
+ break;
+ }
+ else
+ {
+ skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
+ skb->dev = dev;
+ skb_put(skb, pkt_len); /* Make room */
+ ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ ei_local->stat.rx_packets++;
+ ei_local->stat.rx_bytes += pkt_len;
+ if (pkt_stat & ENRSR_PHY)
+ ei_local->stat.multicast++;
+ }
+ }
+ else
+ {
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ dev->name, rx_frame.status, rx_frame.next,
+ rx_frame.count);
+ ei_local->stat.rx_errors++;
+ /* NB: The NIC counts CRC, frame and missed errors. */
+ if (pkt_stat & ENRSR_FO)
+ ei_local->stat.rx_fifo_errors++;
+ }
+ next_frame = rx_frame.next;
+
+ /* This _should_ never happen: it's here for avoiding bad clones. */
+ if (next_frame >= ei_local->stop_page) {
+ printk("%s: next frame inconsistency, %#2x\n", dev->name,
+ next_frame);
+ next_frame = ei_local->rx_start_page;
+ }
+ ei_local->current_page = next_frame;
+ ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
+ }
+
+ /* We used to also ack ENISR_OVER here, but that would sometimes mask
+ a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
+ ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
+ return;
+}
+
+/**
+ * ei_rx_overrun - handle receiver overrun
+ * @dev: network device which threw exception
+ *
+ * We have a receiver overrun: we have to kick the 8390 to get it started
+ * again. Problem is that you have to kick it exactly as NS prescribes in
+ * the updated datasheets, or "the NIC may act in an unpredictable manner."
+ * This includes causing "the NIC to defer indefinitely when it is stopped
+ * on a busy network." Ugh.
+ * Called with lock held. Don't call this with the interrupts off or your
+ * computer will hate you - it takes 10ms or so.
+ */
+
+static void ei_rx_overrun(struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ unsigned char was_txing, must_resend = 0;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+
+ /*
+ * Record whether a Tx was in progress and then issue the
+ * stop command.
+ */
+ was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
+ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
+ ei_local->stat.rx_over_errors++;
+
+ /*
+ * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
+ * Early datasheets said to poll the reset bit, but now they say that
+ * it "is not a reliable indicator and subsequently should be ignored."
+ * We wait at least 10ms.
+ */
+
+ mdelay(10);
+
+ /*
+ * Reset RBCR[01] back to zero as per magic incantation.
+ */
+ ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
+ ei_outb_p(0x00, e8390_base+EN0_RCNTHI);
+
+ /*
+ * See if any Tx was interrupted or not. According to NS, this
+ * step is vital, and skipping it will cause no end of havoc.
+ */
+
+ if (was_txing)
+ {
+ unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
+ if (!tx_completed)
+ must_resend = 1;
+ }
+
+ /*
+ * Have to enter loopback mode and then restart the NIC before
+ * you are allowed to slurp packets up off the ring.
+ */
+ ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
+ ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
+
+ /*
+ * Clear the Rx ring of all the debris, and ack the interrupt.
+ */
+ ei_receive(dev);
+ ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);
+
+ /*
+ * Leave loopback mode, and resend any packet that got stopped.
+ */
+ ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
+ if (must_resend)
+ ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
+}
+
+/*
+ * Collect the stats. This is called unlocked and from several contexts.
+ */
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned long flags;
+
+ /* If the card is stopped, just return the present stats. */
+ if (!netif_running(dev))
+ return &ei_local->stat;
+
+ spin_lock_irqsave(&ei_local->page_lock,flags);
+ /* Read the counter registers, assuming we are in page 0. */
+ ei_local->stat.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ return &ei_local->stat;
+}
+
+/*
+ * Form the 64 bit 8390 multicast table from the linked list of addresses
+ * associated with this dev structure.
+ */
+
+static inline void make_mc_bits(u8 *bits, struct net_device *dev)
+{
+ struct dev_mc_list *dmi;
+
+ for (dmi=dev->mc_list; dmi; dmi=dmi->next)
+ {
+ u32 crc;
+ if (dmi->dmi_addrlen != ETH_ALEN)
+ {
+ printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
+ continue;
+ }
+ crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+ /*
+ * The 8390 uses the 6 most significant bits of the
+ * CRC to index the multicast table.
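+ * For example, crc = 0xE0000000 sets bit 0 of bits[7].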
+ */
+ bits[crc>>29] |= (1<<((crc>>26)&7));
+ }
+}
+
+/**
+ * do_set_multicast_list - set/clear multicast filter
+ * @dev: net device for which multicast filter is adjusted
+ *
+ * Set or clear the multicast filter for this adaptor. May be called
+ * from a BH in 2.1.x. Must be called with lock held.
+ */
+
+static void do_set_multicast_list(struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ int i;
+ struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+
+ if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
+ {
+ memset(ei_local->mcfilter, 0, 8);
+ if (dev->mc_list)
+ make_mc_bits(ei_local->mcfilter, dev);
+ }
+ else
+ memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */
+
+ /*
+ * DP8390 manuals don't specify any magic sequence for altering
+ * the multicast regs on an already running card. To be safe, we
+ * ensure multicast mode is off prior to loading up the new hash
+ * table. If this proves to be not enough, we can always resort
+ * to stopping the NIC, loading the table and then restarting.
+ *
+ * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC
+ * Elite16) appear to be write-only. The NS 8390 data sheet lists
+ * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and
+ * Ultra32 EISA) appears to have this bug fixed.
+ */
+
+ if (netif_running(dev))
+ ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
+ ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
+ for(i = 0; i < 8; i++)
+ {
+ ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
+#ifndef BUG_83C690
+ if(ei_inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i])
+ printk(KERN_ERR "Multicast filter read/write mismap %d\n",i);
+#endif
+ }
+ ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
+
+ if(dev->flags&IFF_PROMISC)
+ ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
+ else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
+ ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
+ else
+ ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
+}
+
+/*
+ * Called without lock held. This is invoked from user context and may
+ * be parallel to just about everything else. It's also fairly quick and
+ * not called too often. We must protect against both bh and irq users.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ unsigned long flags;
+ struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ do_set_multicast_list(dev);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+}
+
+/**
+ * ethdev_setup - init rest of 8390 device struct
+ * @dev: network device structure to init
+ *
+ * Initialize the rest of the 8390 device structure. Do NOT __init
+ * this, as it is used by 8390 based modular drivers too.
+ */
+
+static void ethdev_setup(struct net_device *dev)
+{
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ if (ei_debug > 1)
+ printk(version);
+
+ dev->hard_start_xmit = &ei_start_xmit;
+ dev->get_stats = get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ ether_setup(dev);
+
+ spin_lock_init(&ei_local->page_lock);
+}
+
+/**
+ * alloc_ei_netdev - alloc_etherdev counterpart for 8390
+ * @size: extra bytes to allocate
+ *
+ * Allocate 8390-specific net_device.
+ */
+static struct net_device *____alloc_ei_netdev(int size)
+{
+ return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
+ ethdev_setup);
+}
+
+
+
+
+/* This page of functions should be 8390 generic */
+/* Follow National Semi's recommendations for initializing the "NIC". */
+
+/**
+ * NS8390_init - initialize 8390 hardware
+ * @dev: network device to initialize
+ * @startp: boolean. non-zero value to initiate chip processing
+ *
+ * Must be called with lock held.
+ */
+
+static void __NS8390_init(struct net_device *dev, int startp)
+{
+ unsigned long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int i;
+ int endcfg = ei_local->word16
+ ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
+ : 0x48;
+
+ if(sizeof(struct e8390_pkt_hdr)!=4)
+ panic("8390.c: header struct mispacked\n");
+ /* Follow National Semi's recommendations for initing the DP83902. */
+ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
+ ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
+ /* Clear the remote byte count registers. */
+ ei_outb_p(0x00, e8390_base + EN0_RCNTLO);
+ ei_outb_p(0x00, e8390_base + EN0_RCNTHI);
+ /* Set to monitor and loopback mode -- this is vital! */
+ ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
+ ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
+ /* Set the transmit page and receive ring. */
+ ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
+ ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f, NS says 0x26 */
+ ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
+ ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
+ /* Clear the pending interrupts and mask. */
+ ei_outb_p(0xFF, e8390_base + EN0_ISR);
+ ei_outb_p(0x00, e8390_base + EN0_IMR);
+
+ /* Copy the station address into the DS8390 registers. */
+
+ ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
+ for(i = 0; i < 6; i++)
+ {
+ ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
+ if (ei_debug > 1 && ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
+ printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
+ }
+
+ ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
+ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ netif_start_queue(dev);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ ei_local->txing = 0;
+
+ if (startp)
+ {
+ ei_outb_p(0xff, e8390_base + EN0_ISR);
+ ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
+ ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
+ /* 3c503 TechMan says rxconfig only after the NIC is started. */
+ ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
+ do_set_multicast_list(dev); /* (re)load the mcast table */
+ }
+}
+
+/* Trigger a transmit start, assuming the length is valid.
+ Always called with the page lock held */
+
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page)
+{
+ unsigned long e8390_base = dev->base_addr;
+ struct ei_device *ei_local __attribute__((unused)) = (struct ei_device *) netdev_priv(dev);
+
+ ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
+
+ if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
+ {
+ printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
+ dev->name);
+ return;
+ }
+ ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
+ ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
+ ei_outb_p(start_page, e8390_base + EN0_TPSR);
+ ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
+}
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index ade6ff852e1a..a12bb64e3694 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -39,7 +39,16 @@
#include <asm/hwtest.h>
#include <asm/macints.h>
-#include "8390.h"
+static char version[] =
+ "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
+
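+/* These macros map the lib8390 register accessors onto byte-wide MMIO (in_8/out_8) before the shared 8390 core is included below. */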
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+#define ei_inb(port) in_8(port)
+#define ei_outb(val,port) out_8(port,val)
+#define ei_inb_p(port) in_8(port)
+#define ei_outb_p(val,port) out_8(port,val)
+
+#include "lib8390.c"
#define WD_START_PG 0x00 /* First page of TX buffer */
#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */
@@ -116,9 +125,6 @@ static int useresources[] = {
1, /* dayna-lc */
};
-static char version[] __initdata =
- "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
-
extern enum mac8390_type mac8390_ident(struct nubus_dev * dev);
extern int mac8390_memsize(unsigned long membase);
extern int mac8390_memtest(struct net_device * dev);
@@ -237,7 +243,7 @@ struct net_device * __init mac8390_probe(int unit)
if (!MACH_IS_MAC)
return ERR_PTR(-ENODEV);
- dev = alloc_ei_netdev();
+ dev = ____alloc_ei_netdev(0);
if (!dev)
return ERR_PTR(-ENOMEM);
@@ -438,7 +444,7 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
dev->open = &mac8390_open;
dev->stop = &mac8390_close;
#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = ei_poll;
+ dev->poll_controller = __ei_poll;
#endif
/* GAR, ei_status is actually a macro even though it looks global */
@@ -510,7 +516,7 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
return -ENODEV;
}
- NS8390_init(dev, 0);
+ __NS8390_init(dev, 0);
/* Good, done, now spit out some messages */
printk(KERN_INFO "%s: %s in slot %X (type %s)\n",
@@ -532,8 +538,8 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
static int mac8390_open(struct net_device *dev)
{
- ei_open(dev);
- if (request_irq(dev->irq, ei_interrupt, 0, "8390 Ethernet", dev)) {
+ __ei_open(dev);
+ if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
return -EAGAIN;
}
@@ -543,7 +549,7 @@ static int mac8390_open(struct net_device *dev)
static int mac8390_close(struct net_device *dev)
{
free_irq(dev->irq, dev);
- ei_close(dev);
+ __ei_close(dev);
return 0;
}
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
new file mode 100644
index 000000000000..bd0ce98c939c
--- /dev/null
+++ b/drivers/net/macb.c
@@ -0,0 +1,1210 @@
+/*
+ * Atmel MACB Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/board.h>
+
+#include "macb.h"
+
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+
+#define RX_BUFFER_SIZE 128
+#define RX_RING_SIZE 512
+#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE)
+
+/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
+#define RX_OFFSET 2
+
+#define TX_RING_SIZE 128
+#define DEF_TX_RING_PENDING (TX_RING_SIZE - 1)
+#define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE)
+
+#define TX_RING_GAP(bp) \
+ (TX_RING_SIZE - (bp)->tx_pending)
+#define TX_BUFFS_AVAIL(bp) \
+ (((bp)->tx_tail <= (bp)->tx_head) ? \
+ (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \
+ (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
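+/* e.g. with tx_pending == 127: head == tail leaves 127 descriptors free, tail == 0 and head == 127 leaves none */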
+#define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1))
+
+#define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1))
+
+/* minimum number of free TX descriptors before waking up TX process */
+#define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4)
+
+#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
+ | MACB_BIT(ISR_ROVR))
+
+static void __macb_set_hwaddr(struct macb *bp)
+{
+ u32 bottom;
+ u16 top;
+
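+ /* Bytes 0-3 of dev_addr go into SA1B, bytes 4-5 into the low half of SA1T */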
+ bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+ macb_writel(bp, SA1B, bottom);
+ top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+ macb_writel(bp, SA1T, top);
+}
+
+static void __init macb_get_hwaddr(struct macb *bp)
+{
+ u32 bottom;
+ u16 top;
+ u8 addr[6];
+
+ bottom = macb_readl(bp, SA1B);
+ top = macb_readl(bp, SA1T);
+
+ addr[0] = bottom & 0xff;
+ addr[1] = (bottom >> 8) & 0xff;
+ addr[2] = (bottom >> 16) & 0xff;
+ addr[3] = (bottom >> 24) & 0xff;
+ addr[4] = top & 0xff;
+ addr[5] = (top >> 8) & 0xff;
+
+ if (is_valid_ether_addr(addr))
+ memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+}
+
+static void macb_enable_mdio(struct macb *bp)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ reg = macb_readl(bp, NCR);
+ reg |= MACB_BIT(MPE);
+ macb_writel(bp, NCR, reg);
+ macb_writel(bp, IER, MACB_BIT(MFD));
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void macb_disable_mdio(struct macb *bp)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ reg = macb_readl(bp, NCR);
+ reg &= ~MACB_BIT(MPE);
+ macb_writel(bp, NCR, reg);
+ macb_writel(bp, IDR, MACB_BIT(MFD));
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static int macb_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct macb *bp = netdev_priv(dev);
+ int value;
+
+ mutex_lock(&bp->mdio_mutex);
+
+ macb_enable_mdio(bp);
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+ | MACB_BF(RW, MACB_MAN_READ)
+ | MACB_BF(PHYA, phy_id)
+ | MACB_BF(REGA, location)
+ | MACB_BF(CODE, MACB_MAN_CODE)));
+
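+ /* macb_interrupt() completes mdio_complete when the MFD interrupt fires */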
+ wait_for_completion(&bp->mdio_complete);
+
+ value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+ macb_disable_mdio(bp);
+ mutex_unlock(&bp->mdio_mutex);
+
+ return value;
+}
+
+static void macb_mdio_write(struct net_device *dev, int phy_id,
+ int location, int val)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ dev_dbg(&bp->pdev->dev, "mdio_write %02x:%02x <- %04x\n",
+ phy_id, location, val);
+
+ mutex_lock(&bp->mdio_mutex);
+ macb_enable_mdio(bp);
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+ | MACB_BF(RW, MACB_MAN_WRITE)
+ | MACB_BF(PHYA, phy_id)
+ | MACB_BF(REGA, location)
+ | MACB_BF(CODE, MACB_MAN_CODE)
+ | MACB_BF(DATA, val)));
+
+ wait_for_completion(&bp->mdio_complete);
+
+ macb_disable_mdio(bp);
+ mutex_unlock(&bp->mdio_mutex);
+}
+
+static int macb_phy_probe(struct macb *bp)
+{
+ int phy_address;
+ u16 phyid1, phyid2;
+
+ for (phy_address = 0; phy_address < 32; phy_address++) {
+ phyid1 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID1);
+ phyid2 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID2);
+
+ if (phyid1 != 0xffff && phyid1 != 0x0000
+ && phyid2 != 0xffff && phyid2 != 0x0000)
+ break;
+ }
+
+ if (phy_address == 32)
+ return -ENODEV;
+
+ dev_info(&bp->pdev->dev,
+ "detected PHY at address %d (ID %04x:%04x)\n",
+ phy_address, phyid1, phyid2);
+
+ bp->mii.phy_id = phy_address;
+ return 0;
+}
+
+static void macb_set_media(struct macb *bp, int media)
+{
+ u32 reg;
+
+ spin_lock_irq(&bp->lock);
+ reg = macb_readl(bp, NCFGR);
+ reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+ if (media & (ADVERTISE_100HALF | ADVERTISE_100FULL))
+ reg |= MACB_BIT(SPD);
+ if (media & ADVERTISE_FULL)
+ reg |= MACB_BIT(FD);
+ macb_writel(bp, NCFGR, reg);
+ spin_unlock_irq(&bp->lock);
+}
+
+static void macb_check_media(struct macb *bp, int ok_to_print, int init_media)
+{
+ struct mii_if_info *mii = &bp->mii;
+ unsigned int old_carrier, new_carrier;
+ int advertise, lpa, media, duplex;
+
+ /* if forced media, go no further */
+ if (mii->force_media)
+ return;
+
+ /* check current and old link status */
+ old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
+ new_carrier = (unsigned int) mii_link_ok(mii);
+
+ /* if carrier state did not change, assume nothing else did */
+ if (!init_media && old_carrier == new_carrier)
+ return;
+
+ /* no carrier, nothing much to do */
+ if (!new_carrier) {
+ netif_carrier_off(mii->dev);
+ printk(KERN_INFO "%s: link down\n", mii->dev->name);
+ return;
+ }
+
+ /*
+ * we have carrier, see who's on the other end
+ */
+ netif_carrier_on(mii->dev);
+
+ /* get MII advertise and LPA values */
+ if (!init_media && mii->advertising) {
+ advertise = mii->advertising;
+ } else {
+ advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
+ mii->advertising = advertise;
+ }
+ lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
+
+ /* figure out media and duplex from advertise and LPA values */
+ media = mii_nway_result(lpa & advertise);
+ duplex = (media & ADVERTISE_FULL) ? 1 : 0;
+
+ if (ok_to_print)
+ printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n",
+ mii->dev->name,
+ media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10",
+ duplex ? "full" : "half", lpa);
+
+ mii->full_duplex = duplex;
+
+ /* Let the MAC know about the new link state */
+ macb_set_media(bp, media);
+}
+
+static void macb_update_stats(struct macb *bp)
+{
+ u32 __iomem *reg = bp->regs + MACB_PFR;
+ u32 *p = &bp->hw_stats.rx_pause_frames;
+ u32 *end = &bp->hw_stats.tx_pause_frames + 1;
+
+ WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
+
+ for(; p < end; p++, reg++)
+ *p += readl(reg);
+}
+
+static void macb_periodic_task(void *arg)
+{
+ struct macb *bp = arg;
+
+ macb_update_stats(bp);
+ macb_check_media(bp, 1, 0);
+
+ schedule_delayed_work(&bp->periodic_task, HZ);
+}
+
+static void macb_tx(struct macb *bp)
+{
+ unsigned int tail;
+ unsigned int head;
+ u32 status;
+
+ status = macb_readl(bp, TSR);
+ macb_writel(bp, TSR, status);
+
+ dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n",
+ (unsigned long)status);
+
+ if (status & MACB_BIT(UND)) {
+ printk(KERN_ERR "%s: TX underrun, resetting buffers\n",
+ bp->dev->name);
+ bp->tx_head = bp->tx_tail = 0;
+ }
+
+ if (!(status & MACB_BIT(COMP)))
+ /*
+ * This may happen when a buffer becomes complete
+ * between reading the ISR and scanning the
+ * descriptors. Nothing to worry about.
+ */
+ return;
+
+ head = bp->tx_head;
+ for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
+ struct ring_info *rp = &bp->tx_skb[tail];
+ struct sk_buff *skb = rp->skb;
+ u32 bufstat;
+
+ BUG_ON(skb == NULL);
+
+ rmb();
+ bufstat = bp->tx_ring[tail].ctrl;
+
+ if (!(bufstat & MACB_BIT(TX_USED)))
+ break;
+
+ dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n",
+ tail, skb->data);
+ dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
+ DMA_TO_DEVICE);
+ bp->stats.tx_packets++;
+ bp->stats.tx_bytes += skb->len;
+ rp->skb = NULL;
+ dev_kfree_skb_irq(skb);
+ }
+
+ bp->tx_tail = tail;
+ if (netif_queue_stopped(bp->dev) &&
+ TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
+ netif_wake_queue(bp->dev);
+}
+
+static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+ unsigned int last_frag)
+{
+ unsigned int len;
+ unsigned int frag;
+ unsigned int offset = 0;
+ struct sk_buff *skb;
+
+ len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
+
+ dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+ first_frag, last_frag, len);
+
+ skb = dev_alloc_skb(len + RX_OFFSET);
+ if (!skb) {
+ bp->stats.rx_dropped++;
+ for (frag = first_frag; ; frag = NEXT_RX(frag)) {
+ bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+ if (frag == last_frag)
+ break;
+ }
+ wmb();
+ return 1;
+ }
+
+ skb_reserve(skb, RX_OFFSET);
+ skb->dev = bp->dev;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_put(skb, len);
+
+ for (frag = first_frag; ; frag = NEXT_RX(frag)) {
+ unsigned int frag_len = RX_BUFFER_SIZE;
+
+ if (offset + frag_len > len) {
+ BUG_ON(frag != last_frag);
+ frag_len = len - offset;
+ }
+ memcpy(skb->data + offset,
+ bp->rx_buffers + (RX_BUFFER_SIZE * frag),
+ frag_len);
+ offset += RX_BUFFER_SIZE;
+ bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+ wmb();
+
+ if (frag == last_frag)
+ break;
+ }
+
+ skb->protocol = eth_type_trans(skb, bp->dev);
+
+ bp->stats.rx_packets++;
+ bp->stats.rx_bytes += len;
+ bp->dev->last_rx = jiffies;
+ dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
+ skb->len, skb->csum);
+ netif_receive_skb(skb);
+
+ return 0;
+}
+
+/* Mark DMA descriptors from begin up to and not including end as unused */
+static void discard_partial_frame(struct macb *bp, unsigned int begin,
+ unsigned int end)
+{
+ unsigned int frag;
+
+ for (frag = begin; frag != end; frag = NEXT_RX(frag))
+ bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+ wmb();
+
+ /*
+ * When this happens, the hardware stats registers for
+ * whatever caused it are updated, so we don't have to record
+ * anything.
+ */
+}
+
+static int macb_rx(struct macb *bp, int budget)
+{
+ int received = 0;
+ unsigned int tail = bp->rx_tail;
+ int first_frag = -1;
+
+ for (; budget > 0; tail = NEXT_RX(tail)) {
+ u32 addr, ctrl;
+
+ rmb();
+ addr = bp->rx_ring[tail].addr;
+ ctrl = bp->rx_ring[tail].ctrl;
+
+ if (!(addr & MACB_BIT(RX_USED)))
+ break;
+
+ if (ctrl & MACB_BIT(RX_SOF)) {
+ if (first_frag != -1)
+ discard_partial_frame(bp, first_frag, tail);
+ first_frag = tail;
+ }
+
+ if (ctrl & MACB_BIT(RX_EOF)) {
+ int dropped;
+ BUG_ON(first_frag == -1);
+
+ dropped = macb_rx_frame(bp, first_frag, tail);
+ first_frag = -1;
+ if (!dropped) {
+ received++;
+ budget--;
+ }
+ }
+ }
+
+ if (first_frag != -1)
+ bp->rx_tail = first_frag;
+ else
+ bp->rx_tail = tail;
+
+ return received;
+}
+
+static int macb_poll(struct net_device *dev, int *budget)
+{
+ struct macb *bp = netdev_priv(dev);
+ int orig_budget, work_done, retval = 0;
+ u32 status;
+
+ status = macb_readl(bp, RSR);
+ macb_writel(bp, RSR, status);
+
+ if (!status) {
+ /*
+ * This may happen if an interrupt was pending before
+ * this function was called last time, and no packets
+ * have been received since.
+ */
+ netif_rx_complete(dev);
+ goto out;
+ }
+
+ dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
+ (unsigned long)status, *budget);
+
+ if (!(status & MACB_BIT(REC))) {
+ dev_warn(&bp->pdev->dev,
+ "No RX buffers complete, status = %02lx\n",
+ (unsigned long)status);
+ netif_rx_complete(dev);
+ goto out;
+ }
+
+ orig_budget = *budget;
+ if (orig_budget > dev->quota)
+ orig_budget = dev->quota;
+
+ work_done = macb_rx(bp, orig_budget);
+ if (work_done < orig_budget) {
+ netif_rx_complete(dev);
+ retval = 0;
+ } else {
+ retval = 1;
+ }
+
+ /*
+ * We've done what we can to clean the buffers. Make sure we
+ * get notified when new packets arrive.
+ */
+out:
+ macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+
+ /* TODO: Handle errors */
+
+ return retval;
+}
+
+static irqreturn_t macb_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct macb *bp = netdev_priv(dev);
+ u32 status;
+
+ status = macb_readl(bp, ISR);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ spin_lock(&bp->lock);
+
+ while (status) {
+ if (status & MACB_BIT(MFD))
+ complete(&bp->mdio_complete);
+
+ /* close possible race with dev_close */
+ if (unlikely(!netif_running(dev))) {
+ macb_writel(bp, IDR, ~0UL);
+ break;
+ }
+
+ if (status & MACB_RX_INT_FLAGS) {
+ if (netif_rx_schedule_prep(dev)) {
+ /*
+ * There's no point taking any more interrupts
+ * until we have processed the buffers
+ */
+ macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+ dev_dbg(&bp->pdev->dev, "scheduling RX softirq\n");
+ __netif_rx_schedule(dev);
+ }
+ }
+
+ if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND)))
+ macb_tx(bp);
+
+ /*
+ * Link change detection isn't possible with RMII, so we'll
+ * add that if/when we get our hands on a full-blown MII PHY.
+ */
+
+ if (status & MACB_BIT(HRESP)) {
+ /*
+ * TODO: Reset the hardware, and maybe move the printk
+ * to a lower-priority context as well (work queue?)
+ */
+ printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
+ dev->name);
+ }
+
+ status = macb_readl(bp, ISR);
+ }
+
+ spin_unlock(&bp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ dma_addr_t mapping;
+ unsigned int len, entry;
+ u32 ctrl;
+
+#ifdef DEBUG
+ int i;
+ dev_dbg(&bp->pdev->dev,
+ "start_xmit: len %u head %p data %p tail %p end %p\n",
+ skb->len, skb->head, skb->data, skb->tail, skb->end);
+ dev_dbg(&bp->pdev->dev,
+ "data:");
+ for (i = 0; i < 16; i++)
+ printk(" %02x", (unsigned int)skb->data[i]);
+ printk("\n");
+#endif
+
+ len = skb->len;
+ spin_lock_irq(&bp->lock);
+
+ /* This is a hard error, log it. */
+ if (TX_BUFFS_AVAIL(bp) < 1) {
+ netif_stop_queue(dev);
+ spin_unlock_irq(&bp->lock);
+ dev_err(&bp->pdev->dev,
+ "BUG! Tx Ring full when queue awake!\n");
+ dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
+ bp->tx_head, bp->tx_tail);
+ return 1;
+ }
+
+ entry = bp->tx_head;
+ dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ len, DMA_TO_DEVICE);
+ bp->tx_skb[entry].skb = skb;
+ bp->tx_skb[entry].mapping = mapping;
+ dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n",
+ skb->data, (unsigned long)mapping);
+
+ ctrl = MACB_BF(TX_FRMLEN, len);
+ ctrl |= MACB_BIT(TX_LAST);
+ if (entry == (TX_RING_SIZE - 1))
+ ctrl |= MACB_BIT(TX_WRAP);
+
+ bp->tx_ring[entry].addr = mapping;
+ bp->tx_ring[entry].ctrl = ctrl;
+ wmb();
+
+ entry = NEXT_TX(entry);
+ bp->tx_head = entry;
+
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+
+ if (TX_BUFFS_AVAIL(bp) < 1)
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&bp->lock);
+
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+static void macb_free_consistent(struct macb *bp)
+{
+ if (bp->tx_skb) {
+ kfree(bp->tx_skb);
+ bp->tx_skb = NULL;
+ }
+ if (bp->rx_ring) {
+ dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
+ bp->rx_ring, bp->rx_ring_dma);
+ bp->rx_ring = NULL;
+ }
+ if (bp->tx_ring) {
+ dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+ bp->tx_ring, bp->tx_ring_dma);
+ bp->tx_ring = NULL;
+ }
+ if (bp->rx_buffers) {
+ dma_free_coherent(&bp->pdev->dev,
+ RX_RING_SIZE * RX_BUFFER_SIZE,
+ bp->rx_buffers, bp->rx_buffers_dma);
+ bp->rx_buffers = NULL;
+ }
+}
+
+static int macb_alloc_consistent(struct macb *bp)
+{
+ int size;
+
+ size = TX_RING_SIZE * sizeof(struct ring_info);
+ bp->tx_skb = kmalloc(size, GFP_KERNEL);
+ if (!bp->tx_skb)
+ goto out_err;
+
+ size = RX_RING_BYTES;
+ bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+ &bp->rx_ring_dma, GFP_KERNEL);
+ if (!bp->rx_ring)
+ goto out_err;
+ dev_dbg(&bp->pdev->dev,
+ "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+
+ size = TX_RING_BYTES;
+ bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+ &bp->tx_ring_dma, GFP_KERNEL);
+ if (!bp->tx_ring)
+ goto out_err;
+ dev_dbg(&bp->pdev->dev,
+ "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
+
+ size = RX_RING_SIZE * RX_BUFFER_SIZE;
+ bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+ &bp->rx_buffers_dma, GFP_KERNEL);
+ if (!bp->rx_buffers)
+ goto out_err;
+ dev_dbg(&bp->pdev->dev,
+ "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+
+ return 0;
+
+out_err:
+ macb_free_consistent(bp);
+ return -ENOMEM;
+}
+
+static void macb_init_rings(struct macb *bp)
+{
+ int i;
+ dma_addr_t addr;
+
+ addr = bp->rx_buffers_dma;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ bp->rx_ring[i].addr = addr;
+ bp->rx_ring[i].ctrl = 0;
+ addr += RX_BUFFER_SIZE;
+ }
+ bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ bp->tx_ring[i].addr = 0;
+ bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ }
+ bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+ bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
+}
+
+static void macb_reset_hw(struct macb *bp)
+{
+ /* Make sure we have the write buffer for ourselves */
+ wmb();
+
+ /*
+ * Disable RX and TX (XXX: Should we halt the transmission
+ * more gracefully?)
+ */
+ macb_writel(bp, NCR, 0);
+
+ /* Clear the stats registers (XXX: Update stats first?) */
+ macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+
+ /* Clear all status flags */
+ macb_writel(bp, TSR, ~0UL);
+ macb_writel(bp, RSR, ~0UL);
+
+ /* Disable all interrupts */
+ macb_writel(bp, IDR, ~0UL);
+ macb_readl(bp, ISR);
+}
+
+static void macb_init_hw(struct macb *bp)
+{
+ u32 config;
+
+ macb_reset_hw(bp);
+ __macb_set_hwaddr(bp);
+
+ config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
+ config |= MACB_BIT(PAE); /* PAuse Enable */
+ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+ if (bp->dev->flags & IFF_PROMISC)
+ config |= MACB_BIT(CAF); /* Copy All Frames */
+ if (!(bp->dev->flags & IFF_BROADCAST))
+ config |= MACB_BIT(NBC); /* No BroadCast */
+ macb_writel(bp, NCFGR, config);
+
+ /* Initialize TX and RX buffers */
+ macb_writel(bp, RBQP, bp->rx_ring_dma);
+ macb_writel(bp, TBQP, bp->tx_ring_dma);
+
+ /* Enable TX and RX */
+ macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE));
+
+ /* Enable interrupts */
+ macb_writel(bp, IER, (MACB_BIT(RCOMP)
+ | MACB_BIT(RXUBR)
+ | MACB_BIT(ISR_TUND)
+ | MACB_BIT(ISR_RLE)
+ | MACB_BIT(TXERR)
+ | MACB_BIT(TCOMP)
+ | MACB_BIT(ISR_ROVR)
+ | MACB_BIT(HRESP)));
+}
+
+static void macb_init_phy(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ /* Set some reasonable default settings */
+ macb_mdio_write(dev, bp->mii.phy_id, MII_ADVERTISE,
+ ADVERTISE_CSMA | ADVERTISE_ALL);
+ macb_mdio_write(dev, bp->mii.phy_id, MII_BMCR,
+ (BMCR_SPEED100 | BMCR_ANENABLE
+ | BMCR_ANRESTART | BMCR_FULLDPLX));
+}
+
+static int macb_open(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ int err;
+
+ dev_dbg(&bp->pdev->dev, "open\n");
+
+ if (!is_valid_ether_addr(dev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ err = macb_alloc_consistent(bp);
+ if (err) {
+ printk(KERN_ERR
+ "%s: Unable to allocate DMA memory (error %d)\n",
+ dev->name, err);
+ return err;
+ }
+
+ macb_init_rings(bp);
+ macb_init_hw(bp);
+ macb_init_phy(dev);
+
+ macb_check_media(bp, 1, 1);
+ netif_start_queue(dev);
+
+ schedule_delayed_work(&bp->periodic_task, HZ);
+
+ return 0;
+}
+
+static int macb_close(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ unsigned long flags;
+
+ cancel_rearming_delayed_work(&bp->periodic_task);
+
+ netif_stop_queue(dev);
+
+ spin_lock_irqsave(&bp->lock, flags);
+ macb_reset_hw(bp);
+ netif_carrier_off(dev);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ macb_free_consistent(bp);
+
+ return 0;
+}
+
+static struct net_device_stats *macb_get_stats(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct net_device_stats *nstat = &bp->stats;
+ struct macb_stats *hwstat = &bp->hw_stats;
+
+ /* Convert HW stats into netdevice stats */
+ nstat->rx_errors = (hwstat->rx_fcs_errors +
+ hwstat->rx_align_errors +
+ hwstat->rx_resource_errors +
+ hwstat->rx_overruns +
+ hwstat->rx_oversize_pkts +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersize_pkts +
+ hwstat->sqe_test_errors +
+ hwstat->rx_length_mismatch);
+ nstat->tx_errors = (hwstat->tx_late_cols +
+ hwstat->tx_excessive_cols +
+ hwstat->tx_underruns +
+ hwstat->tx_carrier_errors);
+ nstat->collisions = (hwstat->tx_single_cols +
+ hwstat->tx_multiple_cols +
+ hwstat->tx_excessive_cols);
+ nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersize_pkts +
+ hwstat->rx_length_mismatch);
+ nstat->rx_over_errors = hwstat->rx_resource_errors;
+ nstat->rx_crc_errors = hwstat->rx_fcs_errors;
+ nstat->rx_frame_errors = hwstat->rx_align_errors;
+ nstat->rx_fifo_errors = hwstat->rx_overruns;
+ /* XXX: What does "missed" mean? */
+ nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
+ nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
+ nstat->tx_fifo_errors = hwstat->tx_underruns;
+ /* Don't know about heartbeat or window errors... */
+
+ return nstat;
+}
+
+static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct macb *bp = netdev_priv(dev);
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ ret = mii_ethtool_gset(&bp->mii, cmd);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return ret;
+}
+
+static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct macb *bp = netdev_priv(dev);
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ ret = mii_ethtool_sset(&bp->mii, cmd);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return ret;
+}
+
+static void macb_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ strcpy(info->driver, bp->pdev->dev.driver->name);
+ strcpy(info->version, "$Revision: 1.14 $");
+ strcpy(info->bus_info, bp->pdev->dev.bus_id);
+}
+
+static int macb_nway_reset(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ return mii_nway_restart(&bp->mii);
+}
+
+static struct ethtool_ops macb_ethtool_ops = {
+ .get_settings = macb_get_settings,
+ .set_settings = macb_set_settings,
+ .get_drvinfo = macb_get_drvinfo,
+ .nway_reset = macb_nway_reset,
+ .get_link = ethtool_op_get_link,
+};
+
+static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct macb *bp = netdev_priv(dev);
+ int ret;
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ ret = generic_mii_ioctl(&bp->mii, if_mii(rq), cmd, NULL);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return ret;
+}
+
+static ssize_t macb_mii_show(const struct class_device *cd, char *buf,
+ unsigned long addr)
+{
+ struct net_device *dev = to_net_dev(cd);
+ struct macb *bp = netdev_priv(dev);
+ ssize_t ret = -EINVAL;
+
+ if (netif_running(dev)) {
+ int value;
+ value = macb_mdio_read(dev, bp->mii.phy_id, addr);
+ ret = sprintf(buf, "0x%04x\n", (uint16_t)value);
+ }
+
+ return ret;
+}
+
+#define MII_ENTRY(name, addr) \
+static ssize_t show_##name(struct class_device *cd, char *buf) \
+{ \
+ return macb_mii_show(cd, buf, addr); \
+} \
+static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+
+MII_ENTRY(bmcr, MII_BMCR);
+MII_ENTRY(bmsr, MII_BMSR);
+MII_ENTRY(physid1, MII_PHYSID1);
+MII_ENTRY(physid2, MII_PHYSID2);
+MII_ENTRY(advertise, MII_ADVERTISE);
+MII_ENTRY(lpa, MII_LPA);
+MII_ENTRY(expansion, MII_EXPANSION);
+
+static struct attribute *macb_mii_attrs[] = {
+ &class_device_attr_bmcr.attr,
+ &class_device_attr_bmsr.attr,
+ &class_device_attr_physid1.attr,
+ &class_device_attr_physid2.attr,
+ &class_device_attr_advertise.attr,
+ &class_device_attr_lpa.attr,
+ &class_device_attr_expansion.attr,
+ NULL,
+};
+
+static struct attribute_group macb_mii_group = {
+ .name = "mii",
+ .attrs = macb_mii_attrs,
+};
+
+static void macb_unregister_sysfs(struct net_device *net)
+{
+ struct class_device *class_dev = &net->class_dev;
+
+ sysfs_remove_group(&class_dev->kobj, &macb_mii_group);
+}
+
+static int macb_register_sysfs(struct net_device *net)
+{
+ struct class_device *class_dev = &net->class_dev;
+ int ret;
+
+ ret = sysfs_create_group(&class_dev->kobj, &macb_mii_group);
+ if (ret)
+ printk(KERN_WARNING
+ "%s: sysfs mii attribute registration failed: %d\n",
+ net->name, ret);
+ return ret;
+}
+
+static int __devinit macb_probe(struct platform_device *pdev)
+{
+ struct eth_platform_data *pdata;
+ struct resource *regs;
+ struct net_device *dev;
+ struct macb *bp;
+ unsigned long pclk_hz;
+ u32 config;
+ int err = -ENXIO;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "no mmio resource defined\n");
+ goto err_out;
+ }
+
+ err = -ENOMEM;
+ dev = alloc_etherdev(sizeof(*bp));
+ if (!dev) {
+ dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
+ goto err_out;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* TODO: Actually, we have some interesting features... */
+ dev->features |= 0;
+
+ bp = netdev_priv(dev);
+ bp->pdev = pdev;
+ bp->dev = dev;
+
+ spin_lock_init(&bp->lock);
+
+ bp->pclk = clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(bp->pclk)) {
+ dev_err(&pdev->dev, "failed to get pclk\n");
+ goto err_out_free_dev;
+ }
+ bp->hclk = clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(bp->hclk)) {
+ dev_err(&pdev->dev, "failed to get hclk\n");
+ goto err_out_put_pclk;
+ }
+
+ clk_enable(bp->pclk);
+ clk_enable(bp->hclk);
+
+ bp->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ if (!bp->regs) {
+ dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ err = -ENOMEM;
+ goto err_out_disable_clocks;
+ }
+
+ dev->irq = platform_get_irq(pdev, 0);
+ err = request_irq(dev->irq, macb_interrupt, SA_SAMPLE_RANDOM,
+ dev->name, dev);
+ if (err) {
+ printk(KERN_ERR
+ "%s: Unable to request IRQ %d (error %d)\n",
+ dev->name, dev->irq, err);
+ goto err_out_iounmap;
+ }
+
+ dev->open = macb_open;
+ dev->stop = macb_close;
+ dev->hard_start_xmit = macb_start_xmit;
+ dev->get_stats = macb_get_stats;
+ dev->do_ioctl = macb_ioctl;
+ dev->poll = macb_poll;
+ dev->weight = 64;
+ dev->ethtool_ops = &macb_ethtool_ops;
+
+ dev->base_addr = regs->start;
+
+ INIT_WORK(&bp->periodic_task, macb_periodic_task, bp);
+ mutex_init(&bp->mdio_mutex);
+ init_completion(&bp->mdio_complete);
+
+ /* Set MII management clock divider */
+ pclk_hz = clk_get_rate(bp->pclk);
+ if (pclk_hz <= 20000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV8);
+ else if (pclk_hz <= 40000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV16);
+ else if (pclk_hz <= 80000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV32);
+ else
+ config = MACB_BF(CLK, MACB_CLK_DIV64);
+ macb_writel(bp, NCFGR, config);
+
+ bp->mii.dev = dev;
+ bp->mii.mdio_read = macb_mdio_read;
+ bp->mii.mdio_write = macb_mdio_write;
+ bp->mii.phy_id_mask = 0x1f;
+ bp->mii.reg_num_mask = 0x1f;
+
+ macb_get_hwaddr(bp);
+ err = macb_phy_probe(bp);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to detect PHY, aborting.\n");
+ goto err_out_free_irq;
+ }
+
+ pdata = pdev->dev.platform_data;
+ if (pdata && pdata->is_rmii)
+ macb_writel(bp, USRIO, 0);
+ else
+ macb_writel(bp, USRIO, MACB_BIT(MII));
+
+ bp->tx_pending = DEF_TX_RING_PENDING;
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_free_irq;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ macb_register_sysfs(dev);
+
+ printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d "
+ "(%02x:%02x:%02x:%02x:%02x:%02x)\n",
+ dev->name, dev->base_addr, dev->irq,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ return 0;
+
+err_out_free_irq:
+ free_irq(dev->irq, dev);
+err_out_iounmap:
+ iounmap(bp->regs);
+err_out_disable_clocks:
+ clk_disable(bp->hclk);
+ clk_disable(bp->pclk);
+ clk_put(bp->hclk);
+err_out_put_pclk:
+ clk_put(bp->pclk);
+err_out_free_dev:
+ free_netdev(dev);
+err_out:
+ platform_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static int __devexit macb_remove(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct macb *bp;
+
+ dev = platform_get_drvdata(pdev);
+
+ if (dev) {
+ bp = netdev_priv(dev);
+ macb_unregister_sysfs(dev);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ iounmap(bp->regs);
+ clk_disable(bp->hclk);
+ clk_disable(bp->pclk);
+ clk_put(bp->hclk);
+ clk_put(bp->pclk);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ }
+
+ return 0;
+}
+
+static struct platform_driver macb_driver = {
+ .probe = macb_probe,
+ .remove = __devexit_p(macb_remove),
+ .driver = {
+ .name = "macb",
+ },
+};
+
+static int __init macb_init(void)
+{
+ return platform_driver_register(&macb_driver);
+}
+
+static void __exit macb_exit(void)
+{
+ platform_driver_unregister(&macb_driver);
+}
+
+module_init(macb_init);
+module_exit(macb_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
+MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
diff --git a/drivers/net/macb.h b/drivers/net/macb.h
new file mode 100644
index 000000000000..8c253db69881
--- /dev/null
+++ b/drivers/net/macb.h
@@ -0,0 +1,387 @@
+/*
+ * Atmel MACB Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _MACB_H
+#define _MACB_H
+
+/* MACB register offsets */
+#define MACB_NCR 0x0000
+#define MACB_NCFGR 0x0004
+#define MACB_NSR 0x0008
+#define MACB_TSR 0x0014
+#define MACB_RBQP 0x0018
+#define MACB_TBQP 0x001c
+#define MACB_RSR 0x0020
+#define MACB_ISR 0x0024
+#define MACB_IER 0x0028
+#define MACB_IDR 0x002c
+#define MACB_IMR 0x0030
+#define MACB_MAN 0x0034
+#define MACB_PTR 0x0038
+#define MACB_PFR 0x003c
+#define MACB_FTO 0x0040
+#define MACB_SCF 0x0044
+#define MACB_MCF 0x0048
+#define MACB_FRO 0x004c
+#define MACB_FCSE 0x0050
+#define MACB_ALE 0x0054
+#define MACB_DTF 0x0058
+#define MACB_LCOL 0x005c
+#define MACB_EXCOL 0x0060
+#define MACB_TUND 0x0064
+#define MACB_CSE 0x0068
+#define MACB_RRE 0x006c
+#define MACB_ROVR 0x0070
+#define MACB_RSE 0x0074
+#define MACB_ELE 0x0078
+#define MACB_RJA 0x007c
+#define MACB_USF 0x0080
+#define MACB_STE 0x0084
+#define MACB_RLE 0x0088
+#define MACB_TPF 0x008c
+#define MACB_HRB 0x0090
+#define MACB_HRT 0x0094
+#define MACB_SA1B 0x0098
+#define MACB_SA1T 0x009c
+#define MACB_SA2B 0x00a0
+#define MACB_SA2T 0x00a4
+#define MACB_SA3B 0x00a8
+#define MACB_SA3T 0x00ac
+#define MACB_SA4B 0x00b0
+#define MACB_SA4T 0x00b4
+#define MACB_TID 0x00b8
+#define MACB_TPQ 0x00bc
+#define MACB_USRIO 0x00c0
+#define MACB_WOL 0x00c4
+
+/* Bitfields in NCR */
+#define MACB_LB_OFFSET 0
+#define MACB_LB_SIZE 1
+#define MACB_LLB_OFFSET 1
+#define MACB_LLB_SIZE 1
+#define MACB_RE_OFFSET 2
+#define MACB_RE_SIZE 1
+#define MACB_TE_OFFSET 3
+#define MACB_TE_SIZE 1
+#define MACB_MPE_OFFSET 4
+#define MACB_MPE_SIZE 1
+#define MACB_CLRSTAT_OFFSET 5
+#define MACB_CLRSTAT_SIZE 1
+#define MACB_INCSTAT_OFFSET 6
+#define MACB_INCSTAT_SIZE 1
+#define MACB_WESTAT_OFFSET 7
+#define MACB_WESTAT_SIZE 1
+#define MACB_BP_OFFSET 8
+#define MACB_BP_SIZE 1
+#define MACB_TSTART_OFFSET 9
+#define MACB_TSTART_SIZE 1
+#define MACB_THALT_OFFSET 10
+#define MACB_THALT_SIZE 1
+#define MACB_NCR_TPF_OFFSET 11
+#define MACB_NCR_TPF_SIZE 1
+#define MACB_TZQ_OFFSET 12
+#define MACB_TZQ_SIZE 1
+
+/* Bitfields in NCFGR */
+#define MACB_SPD_OFFSET 0
+#define MACB_SPD_SIZE 1
+#define MACB_FD_OFFSET 1
+#define MACB_FD_SIZE 1
+#define MACB_BIT_RATE_OFFSET 2
+#define MACB_BIT_RATE_SIZE 1
+#define MACB_JFRAME_OFFSET 3
+#define MACB_JFRAME_SIZE 1
+#define MACB_CAF_OFFSET 4
+#define MACB_CAF_SIZE 1
+#define MACB_NBC_OFFSET 5
+#define MACB_NBC_SIZE 1
+#define MACB_NCFGR_MTI_OFFSET 6
+#define MACB_NCFGR_MTI_SIZE 1
+#define MACB_UNI_OFFSET 7
+#define MACB_UNI_SIZE 1
+#define MACB_BIG_OFFSET 8
+#define MACB_BIG_SIZE 1
+#define MACB_EAE_OFFSET 9
+#define MACB_EAE_SIZE 1
+#define MACB_CLK_OFFSET 10
+#define MACB_CLK_SIZE 2
+#define MACB_RTY_OFFSET 12
+#define MACB_RTY_SIZE 1
+#define MACB_PAE_OFFSET 13
+#define MACB_PAE_SIZE 1
+#define MACB_RBOF_OFFSET 14
+#define MACB_RBOF_SIZE 2
+#define MACB_RLCE_OFFSET 16
+#define MACB_RLCE_SIZE 1
+#define MACB_DRFCS_OFFSET 17
+#define MACB_DRFCS_SIZE 1
+#define MACB_EFRHD_OFFSET 18
+#define MACB_EFRHD_SIZE 1
+#define MACB_IRXFCS_OFFSET 19
+#define MACB_IRXFCS_SIZE 1
+
+/* Bitfields in NSR */
+#define MACB_NSR_LINK_OFFSET 0
+#define MACB_NSR_LINK_SIZE 1
+#define MACB_MDIO_OFFSET 1
+#define MACB_MDIO_SIZE 1
+#define MACB_IDLE_OFFSET 2
+#define MACB_IDLE_SIZE 1
+
+/* Bitfields in TSR */
+#define MACB_UBR_OFFSET 0
+#define MACB_UBR_SIZE 1
+#define MACB_COL_OFFSET 1
+#define MACB_COL_SIZE 1
+#define MACB_TSR_RLE_OFFSET 2
+#define MACB_TSR_RLE_SIZE 1
+#define MACB_TGO_OFFSET 3
+#define MACB_TGO_SIZE 1
+#define MACB_BEX_OFFSET 4
+#define MACB_BEX_SIZE 1
+#define MACB_COMP_OFFSET 5
+#define MACB_COMP_SIZE 1
+#define MACB_UND_OFFSET 6
+#define MACB_UND_SIZE 1
+
+/* Bitfields in RSR */
+#define MACB_BNA_OFFSET 0
+#define MACB_BNA_SIZE 1
+#define MACB_REC_OFFSET 1
+#define MACB_REC_SIZE 1
+#define MACB_OVR_OFFSET 2
+#define MACB_OVR_SIZE 1
+
+/* Bitfields in ISR/IER/IDR/IMR */
+#define MACB_MFD_OFFSET 0
+#define MACB_MFD_SIZE 1
+#define MACB_RCOMP_OFFSET 1
+#define MACB_RCOMP_SIZE 1
+#define MACB_RXUBR_OFFSET 2
+#define MACB_RXUBR_SIZE 1
+#define MACB_TXUBR_OFFSET 3
+#define MACB_TXUBR_SIZE 1
+#define MACB_ISR_TUND_OFFSET 4
+#define MACB_ISR_TUND_SIZE 1
+#define MACB_ISR_RLE_OFFSET 5
+#define MACB_ISR_RLE_SIZE 1
+#define MACB_TXERR_OFFSET 6
+#define MACB_TXERR_SIZE 1
+#define MACB_TCOMP_OFFSET 7
+#define MACB_TCOMP_SIZE 1
+#define MACB_ISR_LINK_OFFSET 9
+#define MACB_ISR_LINK_SIZE 1
+#define MACB_ISR_ROVR_OFFSET 10
+#define MACB_ISR_ROVR_SIZE 1
+#define MACB_HRESP_OFFSET 11
+#define MACB_HRESP_SIZE 1
+#define MACB_PFR_OFFSET 12
+#define MACB_PFR_SIZE 1
+#define MACB_PTZ_OFFSET 13
+#define MACB_PTZ_SIZE 1
+
+/* Bitfields in MAN */
+#define MACB_DATA_OFFSET 0
+#define MACB_DATA_SIZE 16
+#define MACB_CODE_OFFSET 16
+#define MACB_CODE_SIZE 2
+#define MACB_REGA_OFFSET 18
+#define MACB_REGA_SIZE 5
+#define MACB_PHYA_OFFSET 23
+#define MACB_PHYA_SIZE 5
+#define MACB_RW_OFFSET 28
+#define MACB_RW_SIZE 2
+#define MACB_SOF_OFFSET 30
+#define MACB_SOF_SIZE 2
+
+/* Bitfields in USRIO */
+#define MACB_MII_OFFSET 0
+#define MACB_MII_SIZE 1
+#define MACB_EAM_OFFSET 1
+#define MACB_EAM_SIZE 1
+#define MACB_TX_PAUSE_OFFSET 2
+#define MACB_TX_PAUSE_SIZE 1
+#define MACB_TX_PAUSE_ZERO_OFFSET 3
+#define MACB_TX_PAUSE_ZERO_SIZE 1
+
+/* Bitfields in WOL */
+#define MACB_IP_OFFSET 0
+#define MACB_IP_SIZE 16
+#define MACB_MAG_OFFSET 16
+#define MACB_MAG_SIZE 1
+#define MACB_ARP_OFFSET 17
+#define MACB_ARP_SIZE 1
+#define MACB_SA1_OFFSET 18
+#define MACB_SA1_SIZE 1
+#define MACB_WOL_MTI_OFFSET 19
+#define MACB_WOL_MTI_SIZE 1
+
+/* Constants for CLK */
+#define MACB_CLK_DIV8 0
+#define MACB_CLK_DIV16 1
+#define MACB_CLK_DIV32 2
+#define MACB_CLK_DIV64 3
+
+/* Constants for MAN register */
+#define MACB_MAN_SOF 1
+#define MACB_MAN_WRITE 1
+#define MACB_MAN_READ 2
+#define MACB_MAN_CODE 2
+
+/* Bit manipulation macros */
+#define MACB_BIT(name) \
+ (1 << MACB_##name##_OFFSET)
+#define MACB_BF(name,value) \
+ (((value) & ((1 << MACB_##name##_SIZE) - 1)) \
+ << MACB_##name##_OFFSET)
+#define MACB_BFEXT(name,value)\
+ (((value) >> MACB_##name##_OFFSET) \
+ & ((1 << MACB_##name##_SIZE) - 1))
+#define MACB_BFINS(name,value,old) \
+ (((old) & ~(((1 << MACB_##name##_SIZE) - 1) \
+ << MACB_##name##_OFFSET)) \
+ | MACB_BF(name,value))
+
+/* Register access macros */
+#define macb_readl(port,reg) \
+ readl((port)->regs + MACB_##reg)
+#define macb_writel(port,reg,value) \
+ writel((value), (port)->regs + MACB_##reg)
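+
+/*
+ * For illustration, a read-modify-write of the two-bit CLK field in
+ * NCFGR using the macros above might look like:
+ *
+ *	config = macb_readl(bp, NCFGR);
+ *	config = MACB_BFINS(CLK, MACB_CLK_DIV16, config);
+ *	macb_writel(bp, NCFGR, config);
+ *
+ * MACB_BFEXT(CLK, config) reads the field back out, and MACB_BIT(MII)
+ * builds a single-bit mask (as used for the USRIO register).
+ */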
+
+struct dma_desc {
+ u32 addr;
+ u32 ctrl;
+};
+
+/* DMA descriptor bitfields */
+#define MACB_RX_USED_OFFSET 0
+#define MACB_RX_USED_SIZE 1
+#define MACB_RX_WRAP_OFFSET 1
+#define MACB_RX_WRAP_SIZE 1
+#define MACB_RX_WADDR_OFFSET 2
+#define MACB_RX_WADDR_SIZE 30
+
+#define MACB_RX_FRMLEN_OFFSET 0
+#define MACB_RX_FRMLEN_SIZE 12
+#define MACB_RX_OFFSET_OFFSET 12
+#define MACB_RX_OFFSET_SIZE 2
+#define MACB_RX_SOF_OFFSET 14
+#define MACB_RX_SOF_SIZE 1
+#define MACB_RX_EOF_OFFSET 15
+#define MACB_RX_EOF_SIZE 1
+#define MACB_RX_CFI_OFFSET 16
+#define MACB_RX_CFI_SIZE 1
+#define MACB_RX_VLAN_PRI_OFFSET 17
+#define MACB_RX_VLAN_PRI_SIZE 3
+#define MACB_RX_PRI_TAG_OFFSET 20
+#define MACB_RX_PRI_TAG_SIZE 1
+#define MACB_RX_VLAN_TAG_OFFSET 21
+#define MACB_RX_VLAN_TAG_SIZE 1
+#define MACB_RX_TYPEID_MATCH_OFFSET 22
+#define MACB_RX_TYPEID_MATCH_SIZE 1
+#define MACB_RX_SA4_MATCH_OFFSET 23
+#define MACB_RX_SA4_MATCH_SIZE 1
+#define MACB_RX_SA3_MATCH_OFFSET 24
+#define MACB_RX_SA3_MATCH_SIZE 1
+#define MACB_RX_SA2_MATCH_OFFSET 25
+#define MACB_RX_SA2_MATCH_SIZE 1
+#define MACB_RX_SA1_MATCH_OFFSET 26
+#define MACB_RX_SA1_MATCH_SIZE 1
+#define MACB_RX_EXT_MATCH_OFFSET 28
+#define MACB_RX_EXT_MATCH_SIZE 1
+#define MACB_RX_UHASH_MATCH_OFFSET 29
+#define MACB_RX_UHASH_MATCH_SIZE 1
+#define MACB_RX_MHASH_MATCH_OFFSET 30
+#define MACB_RX_MHASH_MATCH_SIZE 1
+#define MACB_RX_BROADCAST_OFFSET 31
+#define MACB_RX_BROADCAST_SIZE 1
+
+#define MACB_TX_FRMLEN_OFFSET 0
+#define MACB_TX_FRMLEN_SIZE 11
+#define MACB_TX_LAST_OFFSET 15
+#define MACB_TX_LAST_SIZE 1
+#define MACB_TX_NOCRC_OFFSET 16
+#define MACB_TX_NOCRC_SIZE 1
+#define MACB_TX_BUF_EXHAUSTED_OFFSET 27
+#define MACB_TX_BUF_EXHAUSTED_SIZE 1
+#define MACB_TX_UNDERRUN_OFFSET 28
+#define MACB_TX_UNDERRUN_SIZE 1
+#define MACB_TX_ERROR_OFFSET 29
+#define MACB_TX_ERROR_SIZE 1
+#define MACB_TX_WRAP_OFFSET 30
+#define MACB_TX_WRAP_SIZE 1
+#define MACB_TX_USED_OFFSET 31
+#define MACB_TX_USED_SIZE 1
+
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+
+/*
+ * Hardware-collected statistics. Used when updating the network
+ * device stats by a periodic timer.
+ */
+struct macb_stats {
+ u32 rx_pause_frames;
+ u32 tx_ok;
+ u32 tx_single_cols;
+ u32 tx_multiple_cols;
+ u32 rx_ok;
+ u32 rx_fcs_errors;
+ u32 rx_align_errors;
+ u32 tx_deferred;
+ u32 tx_late_cols;
+ u32 tx_excessive_cols;
+ u32 tx_underruns;
+ u32 tx_carrier_errors;
+ u32 rx_resource_errors;
+ u32 rx_overruns;
+ u32 rx_symbol_errors;
+ u32 rx_oversize_pkts;
+ u32 rx_jabbers;
+ u32 rx_undersize_pkts;
+ u32 sqe_test_errors;
+ u32 rx_length_mismatch;
+ u32 tx_pause_frames;
+};
+
+struct macb {
+ void __iomem *regs;
+
+ unsigned int rx_tail;
+ struct dma_desc *rx_ring;
+ void *rx_buffers;
+
+ unsigned int tx_head, tx_tail;
+ struct dma_desc *tx_ring;
+ struct ring_info *tx_skb;
+
+ spinlock_t lock;
+ struct platform_device *pdev;
+ struct clk *pclk;
+ struct clk *hclk;
+ struct net_device *dev;
+ struct net_device_stats stats;
+ struct macb_stats hw_stats;
+
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_buffers_dma;
+
+ unsigned int rx_pending, tx_pending;
+
+ struct work_struct periodic_task;
+
+ struct mutex mdio_mutex;
+ struct completion mdio_complete;
+ struct mii_if_info mii;
+};
+
+#endif /* _MACB_H */
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index eb893d7e8834..38fd525f0f13 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -33,6 +33,8 @@ static const char version1[] =
#include <asm/io.h>
#include <asm/irq.h>
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+
#include "8390.h"
#define DRV_NAME "ne-h8300"
@@ -52,6 +54,11 @@ static const char version1[] =
/* ---- No user-serviceable parts below ---- */
+static const char version[] =
+ "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include "lib8390.c"
+
#define NE_BASE (dev->base_addr)
#define NE_CMD 0x00
#define NE_DATAPORT (ei_status.word16?0x20:0x10) /* NatSemi-defined port window offset. */
@@ -162,7 +169,7 @@ static void cleanup_card(struct net_device *dev)
#ifndef MODULE
struct net_device * __init ne_probe(int unit)
{
- struct net_device *dev = alloc_ei_netdev();
+ struct net_device *dev = ____alloc_ei_netdev(0);
int err;
if (!dev)
@@ -283,7 +290,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
/* Snarf the interrupt now. There's no point in waiting since we cannot
share and the board will usually be enabled. */
- ret = request_irq(dev->irq, ei_interrupt, 0, name, dev);
+ ret = request_irq(dev->irq, __ei_interrupt, 0, name, dev);
if (ret) {
printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
goto err_out;
@@ -318,9 +325,9 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
dev->open = &ne_open;
dev->stop = &ne_close;
#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = ei_poll;
+ dev->poll_controller = __ei_poll;
#endif
- NS8390_init(dev, 0);
+ __NS8390_init(dev, 0);
ret = register_netdev(dev);
if (ret)
@@ -335,7 +342,7 @@ err_out:
static int ne_open(struct net_device *dev)
{
- ei_open(dev);
+ __ei_open(dev);
return 0;
}
@@ -343,7 +350,7 @@ static int ne_close(struct net_device *dev)
{
if (ei_debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
- ei_close(dev);
+ __ei_close(dev);
return 0;
}
@@ -584,7 +591,7 @@ retry:
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
ne_reset_8390(dev);
- NS8390_init(dev,1);
+ __NS8390_init(dev,1);
break;
}
@@ -620,7 +627,7 @@ int init_module(void)
int err;
for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
- struct net_device *dev = alloc_ei_netdev();
+ struct net_device *dev = ____alloc_ei_netdev(0);
if (!dev)
break;
if (io[this_dev]) {
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile
new file mode 100644
index 000000000000..a07cdc6f7384
--- /dev/null
+++ b/drivers/net/netxen/Makefile
@@ -0,0 +1,35 @@
+# Copyright (C) 2003 - 2006 NetXen, Inc.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+# MA 02111-1307, USA.
+#
+# The full GNU General Public License is included in this distribution
+# in the file called LICENSE.
+#
+# Contact Information:
+# info@netxen.com
+# NetXen,
+# 3965 Freedom Circle, Fourth floor,
+# Santa Clara, CA 95054
+#
+# Makefile for the NetXen NIC Driver
+#
+
+
+obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o
+
+netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \
+ netxen_nic_isr.o netxen_nic_ethtool.o netxen_nic_niu.o
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
new file mode 100644
index 000000000000..d925053fe597
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic.h
@@ -0,0 +1,1028 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ * info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ */
+
+#ifndef _NETXEN_NIC_H_
+#define _NETXEN_NIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+
+#include <linux/mm.h>
+#include <linux/mman.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+#include "netxen_nic_hw.h"
+
+#define NETXEN_NIC_BUILD_NO "5"
+#define _NETXEN_NIC_LINUX_MAJOR 2
+#define _NETXEN_NIC_LINUX_MINOR 3
+#define _NETXEN_NIC_LINUX_SUBVERSION 59
+#define NETXEN_NIC_LINUX_VERSIONID "2.3.59" "-" NETXEN_NIC_BUILD_NO
+#define NETXEN_NIC_FW_VERSIONID "2.3.59"
+
+#define RCV_DESC_RINGSIZE \
+ (sizeof(struct rcv_desc) * adapter->max_rx_desc_count)
+#define STATUS_DESC_RINGSIZE \
+ (sizeof(struct status_desc)* adapter->max_rx_desc_count)
+#define TX_RINGSIZE \
+ (sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count)
+#define RCV_BUFFSIZE \
+ (sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count)
+#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
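+/*
+ * find_diff_among() gives the forward distance from a to b on a ring of
+ * size range, e.g. find_diff_among(1020, 4, 1024) == 8.
+ */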
+
+#define NETXEN_NETDEV_STATUS 0x1
+
+#define ADDR_IN_WINDOW1(off) \
+ ((((off) > NETXEN_CRB_PCIX_HOST2) && ((off) < NETXEN_CRB_MAX)) ? 1 : 0)
+
+/*
+ * normalize a 64MB crb address to 32MB PCI window
+ * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1
+ */
+#define NETXEN_CRB_NORMAL(reg) \
+ ((reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST)
+
+#define NETXEN_CRB_NORMALIZE(adapter, reg) \
+ pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg))
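+
+/*
+ * Example use (see netxen_nic_ethtool.c in this series):
+ *
+ *	fw_major = readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_MAJOR));
+ *
+ * i.e. the CRB register offset is normalized and then translated to an
+ * ioremapped virtual address by pci_base_offset().
+ */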
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END 0x400000
+
+#define SECOND_PAGE_GROUP_START 0x4000000
+#define SECOND_PAGE_GROUP_END 0x66BC000
+
+#define THIRD_PAGE_GROUP_START 0x70E4000
+#define THIRD_PAGE_GROUP_END 0x8000000
+
+#define FIRST_PAGE_GROUP_SIZE (FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START)
+#define SECOND_PAGE_GROUP_SIZE (SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START)
+#define THIRD_PAGE_GROUP_SIZE (THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START)
+
+#define MAX_RX_BUFFER_LENGTH 2000
+#define MAX_RX_JUMBO_BUFFER_LENGTH 9046
+#define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - NET_IP_ALIGN)
+#define RX_JUMBO_DMA_MAP_LEN \
+ (MAX_RX_JUMBO_BUFFER_LENGTH - NET_IP_ALIGN)
+#define NETXEN_ROM_ROUNDUP 0x80000000ULL
+
+/*
+ * Maximum number of ring contexts
+ */
+#define MAX_RING_CTX 1
+
+/* Opcodes to be used with the commands */
+enum {
+ TX_ETHER_PKT = 0x01,
+/* The following opcodes are for IP checksum */
+ TX_TCP_PKT,
+ TX_UDP_PKT,
+ TX_IP_PKT,
+ TX_TCP_LSO,
+ TX_IPSEC,
+ TX_IPSEC_CMD
+};
+
+/* The following opcodes are for internal consumption. */
+#define NETXEN_CONTROL_OP 0x10
+#define PEGNET_REQUEST 0x11
+
+#define MAX_NUM_CARDS 4
+
+#define MAX_BUFFERS_PER_CMD 32
+
+/*
+ * The following are the states of the Phantom. The Phantom sets them
+ * and the host reads them back to check that initialization completed
+ * correctly.
+ */
+#define PHAN_INITIALIZE_START 0xff00
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+
+#define NUM_RCV_DESC_RINGS 2 /* No of Rcv Descriptor contexts */
+
+/* descriptor types */
+#define RCV_DESC_NORMAL 0x01
+#define RCV_DESC_JUMBO 0x02
+#define RCV_DESC_NORMAL_CTXID 0
+#define RCV_DESC_JUMBO_CTXID 1
+
+#define RCV_DESC_TYPE(ID) \
+ ((ID == RCV_DESC_JUMBO_CTXID) ? RCV_DESC_JUMBO : RCV_DESC_NORMAL)
+
+#define MAX_CMD_DESCRIPTORS 1024
+#define MAX_RCV_DESCRIPTORS 32768
+#define MAX_JUMBO_RCV_DESCRIPTORS 1024
+#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS
+#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS
+#define MAX_RCV_DESC MAX_RCV_DESCRIPTORS
+#define MAX_RCVSTATUS_DESC MAX_RCV_DESCRIPTORS
+#define NUM_RCV_DESC (MAX_RCV_DESC + MAX_JUMBO_RCV_DESCRIPTORS)
+#define MAX_EPG_DESCRIPTORS (MAX_CMD_DESCRIPTORS * 8)
+
+#define MIN_TX_COUNT 4096
+#define MIN_RX_COUNT 4096
+
+#define MAX_FRAME_SIZE 0x10000 /* 64K MAX size for LSO */
+
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+#define PHAN_PEG_RCV_START_INITIALIZE 0xff00
+
+#define get_next_index(index, length) \
+ (((index) + 1) & ((length) - 1))
+
+#define get_index_range(index,length,count) \
+ (((index) + (count)) & ((length) - 1))
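+
+/*
+ * Both helpers assume length is a power of two, so the bitwise AND
+ * implements ring wrap-around; e.g. with a 1024-entry ring,
+ * get_next_index(1023, 1024) == 0 and get_index_range(1020, 1024, 8) == 4.
+ */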
+
+/*
+ * The following data structures describe the descriptors that will be
+ * used. The tcpHdrSize and ipHdrSize fields were added; the driver only
+ * needs to fill them in when doing LSO (packets larger than 1500 bytes).
+ */
+
+/*
+ * The size of the reference handle has been changed to 16 bits so that
+ * the MSS field can be passed for LSO packets.
+ */
+
+#define FLAGS_CHECKSUM_ENABLED 0x01
+#define FLAGS_LSO_ENABLED 0x02
+#define FLAGS_IPSEC_SA_ADD 0x04
+#define FLAGS_IPSEC_SA_DELETE 0x08
+#define FLAGS_VLAN_TAGGED 0x10
+
+#define CMD_DESC_TOTAL_LENGTH(cmd_desc) \
+ ((cmd_desc)->length_tcp_hdr & 0x00FFFFFF)
+#define CMD_DESC_TCP_HDR_OFFSET(cmd_desc) \
+ (((cmd_desc)->length_tcp_hdr >> 24) & 0x0FF)
+#define CMD_DESC_PORT(cmd_desc) ((cmd_desc)->port_ctxid & 0x0F)
+#define CMD_DESC_CTX_ID(cmd_desc) (((cmd_desc)->port_ctxid >> 4) & 0x0F)
+
+#define CMD_DESC_TOTAL_LENGTH_WRT(cmd_desc, var) \
+ ((cmd_desc)->length_tcp_hdr |= ((var) & 0x00FFFFFF))
+#define CMD_DESC_TCP_HDR_OFFSET_WRT(cmd_desc, var) \
+ ((cmd_desc)->length_tcp_hdr |= (((var) << 24) & 0xFF000000))
+#define CMD_DESC_PORT_WRT(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
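+
+/*
+ * Note that the _WRT macros OR the new value into the field rather than
+ * clearing it first, so they assume the descriptor was zeroed (or freshly
+ * built) before use.
+ */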
+
+struct cmd_desc_type0 {
+ u64 netxen_next; /* for fragments handled by Phantom */
+ union {
+ struct {
+ u32 addr_low_part2;
+ u32 addr_high_part2;
+ };
+ u64 addr_buffer2;
+ };
+
+ /* Bit pattern: 0-23 total length, 24-31 tcp header offset */
+ u32 length_tcp_hdr;
+ u8 ip_hdr_offset; /* For LSO only */
+ u8 num_of_buffers; /* total number of segments */
+ u8 flags; /* as defined above */
+ u8 opcode;
+
+ u16 reference_handle; /* changed to u16 to add mss */
+ u16 mss; /* passed by NDIS_PACKET for LSO */
+ /* Bit pattern: 0-3 port, 4-7 ctx id */
+ u8 port_ctxid;
+ u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ u16 conn_id; /* IPSec offload only */
+
+ union {
+ struct {
+ u32 addr_low_part3;
+ u32 addr_high_part3;
+ };
+ u64 addr_buffer3;
+ };
+
+ union {
+ struct {
+ u32 addr_low_part1;
+ u32 addr_high_part1;
+ };
+ u64 addr_buffer1;
+ };
+
+ u16 buffer1_length;
+ u16 buffer2_length;
+ u16 buffer3_length;
+ u16 buffer4_length;
+
+ union {
+ struct {
+ u32 addr_low_part4;
+ u32 addr_high_part4;
+ };
+ u64 addr_buffer4;
+ };
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+ u16 reference_handle;
+ u16 reserved;
+ u32 buffer_length; /* allocated buffer length (usually 2K) */
+ u64 addr_buffer;
+};
+
+/* opcode field in status_desc */
+#define RCV_NIC_PKT (0xA)
+#define STATUS_NIC_PKT ((RCV_NIC_PKT) << 12)
+
+/* for status field in status_desc */
+#define STATUS_NEED_CKSUM (1)
+#define STATUS_CKSUM_OK (2)
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1)
+#define STATUS_OWNER_PHANTOM (0x2)
+
+#define NETXEN_PROT_IP (1)
+#define NETXEN_PROT_UNKNOWN (0)
+
+/* Note: sizeof(status_desc) should always be a multiple of 2 */
+#define STATUS_DESC_PORT(status_desc) \
+ ((status_desc)->port_status_type_op & 0x0F)
+#define STATUS_DESC_STATUS(status_desc) \
+ (((status_desc)->port_status_type_op >> 4) & 0x0F)
+#define STATUS_DESC_TYPE(status_desc) \
+ (((status_desc)->port_status_type_op >> 8) & 0x0F)
+#define STATUS_DESC_OPCODE(status_desc) \
+ (((status_desc)->port_status_type_op >> 12) & 0x0F)
+
+struct status_desc {
+ /* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-15 opcode */
+ u16 port_status_type_op;
+ u16 total_length; /* NIC mode */
+ u16 reference_handle; /* handle for the associated packet */
+ /* Bit pattern: 0-1 owner, 2-5 protocol */
+ u16 owner; /* Owner of the descriptor */
+} __attribute__ ((aligned(8)));
+
+enum {
+ NETXEN_RCV_PEG_0 = 0,
+ NETXEN_RCV_PEG_1
+};
+/* The version of the main data structure */
+#define NETXEN_BDINFO_VERSION 1
+
+/* Magic number to let user know flash is programmed */
+#define NETXEN_BDINFO_MAGIC 0x12345678
+
+/* Max number of Gig ports on a Phantom board */
+#define NETXEN_MAX_PORTS 4
+
+typedef enum {
+ NETXEN_BRDTYPE_P1_BD = 0x0000,
+ NETXEN_BRDTYPE_P1_SB = 0x0001,
+ NETXEN_BRDTYPE_P1_SMAX = 0x0002,
+ NETXEN_BRDTYPE_P1_SOCK = 0x0003,
+
+ NETXEN_BRDTYPE_P2_SOCK_31 = 0x0008,
+ NETXEN_BRDTYPE_P2_SOCK_35 = 0x0009,
+ NETXEN_BRDTYPE_P2_SB35_4G = 0x000a,
+ NETXEN_BRDTYPE_P2_SB31_10G = 0x000b,
+ NETXEN_BRDTYPE_P2_SB31_2G = 0x000c,
+
+ NETXEN_BRDTYPE_P2_SB31_10G_IMEZ = 0x000d,
+ NETXEN_BRDTYPE_P2_SB31_10G_HMEZ = 0x000e,
+ NETXEN_BRDTYPE_P2_SB31_10G_CX4 = 0x000f
+} netxen_brdtype_t;
+
+typedef enum {
+ NETXEN_BRDMFG_INVENTEC = 1
+} netxen_brdmfg;
+
+typedef enum {
+ MEM_ORG_128Mbx4 = 0x0, /* DDR1 only */
+ MEM_ORG_128Mbx8 = 0x1, /* DDR1 only */
+ MEM_ORG_128Mbx16 = 0x2, /* DDR1 only */
+ MEM_ORG_256Mbx4 = 0x3,
+ MEM_ORG_256Mbx8 = 0x4,
+ MEM_ORG_256Mbx16 = 0x5,
+ MEM_ORG_512Mbx4 = 0x6,
+ MEM_ORG_512Mbx8 = 0x7,
+ MEM_ORG_512Mbx16 = 0x8,
+ MEM_ORG_1Gbx4 = 0x9,
+ MEM_ORG_1Gbx8 = 0xa,
+ MEM_ORG_1Gbx16 = 0xb,
+ MEM_ORG_2Gbx4 = 0xc,
+ MEM_ORG_2Gbx8 = 0xd,
+ MEM_ORG_2Gbx16 = 0xe,
+ MEM_ORG_128Mbx32 = 0x10002, /* GDDR only */
+ MEM_ORG_256Mbx32 = 0x10005 /* GDDR only */
+} netxen_mn_mem_org_t;
+
+typedef enum {
+ MEM_ORG_512Kx36 = 0x0,
+ MEM_ORG_1Mx36 = 0x1,
+ MEM_ORG_2Mx36 = 0x2
+} netxen_sn_mem_org_t;
+
+typedef enum {
+ MEM_DEPTH_4MB = 0x1,
+ MEM_DEPTH_8MB = 0x2,
+ MEM_DEPTH_16MB = 0x3,
+ MEM_DEPTH_32MB = 0x4,
+ MEM_DEPTH_64MB = 0x5,
+ MEM_DEPTH_128MB = 0x6,
+ MEM_DEPTH_256MB = 0x7,
+ MEM_DEPTH_512MB = 0x8,
+ MEM_DEPTH_1GB = 0x9,
+ MEM_DEPTH_2GB = 0xa,
+ MEM_DEPTH_4GB = 0xb,
+ MEM_DEPTH_8GB = 0xc,
+ MEM_DEPTH_16GB = 0xd,
+ MEM_DEPTH_32GB = 0xe
+} netxen_mem_depth_t;
+
+struct netxen_board_info {
+ u32 header_version;
+
+ u32 board_mfg;
+ u32 board_type;
+ u32 board_num;
+ u32 chip_id;
+ u32 chip_minor;
+ u32 chip_major;
+ u32 chip_pkg;
+ u32 chip_lot;
+
+ u32 port_mask; /* available niu ports */
+ u32 peg_mask; /* available pegs */
+ u32 icache_ok; /* can we run with icache? */
+ u32 dcache_ok; /* can we run with dcache? */
+ u32 casper_ok;
+
+ u32 mac_addr_lo_0;
+ u32 mac_addr_lo_1;
+ u32 mac_addr_lo_2;
+ u32 mac_addr_lo_3;
+
+ /* MN-related config */
+ u32 mn_sync_mode; /* enable/ sync shift cclk/ sync shift mclk */
+ u32 mn_sync_shift_cclk;
+ u32 mn_sync_shift_mclk;
+ u32 mn_wb_en;
+ u32 mn_crystal_freq; /* in MHz */
+ u32 mn_speed; /* in MHz */
+ u32 mn_org;
+ u32 mn_depth;
+ u32 mn_ranks_0; /* ranks per slot */
+ u32 mn_ranks_1; /* ranks per slot */
+ u32 mn_rd_latency_0;
+ u32 mn_rd_latency_1;
+ u32 mn_rd_latency_2;
+ u32 mn_rd_latency_3;
+ u32 mn_rd_latency_4;
+ u32 mn_rd_latency_5;
+ u32 mn_rd_latency_6;
+ u32 mn_rd_latency_7;
+ u32 mn_rd_latency_8;
+ u32 mn_dll_val[18];
+ u32 mn_mode_reg; /* MIU DDR Mode Register */
+ u32 mn_ext_mode_reg; /* MIU DDR Extended Mode Register */
+ u32 mn_timing_0; /* MIU Memory Control Timing Register */
+ u32 mn_timing_1; /* MIU Extended Memory Ctrl Timing Register */
+ u32 mn_timing_2; /* MIU Extended Memory Ctrl Timing2 Register */
+
+ /* SN-related config */
+ u32 sn_sync_mode; /* enable/ sync shift cclk / sync shift mclk */
+ u32 sn_pt_mode; /* pass through mode */
+ u32 sn_ecc_en;
+ u32 sn_wb_en;
+ u32 sn_crystal_freq;
+ u32 sn_speed;
+ u32 sn_org;
+ u32 sn_depth;
+ u32 sn_dll_tap;
+ u32 sn_rd_latency;
+
+ u32 mac_addr_hi_0;
+ u32 mac_addr_hi_1;
+ u32 mac_addr_hi_2;
+ u32 mac_addr_hi_3;
+
+ u32 magic; /* indicates flash has been initialized */
+
+ u32 mn_rdimm;
+ u32 mn_dll_override;
+
+};
+
+#define FLASH_NUM_PORTS (4)
+
+struct netxen_flash_mac_addr {
+ u32 flash_addr[32];
+};
+
+struct netxen_user_old_info {
+ u8 flash_md5[16];
+ u8 crbinit_md5[16];
+ u8 brdcfg_md5[16];
+ /* bootloader */
+ u32 bootld_version;
+ u32 bootld_size;
+ u8 bootld_md5[16];
+ /* image */
+ u32 image_version;
+ u32 image_size;
+ u8 image_md5[16];
+ /* primary image status */
+ u32 primary_status;
+ u32 secondary_present;
+
+ /* MAC address, 4 ports */
+ struct netxen_flash_mac_addr mac_addr[FLASH_NUM_PORTS];
+};
+#define FLASH_NUM_MAC_PER_PORT 32
+struct netxen_user_info {
+ u8 flash_md5[16 * 64];
+ /* bootloader */
+ u32 bootld_version;
+ u32 bootld_size;
+ /* image */
+ u32 image_version;
+ u32 image_size;
+ /* primary image status */
+ u32 primary_status;
+ u32 secondary_present;
+
+ /* MAC address, 4 ports, 32 addresses per port */
+ u64 mac_addr[FLASH_NUM_PORTS * FLASH_NUM_MAC_PER_PORT];
+ u32 sub_sys_id;
+ u8 serial_num[32];
+
+ /* Any user defined data */
+};
+
+/*
+ * Flash Layout - new format.
+ */
+struct netxen_new_user_info {
+ u8 flash_md5[16 * 64];
+ /* bootloader */
+ u32 bootld_version;
+ u32 bootld_size;
+ /* image */
+ u32 image_version;
+ u32 image_size;
+ /* primary image status */
+ u32 primary_status;
+ u32 secondary_present;
+
+ /* MAC address, 4 ports, 32 addresses per port */
+ u64 mac_addr[FLASH_NUM_PORTS * FLASH_NUM_MAC_PER_PORT];
+ u32 sub_sys_id;
+ u8 serial_num[32];
+
+ /* Any user defined data */
+};
+
+#define SECONDARY_IMAGE_PRESENT 0xb3b4b5b6
+#define SECONDARY_IMAGE_ABSENT 0xffffffff
+#define PRIMARY_IMAGE_GOOD 0x5a5a5a5a
+#define PRIMARY_IMAGE_BAD 0xffffffff
+
+/* Flash memory map */
+typedef enum {
+ CRBINIT_START = 0, /* Crbinit section */
+ BRDCFG_START = 0x4000, /* board config */
+ INITCODE_START = 0x6000, /* pegtune code */
+ BOOTLD_START = 0x10000, /* bootld */
+ IMAGE_START = 0x43000, /* compressed image */
+ SECONDARY_START = 0x200000, /* backup images */
+ PXE_START = 0x3E0000, /* user defined region */
+ USER_START = 0x3E8000, /* User defined region for new boards */
+ FIXED_START = 0x3F0000 /* backup of crbinit */
+} netxen_flash_map_t;
+
+#define USER_START_OLD PXE_START /* for backward compatibility */
+
+#define FLASH_START (CRBINIT_START)
+#define INIT_SECTOR (0)
+#define PRIMARY_START (BOOTLD_START)
+#define FLASH_CRBINIT_SIZE (0x4000)
+#define FLASH_BRDCFG_SIZE (sizeof(struct netxen_board_info))
+#define FLASH_USER_SIZE (sizeof(struct netxen_user_info)/sizeof(u32))
+#define FLASH_SECONDARY_SIZE (USER_START-SECONDARY_START)
+#define NUM_PRIMARY_SECTORS (0x20)
+#define NUM_CONFIG_SECTORS (1)
+#define PFX "netxen: "
+
+/* Note: Make sure to not call this before adapter->port is valid */
+#if !defined(NETXEN_DEBUG)
+#define DPRINTK(klevel, fmt, args...) do { \
+ } while (0)
+#else
+#define DPRINTK(klevel, fmt, args...) do { \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, __FUNCTION__,\
+ (adapter != NULL && adapter->port != NULL && \
+ adapter->port[0] != NULL && \
+ adapter->port[0]->netdev != NULL) ? \
+ adapter->port[0]->netdev->name : NULL, \
+ ## args); } while(0)
+#endif
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE (128)
+
+/*
+ * netxen_skb_frag{} holds the DMA mapping info for one SG list entry and
+ * has to be freed when the DMA is complete. It is part of netxen_cmd_buffer{}.
+ */
+struct netxen_skb_frag {
+ u64 dma;
+ u32 length;
+};
+
+/* Following defines are for the state of the buffers */
+#define NETXEN_BUFFER_FREE 0
+#define NETXEN_BUFFER_BUSY 1
+
+/*
+ * There is one netxen_cmd_buffer per skb packet. It is used to save
+ * the DMA info needed for pci_unmap_page().
+ */
+struct netxen_cmd_buffer {
+ struct sk_buff *skb;
+ struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+ u32 total_length;
+ u32 mss;
+ u16 port;
+ u8 cmd;
+ u8 frag_count;
+ unsigned long time_stamp;
+ u32 state;
+ u32 no_of_descriptors;
+};
+
+/* In rx_buffer, we do not need multiple fragments since it is a single buffer */
+struct netxen_rx_buffer {
+ struct sk_buff *skb;
+ u64 dma;
+ u16 ref_handle;
+ u16 state;
+};
+
+/* Board types */
+#define NETXEN_NIC_GBE 0x01
+#define NETXEN_NIC_XGBE 0x02
+
+/*
+ * One hardware_context{} per adapter; it contains interrupt info as
+ * well as shared hardware info.
+ */
+struct netxen_hardware_context {
+ struct pci_dev *pdev;
+ void __iomem *pci_base0;
+ void __iomem *pci_base1;
+ void __iomem *pci_base2;
+
+ u8 revision_id;
+ u16 board_type;
+ u16 max_ports;
+ struct netxen_board_info boardcfg;
+ u32 xg_linkup;
+ u32 qg_linksup;
+ /* Address of cmd ring in Phantom */
+ struct cmd_desc_type0 *cmd_desc_head;
+ char *pauseaddr;
+ struct pci_dev *cmd_desc_pdev;
+ dma_addr_t cmd_desc_phys_addr;
+ dma_addr_t pause_physaddr;
+ struct pci_dev *pause_pdev;
+ struct netxen_adapter *adapter;
+};
+
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
+#define ETHERNET_FCS_SIZE 4
+
+struct netxen_adapter_stats {
+ u64 ints;
+ u64 hostints;
+ u64 otherints;
+ u64 process_rcv;
+ u64 process_xmit;
+ u64 noxmitdone;
+ u64 xmitcsummed;
+ u64 post_called;
+ u64 posted;
+ u64 lastposted;
+ u64 goodskbposts;
+};
+
+/*
+ * Rcv Descriptor Context. There is one of these per Rcv Descriptor ring:
+ * one ring for normal packets, one for jumbo frames, and possibly others.
+ */
+struct netxen_rcv_desc_ctx {
+ u32 flags;
+ u32 producer;
+ u32 rcv_pending; /* Num of bufs posted in phantom */
+ u32 rcv_free; /* Num of bufs in free list */
+ dma_addr_t phys_addr;
+ struct pci_dev *phys_pdev;
+ struct rcv_desc *desc_head; /* address of rx ring in Phantom */
+ u32 max_rx_desc_count;
+ u32 dma_size;
+ u32 skb_size;
+ struct netxen_rx_buffer *rx_buf_arr; /* rx buffers for receive */
+ int begin_alloc;
+};
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information relevant to the receive
+ * path must live in this structure; global data may be kept elsewhere.
+ */
+struct netxen_recv_context {
+ struct netxen_rcv_desc_ctx rcv_desc[NUM_RCV_DESC_RINGS];
+ u32 status_rx_producer;
+ u32 status_rx_consumer;
+ dma_addr_t rcv_status_desc_phys_addr;
+ struct pci_dev *rcv_status_desc_pdev;
+ struct status_desc *rcv_status_desc_head;
+};
+
+#define NETXEN_NIC_MSI_ENABLED 0x02
+
+struct netxen_drvops;
+
+struct netxen_adapter {
+ struct netxen_hardware_context ahw;
+ int port_count; /* Number of configured ports */
+ int active_ports; /* Number of open ports */
+ struct netxen_port *port[NETXEN_MAX_PORTS]; /* ptr to each port */
+ spinlock_t tx_lock;
+ spinlock_t lock;
+ struct work_struct watchdog_task;
+ struct work_struct tx_timeout_task;
+ struct timer_list watchdog_timer;
+
+ u32 curr_window;
+
+ u32 cmd_producer;
+ u32 cmd_consumer;
+
+ u32 last_cmd_consumer;
+ u32 max_tx_desc_count;
+ u32 max_rx_desc_count;
+ u32 max_jumbo_rx_desc_count;
+ /* Num of instances active on cmd buffer ring */
+ u32 proc_cmd_buf_counter;
+
+ u32 num_threads, total_threads; /* Used to keep track of xmit threads */
+
+ u32 flags;
+ u32 irq;
+ int driver_mismatch;
+ u32 temp;
+
+ struct netxen_adapter_stats stats;
+
+ struct netxen_cmd_buffer *cmd_buf_arr; /* Command buffers for xmit */
+
+ /*
+ * Receive instances. These can be either one per port,
+ * or one per peg, etc.
+ */
+ struct netxen_recv_context recv_ctx[MAX_RCV_CTX];
+
+ int is_up;
+ int work_done;
+ struct netxen_drvops *ops;
+}; /* netxen_adapter structure */
+
+/* Max number of xmit producer threads that can run simultaneously */
+#define MAX_XMIT_PRODUCERS 16
+
+struct netxen_port_stats {
+ u64 rcvdbadskb;
+ u64 xmitcalled;
+ u64 xmitedframes;
+ u64 xmitfinished;
+ u64 badskblen;
+ u64 nocmddescriptor;
+ u64 polled;
+ u64 uphappy;
+ u64 updropped;
+ u64 uplcong;
+ u64 uphcong;
+ u64 upmcong;
+ u64 updunno;
+ u64 skbfreed;
+ u64 txdropped;
+ u64 txnullskb;
+ u64 csummed;
+ u64 no_rcv;
+ u64 rxbytes;
+ u64 txbytes;
+};
+
+struct netxen_port {
+ struct netxen_adapter *adapter;
+
+ u16 portnum; /* GBE port number */
+ u16 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+
+ int flags;
+
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+ struct netxen_port_stats stats;
+};
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base0 + (off))
+#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START)
+#define PCI_OFFSET_THIRD_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START)
+
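+/*
+ * Translate a CRB offset to its ioremapped virtual address; returns NULL
+ * when the offset does not fall inside any of the three mapped page groups.
+ */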
+static inline void __iomem *pci_base_offset(struct netxen_adapter *adapter,
+ unsigned long off)
+{
+ if ((off < FIRST_PAGE_GROUP_END) && (off >= FIRST_PAGE_GROUP_START)) {
+ return (adapter->ahw.pci_base0 + off);
+ } else if ((off < SECOND_PAGE_GROUP_END) &&
+ (off >= SECOND_PAGE_GROUP_START)) {
+ return (adapter->ahw.pci_base1 + off - SECOND_PAGE_GROUP_START);
+ } else if ((off < THIRD_PAGE_GROUP_END) &&
+ (off >= THIRD_PAGE_GROUP_START)) {
+ return (adapter->ahw.pci_base2 + off - THIRD_PAGE_GROUP_START);
+ }
+ return NULL;
+}
+
+static inline void __iomem *pci_base(struct netxen_adapter *adapter,
+ unsigned long off)
+{
+ if ((off < FIRST_PAGE_GROUP_END) && (off >= FIRST_PAGE_GROUP_START)) {
+ return adapter->ahw.pci_base0;
+ } else if ((off < SECOND_PAGE_GROUP_END) &&
+ (off >= SECOND_PAGE_GROUP_START)) {
+ return adapter->ahw.pci_base1;
+ } else if ((off < THIRD_PAGE_GROUP_END) &&
+ (off >= THIRD_PAGE_GROUP_START)) {
+ return adapter->ahw.pci_base2;
+ }
+ return NULL;
+}
+
+struct netxen_drvops {
+ int (*enable_phy_interrupts) (struct netxen_adapter *, int);
+ int (*disable_phy_interrupts) (struct netxen_adapter *, int);
+ void (*handle_phy_intr) (struct netxen_adapter *);
+ int (*macaddr_set) (struct netxen_port *, netxen_ethernet_macaddr_t);
+ int (*set_mtu) (struct netxen_port *, int);
+ int (*set_promisc) (struct netxen_adapter *, int,
+ netxen_niu_prom_mode_t);
+ int (*unset_promisc) (struct netxen_adapter *, int,
+ netxen_niu_prom_mode_t);
+ int (*phy_read) (struct netxen_adapter *, long phy, long reg, u32 *);
+ int (*phy_write) (struct netxen_adapter *, long phy, long reg, u32 val);
+ int (*init_port) (struct netxen_adapter *, int);
+ void (*init_niu) (struct netxen_adapter *);
+ int (*stop_port) (struct netxen_adapter *, int);
+};
+
+extern char netxen_nic_driver_name[];
+
+int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter,
+ int port);
+int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter,
+ int port);
+int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter,
+ int port);
+int netxen_niu_gbe_disable_phy_interrupts(struct netxen_adapter *adapter,
+ int port);
+int netxen_niu_xgbe_clear_phy_interrupts(struct netxen_adapter *adapter,
+ int port);
+int netxen_niu_gbe_clear_phy_interrupts(struct netxen_adapter *adapter,
+ int port);
+void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter);
+void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter);
+void netxen_niu_gbe_set_mii_mode(struct netxen_adapter *adapter, int port,
+ long enable);
+void netxen_niu_gbe_set_gmii_mode(struct netxen_adapter *adapter, int port,
+ long enable);
+int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy, long reg,
+ __le32 * readval);
+int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long phy,
+ long reg, __le32 val);
+
+/* Functions available from netxen_nic_hw.c */
+int netxen_nic_set_mtu_xgb(struct netxen_port *port, int new_mtu);
+int netxen_nic_set_mtu_gb(struct netxen_port *port, int new_mtu);
+void netxen_nic_init_niu_gb(struct netxen_adapter *adapter);
+void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw);
+void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val);
+int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off);
+void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value);
+void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 * value);
+
+int netxen_nic_get_board_info(struct netxen_adapter *adapter);
+int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data,
+ int len);
+int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data,
+ int len);
+void netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
+ unsigned long off, int data);
+
+/* Functions from netxen_nic_init.c */
+void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
+void netxen_load_firmware(struct netxen_adapter *adapter);
+int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
+int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data);
+int netxen_rom_se(struct netxen_adapter *adapter, int addr);
+int netxen_do_rom_se(struct netxen_adapter *adapter, int addr);
+
+/* Functions from netxen_nic_isr.c */
+void netxen_nic_isr_other(struct netxen_adapter *adapter);
+void netxen_indicate_link_status(struct netxen_adapter *adapter, u32 port,
+ u32 link);
+void netxen_handle_port_int(struct netxen_adapter *adapter, u32 port,
+ u32 enable);
+void netxen_nic_stop_all_ports(struct netxen_adapter *adapter);
+void netxen_initialize_adapter_sw(struct netxen_adapter *adapter);
+void netxen_initialize_adapter_hw(struct netxen_adapter *adapter);
+void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr,
+ struct pci_dev **used_dev);
+void netxen_initialize_adapter_ops(struct netxen_adapter *adapter);
+int netxen_init_firmware(struct netxen_adapter *adapter);
+void netxen_free_hw_resources(struct netxen_adapter *adapter);
+void netxen_tso_check(struct netxen_adapter *adapter,
+ struct cmd_desc_type0 *desc, struct sk_buff *skb);
+int netxen_nic_hw_resources(struct netxen_adapter *adapter);
+void netxen_nic_clear_stats(struct netxen_adapter *adapter);
+int
+netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
+ struct netxen_port *port);
+int netxen_nic_rx_has_work(struct netxen_adapter *adapter);
+int netxen_nic_tx_has_work(struct netxen_adapter *adapter);
+void netxen_watchdog_task(unsigned long v);
+void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
+ u32 ringid);
+void netxen_process_cmd_ring(unsigned long data);
+u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
+void netxen_nic_set_multi(struct net_device *netdev);
+int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
+int netxen_nic_set_mac(struct net_device *netdev, void *p);
+struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
+
+static inline void netxen_nic_disable_int(struct netxen_adapter *adapter)
+{
+ /*
+ * ISR_INT_MASK: Can be read from window 0 or 1.
+ */
+ writel(0x7ff, PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_MASK));
+
+}
+
+static inline void netxen_nic_enable_int(struct netxen_adapter *adapter)
+{
+ u32 mask;
+
+ switch (adapter->ahw.board_type) {
+ case NETXEN_NIC_GBE:
+ mask = 0x77b;
+ break;
+ case NETXEN_NIC_XGBE:
+ mask = 0x77f;
+ break;
+ default:
+ mask = 0x7ff;
+ break;
+ }
+
+ writel(mask, PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_MASK));
+
+ if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
+ mask = 0xbff;
+ writel(mask, PCI_OFFSET_SECOND_RANGE(adapter,
+ ISR_INT_TARGET_MASK));
+ }
+}
+
+/*
+ * NetXen Board information
+ */
+
+#define NETXEN_MAX_SHORT_NAME 16
+struct netxen_brdinfo {
+ netxen_brdtype_t brdtype; /* type of board */
+ long ports; /* max no of physical ports */
+ char short_name[NETXEN_MAX_SHORT_NAME];
+};
+
+static const struct netxen_brdinfo netxen_boards[] = {
+ {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"},
+ {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"},
+ {NETXEN_BRDTYPE_P2_SB31_10G_IMEZ, 2, "XGb IMEZ"},
+ {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"},
+ {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"},
+ {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"},
+};
+
+#define NUM_SUPPORTED_BOARDS (sizeof(netxen_boards)/sizeof(struct netxen_brdinfo))
+
+static inline void get_brd_port_by_type(u32 type, int *ports)
+{
+ int i, found = 0;
+ for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+ if (netxen_boards[i].brdtype == type) {
+ *ports = netxen_boards[i].ports;
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ *ports = 0;
+}
+
+static inline void get_brd_name_by_type(u32 type, char *name)
+{
+ int i, found = 0;
+ for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+ if (netxen_boards[i].brdtype == type) {
+ strcpy(name, netxen_boards[i].short_name);
+ found = 1;
+ break;
+ }
+
+ }
+ if (!found)
+ strcpy(name, "Unknown");
+}
+
+int netxen_is_flash_supported(struct netxen_adapter *adapter);
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 mac[]);
+
+extern void netxen_change_ringparam(struct netxen_adapter *adapter);
+extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
+ int *valp);
+
+extern struct ethtool_ops netxen_nic_ethtool_ops;
+
+#endif /* _NETXEN_NIC_H_ */
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
new file mode 100644
index 000000000000..9a914aeba5bc
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -0,0 +1,741 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ * info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ *
+ * ethtool support for netxen nic
+ *
+ */
+
+#include <linux/types.h>
+#include <asm/uaccess.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/version.h>
+
+#include "netxen_nic_hw.h"
+#include "netxen_nic.h"
+#include "netxen_nic_phan_reg.h"
+#include "netxen_nic_ioctl.h"
+
+struct netxen_nic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define NETXEN_NIC_STAT(m) sizeof(((struct netxen_port *)0)->m), \
+ offsetof(struct netxen_port, m)
+
+#define NETXEN_NIC_PORT_WINDOW 0x10000
+#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF
+
+static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = {
+ {"rcvd_bad_skb", NETXEN_NIC_STAT(stats.rcvdbadskb)},
+ {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)},
+ {"xmited_frames", NETXEN_NIC_STAT(stats.xmitedframes)},
+ {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)},
+ {"bad_skb_len", NETXEN_NIC_STAT(stats.badskblen)},
+ {"no_cmd_desc", NETXEN_NIC_STAT(stats.nocmddescriptor)},
+ {"polled", NETXEN_NIC_STAT(stats.polled)},
+ {"uphappy", NETXEN_NIC_STAT(stats.uphappy)},
+ {"updropped", NETXEN_NIC_STAT(stats.updropped)},
+ {"uplcong", NETXEN_NIC_STAT(stats.uplcong)},
+ {"uphcong", NETXEN_NIC_STAT(stats.uphcong)},
+ {"upmcong", NETXEN_NIC_STAT(stats.upmcong)},
+ {"updunno", NETXEN_NIC_STAT(stats.updunno)},
+ {"skb_freed", NETXEN_NIC_STAT(stats.skbfreed)},
+ {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)},
+ {"tx_null_skb", NETXEN_NIC_STAT(stats.txnullskb)},
+ {"csummed", NETXEN_NIC_STAT(stats.csummed)},
+ {"no_rcv", NETXEN_NIC_STAT(stats.no_rcv)},
+ {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)},
+ {"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)},
+};
+
+#define NETXEN_NIC_STATS_LEN \
+ (sizeof(netxen_nic_gstrings_stats) / sizeof(struct netxen_nic_stats))
+
+static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register_Test_offline", "EEPROM_Test_offline",
+ "Interrupt_Test_offline", "Loopback_Test_offline",
+ "Link_Test_on_offline"
+};
+
+#define NETXEN_NIC_TEST_LEN (sizeof(netxen_nic_gstrings_test) / ETH_GSTRING_LEN)
+
+#define NETXEN_NIC_REGS_COUNT 42
+#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32))
+#define NETXEN_MAX_EEPROM_LEN 1024
+
+static int netxen_nic_get_eeprom_len(struct net_device *dev)
+{
+ struct netxen_port *port = netdev_priv(dev);
+ struct netxen_adapter *adapter = port->adapter;
+ int n;
+
+ if ((netxen_rom_fast_read(adapter, 0, &n) == 0)
+ && (n & NETXEN_ROM_ROUNDUP)) {
+ n &= ~NETXEN_ROM_ROUNDUP;
+ if (n < NETXEN_MAX_EEPROM_LEN)
+ return n;
+ }
+ return 0;
+}
+
+static void
+netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct netxen_port *port = netdev_priv(dev);
+ struct netxen_adapter *adapter = port->adapter;
+ u32 fw_major = 0;
+ u32 fw_minor = 0;
+ u32 fw_build = 0;
+
+ strncpy(drvinfo->driver, "netxen_nic", 32);
+ strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
+ fw_major = readl(NETXEN_CRB_NORMALIZE(adapter,
+ NETXEN_FW_VERSION_MAJOR));
+ fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter,
+ NETXEN_FW_VERSION_MINOR));
+ fw_build = readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB));
+ sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strncpy(drvinfo->bus_info, pci_name(port->pdev), 32);
+ drvinfo->n_stats = NETXEN_NIC_STATS_LEN;
+ drvinfo->testinfo_len = NETXEN_NIC_TEST_LEN;
+ drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
+ drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
+}
+
+static int
+netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netxen_port *port = netdev_priv(dev);
+ struct netxen_adapter *adapter = port->adapter;
+ struct netxen_board_info *boardinfo = &adapter->ahw.boardcfg;
+
+ /* read which mode */
+ if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ ecmd->advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ ecmd->port = PORT_TP;
+
+ if (netif_running(dev)) {
+ ecmd->speed = port->link_speed;
+ ecmd->duplex = port->link_duplex;
+ } else
+ return -EIO; /* link absent */
+ } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
+ ecmd->supported = (SUPPORTED_TP |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full);
+ ecmd->advertising = (ADVERTISED_TP |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_10000baseT_Full);
+ ecmd->port = PORT_TP;
+
+ ecmd->speed = SPEED_10000;
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ } else
+ return -EIO;
+
+ ecmd->phy_address = port->portnum;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ switch ((netxen_brdtype_t) boardinfo->board_type) {
+ case NETXEN_BRDTYPE_P2_SB35_4G:
+ case NETXEN_BRDTYPE_P2_SB31_2G:
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
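+ /* fall through: these GbE boards also take the TP settings below */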
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->autoneg = (boardinfo->board_type ==
+ NETXEN_BRDTYPE_P2_SB31_10G_CX4) ?
+ (AUTONEG_DISABLE) : (port->link_autoneg);
+ break;
+ case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ ecmd->supported |= SUPPORTED_MII;
+ ecmd->advertising |= ADVERTISED_MII;
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ default:
+ printk(KERN_ERR "netxen-nic: Unsupported board model %d\n",
+ (netxen_brdtype_t) boardinfo->board_type);
+ return -EIO;
+
+ }
+
+ return 0;
+}
+
+static int
+netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netxen_port *port = netdev_priv(dev);
+ struct netxen_adapter *adapter = port->adapter;
+ __le32 status;
+
+ /* read which mode */
+ if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
+ /* autonegotiation */
+ if (adapter->ops->phy_write
+ && adapter->ops->phy_write(adapter, port->portnum,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+ (__le32) ecmd->autoneg) != 0)
+ return -EIO;
+ else
+ port->link_autoneg = ecmd->autoneg;
+
+ if (adapter->ops->phy_read
+ && adapter->ops->phy_read(adapter, port->portnum,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ &status) != 0)
+ return -EIO;
+
+ /* speed */
+ switch (ecmd->speed) {
+ case SPEED_10:
+ netxen_set_phy_speed(status, 0);
+ break;
+ case SPEED_100:
+ netxen_set_phy_speed(status, 1);
+ break;
+ case SPEED_1000:
+ netxen_set_phy_speed(status, 2);
+ break;
+ }
+ /* set duplex mode */
+ if (ecmd->duplex == DUPLEX_HALF)
+ netxen_clear_phy_duplex(status);
+ if (ecmd->duplex == DUPLEX_FULL)
+ netxen_set_phy_duplex(status);
+ if (adapter->ops->phy_write
+ && adapter->ops->phy_write(adapter, port->portnum,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ *((int *)&status)) != 0)
+ return -EIO;
+ else {
+ port->link_speed = ecmd->speed;
+ port->link_duplex = ecmd->duplex;
+ }
+ } else
+ return -EOPNOTSUPP;
+
+ if (netif_running(dev)) {
+ dev->stop(dev);
+ dev->open(dev);
+ }
+ return 0;
+}
+
+static int netxen_nic_get_regs_len(struct net_device *dev)
+{
+ return NETXEN_NIC_REGS_LEN;
+}
+
+struct netxen_niu_regs {
+ __le32 reg[NETXEN_NIC_REGS_COUNT];
+};
+
+static struct netxen_niu_regs niu_registers[] = {
+ {
+ /* GB Mode */
+ {
+ NETXEN_NIU_GB_SERDES_RESET,
+ NETXEN_NIU_GB0_MII_MODE,
+ NETXEN_NIU_GB1_MII_MODE,
+ NETXEN_NIU_GB2_MII_MODE,
+ NETXEN_NIU_GB3_MII_MODE,
+ NETXEN_NIU_GB0_GMII_MODE,
+ NETXEN_NIU_GB1_GMII_MODE,
+ NETXEN_NIU_GB2_GMII_MODE,
+ NETXEN_NIU_GB3_GMII_MODE,
+ NETXEN_NIU_REMOTE_LOOPBACK,
+ NETXEN_NIU_GB0_HALF_DUPLEX,
+ NETXEN_NIU_GB1_HALF_DUPLEX,
+ NETXEN_NIU_RESET_SYS_FIFOS,
+ NETXEN_NIU_GB_CRC_DROP,
+ NETXEN_NIU_GB_DROP_WRONGADDR,
+ NETXEN_NIU_TEST_MUX_CTL,
+
+ NETXEN_NIU_GB_MAC_CONFIG_0(0),
+ NETXEN_NIU_GB_MAC_CONFIG_1(0),
+ NETXEN_NIU_GB_HALF_DUPLEX_CTRL(0),
+ NETXEN_NIU_GB_MAX_FRAME_SIZE(0),
+ NETXEN_NIU_GB_TEST_REG(0),
+ NETXEN_NIU_GB_MII_MGMT_CONFIG(0),
+ NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
+ NETXEN_NIU_GB_MII_MGMT_ADDR(0),
+ NETXEN_NIU_GB_MII_MGMT_CTRL(0),
+ NETXEN_NIU_GB_MII_MGMT_STATUS(0),
+ NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
+ NETXEN_NIU_GB_INTERFACE_CTRL(0),
+ NETXEN_NIU_GB_INTERFACE_STATUS(0),
+ NETXEN_NIU_GB_STATION_ADDR_0(0),
+ NETXEN_NIU_GB_STATION_ADDR_1(0),
+ -1,
+ }
+ },
+ {
+ /* XG Mode */
+ {
+ NETXEN_NIU_XG_SINGLE_TERM,
+ NETXEN_NIU_XG_DRIVE_HI,
+ NETXEN_NIU_XG_DRIVE_LO,
+ NETXEN_NIU_XG_DTX,
+ NETXEN_NIU_XG_DEQ,
+ NETXEN_NIU_XG_WORD_ALIGN,
+ NETXEN_NIU_XG_RESET,
+ NETXEN_NIU_XG_POWER_DOWN,
+ NETXEN_NIU_XG_RESET_PLL,
+ NETXEN_NIU_XG_SERDES_LOOPBACK,
+ NETXEN_NIU_XG_DO_BYTE_ALIGN,
+ NETXEN_NIU_XG_TX_ENABLE,
+ NETXEN_NIU_XG_RX_ENABLE,
+ NETXEN_NIU_XG_STATUS,
+ NETXEN_NIU_XG_PAUSE_THRESHOLD,
+ NETXEN_NIU_XGE_CONFIG_0,
+ NETXEN_NIU_XGE_CONFIG_1,
+ NETXEN_NIU_XGE_IPG,
+ NETXEN_NIU_XGE_STATION_ADDR_0_HI,
+ NETXEN_NIU_XGE_STATION_ADDR_0_1,
+ NETXEN_NIU_XGE_STATION_ADDR_1_LO,
+ NETXEN_NIU_XGE_STATUS,
+ NETXEN_NIU_XGE_MAX_FRAME_SIZE,
+ NETXEN_NIU_XGE_PAUSE_FRAME_VALUE,
+ NETXEN_NIU_XGE_TX_BYTE_CNT,
+ NETXEN_NIU_XGE_TX_FRAME_CNT,
+ NETXEN_NIU_XGE_RX_BYTE_CNT,
+ NETXEN_NIU_XGE_RX_FRAME_CNT,
+ NETXEN_NIU_XGE_AGGR_ERROR_CNT,
+ NETXEN_NIU_XGE_MULTICAST_FRAME_CNT,
+ NETXEN_NIU_XGE_UNICAST_FRAME_CNT,
+ NETXEN_NIU_XGE_CRC_ERROR_CNT,
+ NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR,
+ NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR,
+ NETXEN_NIU_XGE_LOCAL_ERROR_CNT,
+ NETXEN_NIU_XGE_REMOTE_ERROR_CNT,
+ NETXEN_NIU_XGE_CONTROL_CHAR_CNT,
+ NETXEN_NIU_XGE_PAUSE_FRAME_CNT,
+ -1,
+ }
+ }
+};
+
+static void
+netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ struct netxen_port *port = netdev_priv(dev);
+ struct netxen_adapter *adapter = port->adapter;
+ __le32 mode, *regs_buff = p;
+ void __iomem *addr;
+ int i, window;
+
+ memset(p, 0, NETXEN_NIC_REGS_LEN);
+ regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
+ (port->pdev)->device;
+ /* which mode */
+ NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_MODE, &regs_buff[0]);
+ mode = regs_buff[0];
+
+ /* Common registers to all the modes */
+ NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER,
+ &regs_buff[2]);
+ /* GB/XGB Mode */
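+	/* map the NIU mode register value to an index into niu_registers[]:
+	 * 0 selects the GB register list, 1 the XG list */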
+ mode = (mode / 2) - 1;
+ window = 0;
+ if (mode <= 1) {
+ for (i = 3; niu_registers[mode].reg[i - 3] != -1; i++) {
+ /* GB: port specific registers */
+ if (mode == 0 && i >= 19)
+ window = port->portnum * NETXEN_NIC_PORT_WINDOW;
+
+ NETXEN_NIC_LOCKED_READ_REG(niu_registers[mode].
+ reg[i - 3] + window,
+ &regs_buff[i]);
+ }
+
+ }
+}
+
+static void
+netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
+ /* options can be added depending upon the mode */
+ wol->wolopts = 0;
+}
+
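+/*
+ * Report link state: read the PHY status register on GbE boards, or the
+ * XG link state from the CRB on 10GbE boards.
+ */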
+static u32 netxen_nic_get_link(struct net_device *dev)
+{
+ struct netxen_port *port = netdev_priv(dev);
+ struct netxen_adapter *adapter = port->adapter;
+ __le32 status;
+
+ /* read which mode */
+ if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
+ if (adapter->ops->phy_read
+ && adapter->ops->phy_read(adapter, port->portnum,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ &status) != 0)
+ return -EIO;
+ else
+ return (netxen_get_phy_link(status));
+ } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
+ int val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
+ return val == XG_LINK_UP;
+ }
+ return -EIO;
+}
+
+static int
+netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 * bytes)
+{
+ struct netxen_port *port = netdev_priv(dev);
+ struct netxen_adapter *adapter = port->adapter;
+ int offset;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = (port->pdev)->vendor | ((port->pdev)->device << 16);
+ for (offset = 0; offset < eeprom->len; offset++)
+ if (netxen_rom_fast_read
+ (adapter, (8 * offset) + 8, (int *)eeprom->data) == -1)
+ return -EIO;
+ return 0;
+}
+
+static void
+netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
+{
+ struct netxen_port *port = netdev_priv(dev);
+ struct netxen_adapter *adapter = port->adapter;
+ int i, j;
+
+ ring->rx_pending = 0;
+ for (i = 0; i < MAX_RCV_CTX; ++i) {
+ for (j = 0; j < NUM_RCV_DESC_RINGS; j++)
+ ring->rx_pending +=
+ adapter->recv_ctx[i].rcv_desc[j].rcv_pending;
+ }
+
+ ring->rx_max_pending = adapter->max_rx_desc_count;
+ ring->tx_max_pending = adapter->max_tx_desc_count;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static void
+netxen_nic_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct netxen_port *port = netdev_priv(dev);
+ struct netxen_adapter *adapter = port->adapter;
+ __le32 val;
+
+ if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
+ /* get flow control settings */
+ netxen_nic_read_w0(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum),
+ (u32 *) & val);
+ pause->rx_pause = netxen_gb_get_rx_flowctl(val);
+ pause->tx_pause = netxen_gb_get_tx_flowctl(val);
+ /* get autoneg settings */
+ pause->autoneg = port->link_autoneg;
+ }
+}
+
+static int
+netxen_nic_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct netxen_port *port = netdev_priv(dev);
+ struct netxen_adapter *adapter = port->adapter;
+ __le32 val;
+ unsigned int autoneg;
+
+ /* read mode */
+ if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
+ /* set flow control */
+ netxen_nic_read_w0(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum),
+ (u32 *) & val);
+ if (pause->tx_pause)
+ netxen_gb_tx_flowctl(val);
+ else
+ netxen_gb_unset_tx_flowctl(val);
+ if (pause->rx_pause)
+ netxen_gb_rx_flowctl(val);
+ else
+ netxen_gb_unset_rx_flowctl(val);
+
+ netxen_nic_write_w0(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum),
+ *(u32 *) (&val));
+ /* set autoneg */
+ autoneg = pause->autoneg;
+ if (adapter->ops->phy_write
+ && adapter->ops->phy_write(adapter, port->portnum,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+ (__le32) autoneg) != 0)
+ return -EIO;
+ else {
+ port->link_autoneg = pause->autoneg;
+ return 0;
+ }
+ } else
+ return -EOPNOTSUPP;
+}
+
+static int netxen_nic_reg_test(struct net_device *dev)
+{
+ struct netxen_port *port = netdev_priv(dev);
+ struct netxen_adapter *adapter = port->adapter;
+ u32 data_read, data_written, save;
+ __le32 mode;
+
+	/*
+	 * Read which mode we are in, then test the "Read Only" registers
+	 * by writing to them.
+	 */
+ netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
+ if (netxen_get_niu_enable_ge(mode)) { /* GB Mode */
+ netxen_nic_read_w0(adapter,
+ NETXEN_NIU_GB_MII_MGMT_STATUS(port->portnum),
+ &data_read);
+
+ save = data_read;
+ if (data_read)
+ data_written = data_read & NETXEN_NIC_INVALID_DATA;
+ else
+ data_written = NETXEN_NIC_INVALID_DATA;
+ netxen_nic_write_w0(adapter,
+ NETXEN_NIU_GB_MII_MGMT_STATUS(port->
+ portnum),
+ data_written);
+ netxen_nic_read_w0(adapter,
+ NETXEN_NIU_GB_MII_MGMT_STATUS(port->portnum),
+ &data_read);
+
+ if (data_written == data_read) {
+ netxen_nic_write_w0(adapter,
+ NETXEN_NIU_GB_MII_MGMT_STATUS(port->
+ portnum),
+ save);
+
+ return 0;
+ }
+
+ /* netxen_niu_gb_mii_mgmt_indicators is read only */
+ netxen_nic_read_w0(adapter,
+ NETXEN_NIU_GB_MII_MGMT_INDICATE(port->
+ portnum),
+ &data_read);
+
+ save = data_read;
+ if (data_read)
+ data_written = data_read & NETXEN_NIC_INVALID_DATA;
+ else
+ data_written = NETXEN_NIC_INVALID_DATA;
+ netxen_nic_write_w0(adapter,
+ NETXEN_NIU_GB_MII_MGMT_INDICATE(port->
+ portnum),
+ data_written);
+
+ netxen_nic_read_w0(adapter,
+ NETXEN_NIU_GB_MII_MGMT_INDICATE(port->
+ portnum),
+ &data_read);
+
+ if (data_written == data_read) {
+ netxen_nic_write_w0(adapter,
+ NETXEN_NIU_GB_MII_MGMT_INDICATE
+ (port->portnum), save);
+ return 0;
+ }
+
+ /* netxen_niu_gb_interface_status is read only */
+ netxen_nic_read_w0(adapter,
+ NETXEN_NIU_GB_INTERFACE_STATUS(port->
+ portnum),
+ &data_read);
+
+ save = data_read;
+ if (data_read)
+ data_written = data_read & NETXEN_NIC_INVALID_DATA;
+ else
+ data_written = NETXEN_NIC_INVALID_DATA;
+ netxen_nic_write_w0(adapter,
+ NETXEN_NIU_GB_INTERFACE_STATUS(port->
+ portnum),
+ data_written);
+
+ netxen_nic_read_w0(adapter,
+ NETXEN_NIU_GB_INTERFACE_STATUS(port->
+ portnum),
+ &data_read);
+
+ if (data_written == data_read) {
+ netxen_nic_write_w0(adapter,
+ NETXEN_NIU_GB_INTERFACE_STATUS
+ (port->portnum), save);
+
+ return 0;
+ }
+ } /* GB Mode */
+ return 1;
+}
+
+static int netxen_nic_diag_test_count(struct net_device *dev)
+{
+ return NETXEN_NIC_TEST_LEN;
+}
+
+static void
+netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+ u64 * data)
+{
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* offline tests */
+ /* link test */
+ if (!(data[4] = (u64) netxen_nic_get_link(dev)))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (netif_running(dev))
+ dev->stop(dev);
+
+ /* register tests */
+ if (!(data[0] = netxen_nic_reg_test(dev)))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ /* other tests pass as of now */
+ data[1] = data[2] = data[3] = 1;
+ if (netif_running(dev))
+ dev->open(dev);
+ } else { /* online tests */
+ /* link test */
+ if (!(data[4] = (u64) netxen_nic_get_link(dev)))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* other tests pass by default */
+ data[0] = data[1] = data[2] = data[3] = 1;
+ }
+}
+
+static void
+netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+ int index;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *netxen_nic_gstrings_test,
+ NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ netxen_nic_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static int netxen_nic_get_stats_count(struct net_device *dev)
+{
+ return NETXEN_NIC_STATS_LEN;
+}
+
+static void
+netxen_nic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ struct netxen_port *port = netdev_priv(dev);
+ int index;
+
+ for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
+ char *p =
+ (char *)port + netxen_nic_gstrings_stats[index].stat_offset;
+ data[index] =
+ (netxen_nic_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *) p : *(u32 *) p;
+ }
+
+}
+
+struct ethtool_ops netxen_nic_ethtool_ops = {
+ .get_settings = netxen_nic_get_settings,
+ .set_settings = netxen_nic_set_settings,
+ .get_drvinfo = netxen_nic_get_drvinfo,
+ .get_regs_len = netxen_nic_get_regs_len,
+ .get_regs = netxen_nic_get_regs,
+ .get_wol = netxen_nic_get_wol,
+ .get_link = netxen_nic_get_link,
+ .get_eeprom_len = netxen_nic_get_eeprom_len,
+ .get_eeprom = netxen_nic_get_eeprom,
+ .get_ringparam = netxen_nic_get_ringparam,
+ .get_pauseparam = netxen_nic_get_pauseparam,
+ .set_pauseparam = netxen_nic_set_pauseparam,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+ .self_test_count = netxen_nic_diag_test_count,
+ .self_test = netxen_nic_diag_test,
+ .get_strings = netxen_nic_get_strings,
+ .get_stats_count = netxen_nic_get_stats_count,
+ .get_ethtool_stats = netxen_nic_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
+};
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
new file mode 100644
index 000000000000..72c6ec4ee2a0
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -0,0 +1,678 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ * info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ */
+
+#ifndef __NETXEN_NIC_HDR_H_
+#define __NETXEN_NIC_HDR_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#include <asm/semaphore.h>
+#include <linux/spinlock.h>
+#include <asm/irq.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <asm/uaccess.h>
+#include <asm/string.h> /* for memset */
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+typedef __le32 netxen_crbword_t; /* single word in CRB space */
+
+enum {
+ NETXEN_HW_H0_CH_HUB_ADR = 0x05,
+ NETXEN_HW_H1_CH_HUB_ADR = 0x0E,
+ NETXEN_HW_H2_CH_HUB_ADR = 0x03,
+ NETXEN_HW_H3_CH_HUB_ADR = 0x01,
+ NETXEN_HW_H4_CH_HUB_ADR = 0x06,
+ NETXEN_HW_H5_CH_HUB_ADR = 0x07,
+ NETXEN_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/* Hub 0 */
+enum {
+ NETXEN_HW_MN_CRB_AGT_ADR = 0x15,
+ NETXEN_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/* Hub 1 */
+enum {
+ NETXEN_HW_PS_CRB_AGT_ADR = 0x73,
+ NETXEN_HW_SS_CRB_AGT_ADR = 0x20,
+ NETXEN_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+ NETXEN_HW_QMS_CRB_AGT_ADR = 0x00,
+ NETXEN_HW_SQGS0_CRB_AGT_ADR = 0x01,
+ NETXEN_HW_SQGS1_CRB_AGT_ADR = 0x02,
+ NETXEN_HW_SQGS2_CRB_AGT_ADR = 0x03,
+ NETXEN_HW_SQGS3_CRB_AGT_ADR = 0x04,
+ NETXEN_HW_C2C0_CRB_AGT_ADR = 0x58,
+ NETXEN_HW_C2C1_CRB_AGT_ADR = 0x59,
+ NETXEN_HW_C2C2_CRB_AGT_ADR = 0x5a,
+ NETXEN_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+ NETXEN_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+ NETXEN_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+ NETXEN_HW_RPMX9_CRB_AGT_ADR = 0x12,
+ NETXEN_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/* Hub 2 */
+enum {
+ NETXEN_HW_NIU_CRB_AGT_ADR = 0x31,
+ NETXEN_HW_I2C0_CRB_AGT_ADR = 0x19,
+ NETXEN_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+ NETXEN_HW_SN_CRB_AGT_ADR = 0x10,
+ NETXEN_HW_I2Q_CRB_AGT_ADR = 0x20,
+ NETXEN_HW_LPC_CRB_AGT_ADR = 0x22,
+ NETXEN_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+ NETXEN_HW_QM_CRB_AGT_ADR = 0x66,
+ NETXEN_HW_SQG0_CRB_AGT_ADR = 0x60,
+ NETXEN_HW_SQG1_CRB_AGT_ADR = 0x61,
+ NETXEN_HW_SQG2_CRB_AGT_ADR = 0x62,
+ NETXEN_HW_SQG3_CRB_AGT_ADR = 0x63,
+ NETXEN_HW_RPMX1_CRB_AGT_ADR = 0x09,
+ NETXEN_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+ NETXEN_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+ NETXEN_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/* Hub 3 */
+enum {
+ NETXEN_HW_PH_CRB_AGT_ADR = 0x1A,
+ NETXEN_HW_SRE_CRB_AGT_ADR = 0x50,
+ NETXEN_HW_EG_CRB_AGT_ADR = 0x51,
+ NETXEN_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/* Hub 4 */
+enum {
+ NETXEN_HW_PEGN0_CRB_AGT_ADR = 0x40,
+ NETXEN_HW_PEGN1_CRB_AGT_ADR,
+ NETXEN_HW_PEGN2_CRB_AGT_ADR,
+ NETXEN_HW_PEGN3_CRB_AGT_ADR,
+ NETXEN_HW_PEGNI_CRB_AGT_ADR,
+ NETXEN_HW_PEGND_CRB_AGT_ADR,
+ NETXEN_HW_PEGNC_CRB_AGT_ADR,
+ NETXEN_HW_PEGR0_CRB_AGT_ADR,
+ NETXEN_HW_PEGR1_CRB_AGT_ADR,
+ NETXEN_HW_PEGR2_CRB_AGT_ADR,
+ NETXEN_HW_PEGR3_CRB_AGT_ADR
+};
+
+/* Hub 5 */
+enum {
+ NETXEN_HW_PEGS0_CRB_AGT_ADR = 0x40,
+ NETXEN_HW_PEGS1_CRB_AGT_ADR,
+ NETXEN_HW_PEGS2_CRB_AGT_ADR,
+ NETXEN_HW_PEGS3_CRB_AGT_ADR,
+ NETXEN_HW_PEGSI_CRB_AGT_ADR,
+ NETXEN_HW_PEGSD_CRB_AGT_ADR,
+ NETXEN_HW_PEGSC_CRB_AGT_ADR
+};
+
+/* Hub 6 */
+enum {
+ NETXEN_HW_CAS0_CRB_AGT_ADR = 0x46,
+ NETXEN_HW_CAS1_CRB_AGT_ADR = 0x47,
+ NETXEN_HW_CAS2_CRB_AGT_ADR = 0x48,
+ NETXEN_HW_CAS3_CRB_AGT_ADR = 0x49,
+ NETXEN_HW_NCM_CRB_AGT_ADR = 0x16,
+ NETXEN_HW_TMR_CRB_AGT_ADR = 0x17,
+ NETXEN_HW_XDMA_CRB_AGT_ADR = 0x05,
+ NETXEN_HW_OCM0_CRB_AGT_ADR = 0x06,
+ NETXEN_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/* Floaters - non-existent modules */
+#define NETXEN_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+ NETXEN_HW_PX_MAP_CRB_PH = 0,
+ NETXEN_HW_PX_MAP_CRB_PS,
+ NETXEN_HW_PX_MAP_CRB_MN,
+ NETXEN_HW_PX_MAP_CRB_MS,
+ NETXEN_HW_PX_MAP_CRB_PGR1,
+ NETXEN_HW_PX_MAP_CRB_SRE,
+ NETXEN_HW_PX_MAP_CRB_NIU,
+ NETXEN_HW_PX_MAP_CRB_QMN,
+ NETXEN_HW_PX_MAP_CRB_SQN0,
+ NETXEN_HW_PX_MAP_CRB_SQN1,
+ NETXEN_HW_PX_MAP_CRB_SQN2,
+ NETXEN_HW_PX_MAP_CRB_SQN3,
+ NETXEN_HW_PX_MAP_CRB_QMS,
+ NETXEN_HW_PX_MAP_CRB_SQS0,
+ NETXEN_HW_PX_MAP_CRB_SQS1,
+ NETXEN_HW_PX_MAP_CRB_SQS2,
+ NETXEN_HW_PX_MAP_CRB_SQS3,
+ NETXEN_HW_PX_MAP_CRB_PGN0,
+ NETXEN_HW_PX_MAP_CRB_PGN1,
+ NETXEN_HW_PX_MAP_CRB_PGN2,
+ NETXEN_HW_PX_MAP_CRB_PGN3,
+ NETXEN_HW_PX_MAP_CRB_PGND,
+ NETXEN_HW_PX_MAP_CRB_PGNI,
+ NETXEN_HW_PX_MAP_CRB_PGS0,
+ NETXEN_HW_PX_MAP_CRB_PGS1,
+ NETXEN_HW_PX_MAP_CRB_PGS2,
+ NETXEN_HW_PX_MAP_CRB_PGS3,
+ NETXEN_HW_PX_MAP_CRB_PGSD,
+ NETXEN_HW_PX_MAP_CRB_PGSI,
+ NETXEN_HW_PX_MAP_CRB_SN,
+ NETXEN_HW_PX_MAP_CRB_PGR2,
+ NETXEN_HW_PX_MAP_CRB_EG,
+ NETXEN_HW_PX_MAP_CRB_PH2,
+ NETXEN_HW_PX_MAP_CRB_PS2,
+ NETXEN_HW_PX_MAP_CRB_CAM,
+ NETXEN_HW_PX_MAP_CRB_CAS0,
+ NETXEN_HW_PX_MAP_CRB_CAS1,
+ NETXEN_HW_PX_MAP_CRB_CAS2,
+ NETXEN_HW_PX_MAP_CRB_C2C0,
+ NETXEN_HW_PX_MAP_CRB_C2C1,
+ NETXEN_HW_PX_MAP_CRB_TIMR,
+ NETXEN_HW_PX_MAP_CRB_PGR3,
+ NETXEN_HW_PX_MAP_CRB_RPMX1,
+ NETXEN_HW_PX_MAP_CRB_RPMX2,
+ NETXEN_HW_PX_MAP_CRB_RPMX3,
+ NETXEN_HW_PX_MAP_CRB_RPMX4,
+ NETXEN_HW_PX_MAP_CRB_RPMX5,
+ NETXEN_HW_PX_MAP_CRB_RPMX6,
+ NETXEN_HW_PX_MAP_CRB_RPMX7,
+ NETXEN_HW_PX_MAP_CRB_XDMA,
+ NETXEN_HW_PX_MAP_CRB_I2Q,
+ NETXEN_HW_PX_MAP_CRB_ROMUSB,
+ NETXEN_HW_PX_MAP_CRB_CAS3,
+ NETXEN_HW_PX_MAP_CRB_RPMX0,
+ NETXEN_HW_PX_MAP_CRB_RPMX8,
+ NETXEN_HW_PX_MAP_CRB_RPMX9,
+ NETXEN_HW_PX_MAP_CRB_OCM0,
+ NETXEN_HW_PX_MAP_CRB_OCM1,
+ NETXEN_HW_PX_MAP_CRB_SMB,
+ NETXEN_HW_PX_MAP_CRB_I2C0,
+ NETXEN_HW_PX_MAP_CRB_I2C1,
+ NETXEN_HW_PX_MAP_CRB_LPC,
+ NETXEN_HW_PX_MAP_CRB_PGNC,
+ NETXEN_HW_PX_MAP_CRB_PGR0
+};
+
+/* This field defines CRB adr [31:20] of the agents */
+
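+/* each agent address below is (hub id << 7) | agent id */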
+#define NETXEN_HW_CRB_HUB_AGT_ADR_MN \
+ ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PH \
+ ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_PH_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_MS \
+ ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MS_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PS \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_PS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SS \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_QMS \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_QMS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS0 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS1 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS2 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS3 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C0 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C1 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX4_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX7_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX9_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SMB \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SMB_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_NIU \
+ ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_NIU_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C0 \
+ ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C1 \
+ ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C1_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SRE \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SRE_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_EG \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_EG_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_QMN \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_QM_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN0 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN1 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN2 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN3 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX5_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX6_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX8_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS0 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS1 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS2 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS3 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS3_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNI \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNI_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGND \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGND_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN0 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN1 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN2 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR1 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR2 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR3 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR3_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSI \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSI_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSD \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSD_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS0 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS1 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS2 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS3 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSC \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSC_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAM \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_NCM_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_TIMR \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_TMR_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_XDMA \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_XDMA_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SN \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_SN_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2Q \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_I2Q_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_ROMUSB_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM0 \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM1 \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR)
+
+/*
+ * MAX_RCV_CTX : The number of receive contexts that are available on
+ * the phantom.
+ */
+#define MAX_RCV_CTX 1
+
+#define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034)
+#define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014)
+#define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000)
+#define NETXEN_SRE_L2RE_CTL (NETXEN_CRB_SRE + 0x05000)
+#define NETXEN_SRE_BUF_CTL (NETXEN_CRB_SRE + 0x01000)
+
+#define NETXEN_DMA_BASE(U) (NETXEN_CRB_PCIX_MD + 0x20000 + ((U)<<16))
+#define NETXEN_DMA_COMMAND(U) (NETXEN_DMA_BASE(U) + 0x00008)
+
+#define NETXEN_I2Q_CLR_PCI_HI (NETXEN_CRB_I2Q + 0x00034)
+
+#define PEG_NETWORK_BASE(N) (NETXEN_CRB_PEG_NET_0 + (((N)&3) << 20))
+#define CRB_REG_EX_PC 0x3c
+
+#define ROMUSB_GLB (NETXEN_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM (NETXEN_CRB_ROMUSB + 0x10000)
+
+#define NETXEN_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define NETXEN_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define NETXEN_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
+#define NETXEN_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+#define NETXEN_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define NETXEN_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
+
+#define NETXEN_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define NETXEN_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define NETXEN_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define NETXEN_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define NETXEN_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define NETXEN_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+* Instructions
+*/
+#define M25P_INSTR_WREN 0x06
+#define M25P_INSTR_WRDI 0x04
+#define M25P_INSTR_RDID 0x9f
+#define M25P_INSTR_RDSR 0x05
+#define M25P_INSTR_WRSR 0x01
+#define M25P_INSTR_READ 0x03
+#define M25P_INSTR_FAST_READ 0x0b
+#define M25P_INSTR_PP 0x02
+#define M25P_INSTR_SE 0xd8
+#define M25P_INSTR_BE 0xc7
+#define M25P_INSTR_DP 0xb9
+#define M25P_INSTR_RES 0xab
+
+/* all are 1MB windows */
+
+#define NETXEN_PCI_CRB_WINDOWSIZE 0x00100000
+#define NETXEN_PCI_CRB_WINDOW(A) \
+ (NETXEN_PCI_CRBSPACE + (A)*NETXEN_PCI_CRB_WINDOWSIZE)
+
+#define NETXEN_CRB_NIU NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_NIU)
+#define NETXEN_CRB_SRE NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SRE)
+#define NETXEN_CRB_ROMUSB \
+ NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
+#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
+#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64)
+
+#define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH)
+#define NETXEN_CRB_PCIX_HOST2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH2)
+#define NETXEN_CRB_PEG_NET_0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN0)
+#define NETXEN_CRB_PEG_NET_1 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN1)
+#define NETXEN_CRB_PEG_NET_2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN2)
+#define NETXEN_CRB_PEG_NET_3 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN3)
+#define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND)
+#define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI)
+#define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN)
+
+#define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS)
+#define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR (NETXEN_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK (NETXEN_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW (NETXEN_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK))
+
+#define NETXEN_PCI_MAPSIZE 128
+#define NETXEN_PCI_DDR_NET (0x00000000UL)
+#define NETXEN_PCI_QDR_NET (0x04000000UL)
+#define NETXEN_PCI_DIRECT_CRB (0x04400000UL)
+#define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL)
+#define NETXEN_PCI_OCM0 (0x05000000UL)
+#define NETXEN_PCI_OCM0_MAX (0x050fffffUL)
+#define NETXEN_PCI_OCM1 (0x05100000UL)
+#define NETXEN_PCI_OCM1_MAX (0x051fffffUL)
+#define NETXEN_PCI_CRBSPACE (0x06000000UL)
+
+#define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM)
+
+#define NETXEN_ADDR_DDR_NET (0x0000000000000000ULL)
+#define NETXEN_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define NETXEN_ADDR_OCM0 (0x0000000200000000ULL)
+#define NETXEN_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define NETXEN_ADDR_OCM1 (0x0000000200400000ULL)
+#define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL)
+#define NETXEN_ADDR_QDR_NET_MAX (0x00000003003fffffULL)
+
+ /* 200ms delay in each loop */
+#define NETXEN_NIU_PHY_WAITLEN 200000
+ /* 10 seconds before we give up */
+#define NETXEN_NIU_PHY_WAITMAX 50
+#define NETXEN_NIU_MAX_GBE_PORTS 4
+
+#define NETXEN_NIU_MODE (NETXEN_CRB_NIU + 0x00000)
+
+#define NETXEN_NIU_XG_SINGLE_TERM (NETXEN_CRB_NIU + 0x00004)
+#define NETXEN_NIU_XG_DRIVE_HI (NETXEN_CRB_NIU + 0x00008)
+#define NETXEN_NIU_XG_DRIVE_LO (NETXEN_CRB_NIU + 0x0000c)
+#define NETXEN_NIU_XG_DTX (NETXEN_CRB_NIU + 0x00010)
+#define NETXEN_NIU_XG_DEQ (NETXEN_CRB_NIU + 0x00014)
+#define NETXEN_NIU_XG_WORD_ALIGN (NETXEN_CRB_NIU + 0x00018)
+#define NETXEN_NIU_XG_RESET (NETXEN_CRB_NIU + 0x0001c)
+#define NETXEN_NIU_XG_POWER_DOWN (NETXEN_CRB_NIU + 0x00020)
+#define NETXEN_NIU_XG_RESET_PLL (NETXEN_CRB_NIU + 0x00024)
+#define NETXEN_NIU_XG_SERDES_LOOPBACK (NETXEN_CRB_NIU + 0x00028)
+#define NETXEN_NIU_XG_DO_BYTE_ALIGN (NETXEN_CRB_NIU + 0x0002c)
+#define NETXEN_NIU_XG_TX_ENABLE (NETXEN_CRB_NIU + 0x00030)
+#define NETXEN_NIU_XG_RX_ENABLE (NETXEN_CRB_NIU + 0x00034)
+#define NETXEN_NIU_XG_STATUS (NETXEN_CRB_NIU + 0x00038)
+#define NETXEN_NIU_XG_PAUSE_THRESHOLD (NETXEN_CRB_NIU + 0x0003c)
+#define NETXEN_NIU_INT_MASK (NETXEN_CRB_NIU + 0x00040)
+#define NETXEN_NIU_ACTIVE_INT (NETXEN_CRB_NIU + 0x00044)
+#define NETXEN_NIU_MASKABLE_INT (NETXEN_CRB_NIU + 0x00048)
+
+#define NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER (NETXEN_CRB_NIU + 0x0004c)
+
+#define NETXEN_NIU_GB_SERDES_RESET (NETXEN_CRB_NIU + 0x00050)
+#define NETXEN_NIU_GB0_GMII_MODE (NETXEN_CRB_NIU + 0x00054)
+#define NETXEN_NIU_GB0_MII_MODE (NETXEN_CRB_NIU + 0x00058)
+#define NETXEN_NIU_GB1_GMII_MODE (NETXEN_CRB_NIU + 0x0005c)
+#define NETXEN_NIU_GB1_MII_MODE (NETXEN_CRB_NIU + 0x00060)
+#define NETXEN_NIU_GB2_GMII_MODE (NETXEN_CRB_NIU + 0x00064)
+#define NETXEN_NIU_GB2_MII_MODE (NETXEN_CRB_NIU + 0x00068)
+#define NETXEN_NIU_GB3_GMII_MODE (NETXEN_CRB_NIU + 0x0006c)
+#define NETXEN_NIU_GB3_MII_MODE (NETXEN_CRB_NIU + 0x00070)
+#define NETXEN_NIU_REMOTE_LOOPBACK (NETXEN_CRB_NIU + 0x00074)
+#define NETXEN_NIU_GB0_HALF_DUPLEX (NETXEN_CRB_NIU + 0x00078)
+#define NETXEN_NIU_GB1_HALF_DUPLEX (NETXEN_CRB_NIU + 0x0007c)
+#define NETXEN_NIU_RESET_SYS_FIFOS (NETXEN_CRB_NIU + 0x00088)
+#define NETXEN_NIU_GB_CRC_DROP (NETXEN_CRB_NIU + 0x0008c)
+#define NETXEN_NIU_GB_DROP_WRONGADDR (NETXEN_CRB_NIU + 0x00090)
+#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094)
+#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098)
+#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc)
+#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128)
+
+#define NETXEN_NIU_FULL_LEVEL_XG (NETXEN_CRB_NIU + 0x00450)
+
+#define NETXEN_NIU_XG1_RESET (NETXEN_CRB_NIU + 0x0011c)
+#define NETXEN_NIU_XG1_POWER_DOWN (NETXEN_CRB_NIU + 0x00120)
+#define NETXEN_NIU_XG1_RESET_PLL (NETXEN_CRB_NIU + 0x00124)
+
+#define NETXEN_MAC_ADDR_CNTL_REG (NETXEN_CRB_NIU + 0x1000)
+
+#define NETXEN_MULTICAST_ADDR_HI_0 (NETXEN_CRB_NIU + 0x1010)
+#define NETXEN_MULTICAST_ADDR_HI_1 (NETXEN_CRB_NIU + 0x1014)
+#define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018)
+#define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c)
+
+#define NETXEN_NIU_GB_MAC_CONFIG_0(I) \
+ (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000)
+#define NETXEN_NIU_GB_MAC_CONFIG_1(I) \
+ (NETXEN_CRB_NIU + 0x30004 + (I)*0x10000)
+#define NETXEN_NIU_GB_MAC_IPG_IFG(I) \
+ (NETXEN_CRB_NIU + 0x30008 + (I)*0x10000)
+#define NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I) \
+ (NETXEN_CRB_NIU + 0x3000c + (I)*0x10000)
+#define NETXEN_NIU_GB_MAX_FRAME_SIZE(I) \
+ (NETXEN_CRB_NIU + 0x30010 + (I)*0x10000)
+#define NETXEN_NIU_GB_TEST_REG(I) \
+ (NETXEN_CRB_NIU + 0x3001c + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_CONFIG(I) \
+ (NETXEN_CRB_NIU + 0x30020 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_COMMAND(I) \
+ (NETXEN_CRB_NIU + 0x30024 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_ADDR(I) \
+ (NETXEN_CRB_NIU + 0x30028 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_CTRL(I) \
+ (NETXEN_CRB_NIU + 0x3002c + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_STATUS(I) \
+ (NETXEN_CRB_NIU + 0x30030 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_INDICATE(I) \
+ (NETXEN_CRB_NIU + 0x30034 + (I)*0x10000)
+#define NETXEN_NIU_GB_INTERFACE_CTRL(I) \
+ (NETXEN_CRB_NIU + 0x30038 + (I)*0x10000)
+#define NETXEN_NIU_GB_INTERFACE_STATUS(I) \
+ (NETXEN_CRB_NIU + 0x3003c + (I)*0x10000)
+#define NETXEN_NIU_GB_STATION_ADDR_0(I) \
+ (NETXEN_CRB_NIU + 0x30040 + (I)*0x10000)
+#define NETXEN_NIU_GB_STATION_ADDR_1(I) \
+ (NETXEN_CRB_NIU + 0x30044 + (I)*0x10000)
+
+#define NETXEN_NIU_XGE_CONFIG_0 (NETXEN_CRB_NIU + 0x70000)
+#define NETXEN_NIU_XGE_CONFIG_1 (NETXEN_CRB_NIU + 0x70004)
+#define NETXEN_NIU_XGE_IPG (NETXEN_CRB_NIU + 0x70008)
+#define NETXEN_NIU_XGE_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x7000c)
+#define NETXEN_NIU_XGE_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x70010)
+#define NETXEN_NIU_XGE_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x70014)
+#define NETXEN_NIU_XGE_STATUS (NETXEN_CRB_NIU + 0x70018)
+#define NETXEN_NIU_XGE_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x7001c)
+#define NETXEN_NIU_XGE_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x70020)
+#define NETXEN_NIU_XGE_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x70024)
+#define NETXEN_NIU_XGE_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x70028)
+#define NETXEN_NIU_XGE_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x7002c)
+#define NETXEN_NIU_XGE_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x70030)
+#define NETXEN_NIU_XGE_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x70034)
+#define NETXEN_NIU_XGE_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x70038)
+#define NETXEN_NIU_XGE_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x7003c)
+#define NETXEN_NIU_XGE_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x70040)
+#define NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70044)
+#define NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70048)
+#define NETXEN_NIU_XGE_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x7004c)
+#define NETXEN_NIU_XGE_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x70050)
+#define NETXEN_NIU_XGE_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x70054)
+#define NETXEN_NIU_XGE_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x70058)
+#define NETXEN_NIU_XG1_CONFIG_0 (NETXEN_CRB_NIU + 0x80000)
+#define NETXEN_NIU_XG1_CONFIG_1 (NETXEN_CRB_NIU + 0x80004)
+#define NETXEN_NIU_XG1_IPG (NETXEN_CRB_NIU + 0x80008)
+#define NETXEN_NIU_XG1_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x8000c)
+#define NETXEN_NIU_XG1_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x80010)
+#define NETXEN_NIU_XG1_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x80014)
+#define NETXEN_NIU_XG1_STATUS (NETXEN_CRB_NIU + 0x80018)
+#define NETXEN_NIU_XG1_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x8001c)
+#define NETXEN_NIU_XG1_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x80020)
+#define NETXEN_NIU_XG1_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x80024)
+#define NETXEN_NIU_XG1_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x80028)
+#define NETXEN_NIU_XG1_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x8002c)
+#define NETXEN_NIU_XG1_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x80030)
+#define NETXEN_NIU_XG1_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x80034)
+#define NETXEN_NIU_XG1_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x80038)
+#define NETXEN_NIU_XG1_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x8003c)
+#define NETXEN_NIU_XG1_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x80040)
+#define NETXEN_NIU_XG1_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80044)
+#define NETXEN_NIU_XG1_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80048)
+#define NETXEN_NIU_XG1_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x8004c)
+#define NETXEN_NIU_XG1_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x80050)
+#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054)
+#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058)
+
+/* XG Link status */
+#define XG_LINK_UP 0x10
+#define XG_LINK_DOWN 0x20
+
+#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000)
+#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg))
+#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150))
+#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154))
+#define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158))
+#define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100))
+
+#define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120))
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER 0x44524956
+
+/* Used for PS PCI Memory access */
+#define PCIX_PS_OP_ADDR_LO (0x10000)
+/* via CRB (PS side only) */
+#define PCIX_PS_OP_ADDR_HI (0x10004)
+
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+#define PCIX_MN_WINDOW (0x10200)
+#define PCIX_MS_WINDOW (0x10204)
+#define PCIX_SN_WINDOW (0x10208)
+#define PCIX_CRB_WINDOW (0x10210)
+
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_MASK (0x10128)
+
+#define PCIX_MSI_F0 (0x13000)
+
+#define PCIX_PS_MEM_SPACE (0x90000)
+
+#define NETXEN_PCIX_PH_REG(reg) (NETXEN_CRB_PCIE + (reg))
+#define NETXEN_PCIX_PS_REG(reg) (NETXEN_CRB_PCIX_MD + (reg))
+
+#define NETXEN_PCIE_REG(reg) (NETXEN_CRB_PCIE + (reg))
+
+#define PCIE_MAX_DMA_XFER_SIZE (0x1404c)
+
+#define PCIE_DCR 0x00d8
+
+#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */
+#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
+#define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */
+#define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */
+
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+
+#define PCIE_MAX_MASTER_SPLIT (0x14048)
+
+#endif /* __NETXEN_NIC_HDR_H_ */
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
new file mode 100644
index 000000000000..105c24f0ad4c
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -0,0 +1,1010 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ * info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ *
+ * Source file for NIC routines to access the Phantom hardware
+ *
+ */
+
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+#include "netxen_nic_phan_reg.h"
+
+/* PCI Windowing for DDR regions. */
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) <= (high)) && ((addr) >= (low)))
+
+#define NETXEN_FLASH_BASE (BOOTLD_START)
+#define NETXEN_PHANTOM_MEM_BASE (NETXEN_FLASH_BASE)
+#define NETXEN_MAX_MTU 8000
+#define NETXEN_MIN_MTU 64
+#define NETXEN_ETH_FCS_SIZE 4
+#define NETXEN_ENET_HEADER_SIZE 14
+#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */
+#define NETXEN_FIRMWARE_LEN ((16 * 1024) / 4)
+#define NETXEN_NIU_HDRSIZE (0x1 << 6)
+#define NETXEN_NIU_TLRSIZE (0x1 << 5)
+
+#define lower32(x) ((u32)((x) & 0xffffffff))
+#define upper32(x) \
+ ((u32)(((unsigned long long)(x) >> 32) & 0xffffffff))
+
+#define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL
+#define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL
+#define NETXEN_NIC_EPG_PAUSE_ADDR1 0x2200010000c28001ULL
+#define NETXEN_NIC_EPG_PAUSE_ADDR2 0x0100088866554433ULL
+
+#define NETXEN_NIC_WINDOW_MARGIN 0x100000
+
+unsigned long netxen_nic_pci_set_window(struct netxen_adapter *adapter,
+ unsigned long long addr);
+void netxen_free_hw_resources(struct netxen_adapter *adapter);
+
+int netxen_nic_set_mac(struct net_device *netdev, void *p)
+{
+ struct netxen_port *port = netdev_priv(netdev);
+ struct netxen_adapter *adapter = port->adapter;
+ struct sockaddr *addr = p;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ DPRINTK(INFO, "valid ether addr\n");
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ if (adapter->ops->macaddr_set)
+ adapter->ops->macaddr_set(port, addr->sa_data);
+
+ return 0;
+}
+
+/*
+ * netxen_nic_set_multi - Set the multicast/promiscuous receive mode
+ */
+void netxen_nic_set_multi(struct net_device *netdev)
+{
+ struct netxen_port *port = netdev_priv(netdev);
+ struct netxen_adapter *adapter = port->adapter;
+ struct dev_mc_list *mc_ptr;
+ __le32 netxen_mac_addr_cntl_data = 0;
+
+ mc_ptr = netdev->mc_list;
+ if (netdev->flags & IFF_PROMISC) {
+ if (adapter->ops->set_promisc)
+ adapter->ops->set_promisc(adapter,
+ port->portnum,
+ NETXEN_NIU_PROMISC_MODE);
+ } else {
+ if (adapter->ops->unset_promisc &&
+ adapter->ahw.boardcfg.board_type
+ != NETXEN_BRDTYPE_P2_SB31_10G_IMEZ)
+ adapter->ops->unset_promisc(adapter,
+ port->portnum,
+ NETXEN_NIU_NON_PROMISC_MODE);
+ }
+ if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
+ netxen_nic_mcr_set_mode_select(netxen_mac_addr_cntl_data, 0x03);
+ netxen_nic_mcr_set_id_pool0(netxen_mac_addr_cntl_data, 0x00);
+ netxen_nic_mcr_set_id_pool1(netxen_mac_addr_cntl_data, 0x00);
+ netxen_nic_mcr_set_id_pool2(netxen_mac_addr_cntl_data, 0x00);
+ netxen_nic_mcr_set_id_pool3(netxen_mac_addr_cntl_data, 0x00);
+ netxen_nic_mcr_set_enable_xtnd0(netxen_mac_addr_cntl_data);
+ netxen_nic_mcr_set_enable_xtnd1(netxen_mac_addr_cntl_data);
+ netxen_nic_mcr_set_enable_xtnd2(netxen_mac_addr_cntl_data);
+ netxen_nic_mcr_set_enable_xtnd3(netxen_mac_addr_cntl_data);
+ } else {
+ netxen_nic_mcr_set_mode_select(netxen_mac_addr_cntl_data, 0x00);
+ netxen_nic_mcr_set_id_pool0(netxen_mac_addr_cntl_data, 0x00);
+ netxen_nic_mcr_set_id_pool1(netxen_mac_addr_cntl_data, 0x01);
+ netxen_nic_mcr_set_id_pool2(netxen_mac_addr_cntl_data, 0x02);
+ netxen_nic_mcr_set_id_pool3(netxen_mac_addr_cntl_data, 0x03);
+ }
+ writel(netxen_mac_addr_cntl_data,
+ NETXEN_CRB_NORMALIZE(adapter, NETXEN_MAC_ADDR_CNTL_REG));
+ if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
+ writel(netxen_mac_addr_cntl_data,
+ NETXEN_CRB_NORMALIZE(adapter,
+ NETXEN_MULTICAST_ADDR_HI_0));
+ } else {
+ writel(netxen_mac_addr_cntl_data,
+ NETXEN_CRB_NORMALIZE(adapter,
+ NETXEN_MULTICAST_ADDR_HI_1));
+ }
+ netxen_mac_addr_cntl_data = 0;
+ writel(netxen_mac_addr_cntl_data,
+ NETXEN_CRB_NORMALIZE(adapter, NETXEN_NIU_GB_DROP_WRONGADDR));
+}
+
+/*
+ * netxen_nic_change_mtu - Change the Maximum Transmission Unit
+ * @returns 0 on success, negative on failure
+ */
+int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct netxen_port *port = netdev_priv(netdev);
+ struct netxen_adapter *adapter = port->adapter;
+ int eff_mtu = mtu + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE;
+
+ if ((eff_mtu > NETXEN_MAX_MTU) || (eff_mtu < NETXEN_MIN_MTU)) {
+ printk(KERN_ERR "%s: %s %d is not supported.\n",
+ netxen_nic_driver_name, netdev->name, mtu);
+ return -EINVAL;
+ }
+
+ if (adapter->ops->set_mtu)
+ adapter->ops->set_mtu(port, mtu);
+ netdev->mtu = mtu;
+
+ return 0;
+}
+
+/*
+ * Check that the firmware has been downloaded and is ready to run, and
+ * set up the addresses for the descriptors in the adapter.
+ */
+int netxen_nic_hw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_hardware_context *hw = &adapter->ahw;
+ u32 state = 0;
+ void *addr;
+ void *pause_addr;
+ int loops = 0, err = 0;
+ int ctx, ring;
+ u32 card_cmdring = 0;
+ struct netxen_rcv_desc_crb *rcv_desc_crb = NULL;
+ struct netxen_recv_context *recv_ctx;
+ struct netxen_rcv_desc_ctx *rcv_desc;
+
+ DPRINTK(INFO, "crb_base: %lx %lx", NETXEN_PCI_CRBSPACE,
+ PCI_OFFSET_SECOND_RANGE(adapter, NETXEN_PCI_CRBSPACE));
+ DPRINTK(INFO, "cam base: %lx %lx", NETXEN_CRB_CAM,
+ pci_base_offset(adapter, NETXEN_CRB_CAM));
+ DPRINTK(INFO, "cam RAM: %lx %lx", NETXEN_CAM_RAM_BASE,
+ pci_base_offset(adapter, NETXEN_CAM_RAM_BASE));
+ DPRINTK(INFO, "NIC base:%lx %lx\n", NIC_CRB_BASE_PORT1,
+ pci_base_offset(adapter, NIC_CRB_BASE_PORT1));
+
+ /* Window 1 call */
+ card_cmdring = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_CMDRING));
+
+ DPRINTK(INFO, "Command Peg sends 0x%x for cmdring base\n",
+ card_cmdring);
+
+ for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+		DPRINTK(INFO, "Command Peg ready, waiting for Rcv Peg\n");
+ loops = 0;
+ state = 0;
+ /* Window 1 call */
+ state = readl(NETXEN_CRB_NORMALIZE(adapter,
+ recv_crb_registers[ctx].
+ crb_rcvpeg_state));
+ while (state != PHAN_PEG_RCV_INITIALIZED && loops < 20) {
+ udelay(100);
+ /* Window 1 call */
+ state = readl(NETXEN_CRB_NORMALIZE(adapter,
+ recv_crb_registers
+ [ctx].
+ crb_rcvpeg_state));
+ loops++;
+ }
+ if (loops >= 20) {
+			printk(KERN_ERR "Rcv Peg initialization not complete: "
+			       "%x.\n", state);
+ err = -EIO;
+ return err;
+ }
+ }
+	DPRINTK(INFO, "Receive Peg ready too. starting stuff\n");
+
+ addr = netxen_alloc(adapter->ahw.pdev,
+ sizeof(struct cmd_desc_type0) *
+ adapter->max_tx_desc_count,
+ &hw->cmd_desc_phys_addr, &hw->cmd_desc_pdev);
+
+ if (addr == NULL) {
+ DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
+ return -ENOMEM;
+ }
+
+ pause_addr = netxen_alloc(adapter->ahw.pdev, 512,
+ (dma_addr_t *) & hw->pause_physaddr,
+ &hw->pause_pdev);
+ if (pause_addr == NULL) {
+		DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
+ return -ENOMEM;
+ }
+
+ hw->pauseaddr = (char *)pause_addr;
+ {
+ u64 *ptr = (u64 *) pause_addr;
+ *ptr++ = NETXEN_NIC_ZERO_PAUSE_ADDR;
+ *ptr++ = NETXEN_NIC_ZERO_PAUSE_ADDR;
+ *ptr++ = NETXEN_NIC_UNIT_PAUSE_ADDR;
+ *ptr++ = NETXEN_NIC_ZERO_PAUSE_ADDR;
+ *ptr++ = NETXEN_NIC_EPG_PAUSE_ADDR1;
+ *ptr++ = NETXEN_NIC_EPG_PAUSE_ADDR2;
+ }
+
+ hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
+
+ for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+ recv_ctx = &adapter->recv_ctx[ctx];
+
+ for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+ rcv_desc = &recv_ctx->rcv_desc[ring];
+ addr = netxen_alloc(adapter->ahw.pdev,
+ RCV_DESC_RINGSIZE,
+ &rcv_desc->phys_addr,
+ &rcv_desc->phys_pdev);
+ if (addr == NULL) {
+ DPRINTK(ERR, "bad return from "
+ "pci_alloc_consistent\n");
+ netxen_free_hw_resources(adapter);
+ err = -ENOMEM;
+ return err;
+ }
+ rcv_desc->desc_head = (struct rcv_desc *)addr;
+ }
+
+ addr = netxen_alloc(adapter->ahw.pdev, STATUS_DESC_RINGSIZE,
+ &recv_ctx->rcv_status_desc_phys_addr,
+ &recv_ctx->rcv_status_desc_pdev);
+ if (addr == NULL) {
+ DPRINTK(ERR, "bad return from"
+ " pci_alloc_consistent\n");
+ netxen_free_hw_resources(adapter);
+ err = -ENOMEM;
+ return err;
+ }
+ recv_ctx->rcv_status_desc_head = (struct status_desc *)addr;
+ for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+ rcv_desc = &recv_ctx->rcv_desc[ring];
+ rcv_desc_crb =
+ &recv_crb_registers[ctx].rcv_desc_crb[ring];
+ DPRINTK(INFO, "ring #%d crb global ring reg 0x%x\n",
+ ring, rcv_desc_crb->crb_globalrcv_ring);
+ /* Window = 1 */
+ writel(lower32(rcv_desc->phys_addr),
+ NETXEN_CRB_NORMALIZE(adapter,
+ rcv_desc_crb->
+ crb_globalrcv_ring));
+ DPRINTK(INFO, "GLOBAL_RCV_RING ctx %d, addr 0x%x"
+ " val 0x%llx,"
+ " virt %p\n", ctx,
+ rcv_desc_crb->crb_globalrcv_ring,
+ (unsigned long long)rcv_desc->phys_addr,
+				rcv_desc->desc_head);
+ }
+
+ /* Window = 1 */
+ writel(lower32(recv_ctx->rcv_status_desc_phys_addr),
+ NETXEN_CRB_NORMALIZE(adapter,
+ recv_crb_registers[ctx].
+ crb_rcvstatus_ring));
+ DPRINTK(INFO, "RCVSTATUS_RING, ctx %d, addr 0x%x,"
+ " val 0x%x,virt%p\n",
+ ctx,
+ recv_crb_registers[ctx].crb_rcvstatus_ring,
+ (unsigned long long)recv_ctx->rcv_status_desc_phys_addr,
+ recv_ctx->rcv_status_desc_head);
+ }
+ /* Window = 1 */
+ writel(lower32(hw->pause_physaddr),
+ NETXEN_CRB_NORMALIZE(adapter, CRB_PAUSE_ADDR_LO));
+ writel(upper32(hw->pause_physaddr),
+ NETXEN_CRB_NORMALIZE(adapter, CRB_PAUSE_ADDR_HI));
+
+ writel(lower32(hw->cmd_desc_phys_addr),
+ NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO));
+ writel(upper32(hw->cmd_desc_phys_addr),
+ NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_HI));
+ return err;
+}
+
+void netxen_free_hw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct netxen_rcv_desc_ctx *rcv_desc;
+ int ctx, ring;
+
+ if (adapter->ahw.cmd_desc_head != NULL) {
+ pci_free_consistent(adapter->ahw.cmd_desc_pdev,
+ sizeof(struct cmd_desc_type0) *
+ adapter->max_tx_desc_count,
+ adapter->ahw.cmd_desc_head,
+ adapter->ahw.cmd_desc_phys_addr);
+ adapter->ahw.cmd_desc_head = NULL;
+ }
+ if (adapter->ahw.pauseaddr != NULL) {
+ pci_free_consistent(adapter->ahw.pause_pdev, 512,
+ adapter->ahw.pauseaddr,
+ adapter->ahw.pause_physaddr);
+ adapter->ahw.pauseaddr = NULL;
+ }
+
+ for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+ recv_ctx = &adapter->recv_ctx[ctx];
+ for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+ rcv_desc = &recv_ctx->rcv_desc[ring];
+
+ if (rcv_desc->desc_head != NULL) {
+ pci_free_consistent(rcv_desc->phys_pdev,
+ RCV_DESC_RINGSIZE,
+ rcv_desc->desc_head,
+ rcv_desc->phys_addr);
+ rcv_desc->desc_head = NULL;
+ }
+ }
+
+ if (recv_ctx->rcv_status_desc_head != NULL) {
+ pci_free_consistent(recv_ctx->rcv_status_desc_pdev,
+ STATUS_DESC_RINGSIZE,
+ recv_ctx->rcv_status_desc_head,
+ recv_ctx->
+ rcv_status_desc_phys_addr);
+ recv_ctx->rcv_status_desc_head = NULL;
+ }
+ }
+}
+
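+/*
+ * Fill in the offload fields of a Tx descriptor from the skb: TCP LSO
+ * when an MSS is set, otherwise mark the packet for TCP/UDP checksum
+ * offload based on skb->ip_summed.
+ */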
+void netxen_tso_check(struct netxen_adapter *adapter,
+ struct cmd_desc_type0 *desc, struct sk_buff *skb)
+{
+ if (desc->mss) {
+ desc->total_hdr_length = sizeof(struct ethhdr) +
+ ((skb->nh.iph)->ihl * sizeof(u32)) +
+ ((skb->h.th)->doff * sizeof(u32));
+ desc->opcode = TX_TCP_LSO;
+ } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ if (skb->nh.iph->protocol == IPPROTO_TCP) {
+ desc->opcode = TX_TCP_PKT;
+ } else if (skb->nh.iph->protocol == IPPROTO_UDP) {
+ desc->opcode = TX_UDP_PKT;
+ } else {
+ return;
+ }
+ }
+ adapter->stats.xmitcsummed++;
+ CMD_DESC_TCP_HDR_OFFSET_WRT(desc, skb->h.raw - skb->data);
+ desc->length_tcp_hdr = cpu_to_le32(desc->length_tcp_hdr);
+ desc->ip_hdr_offset = skb->nh.raw - skb->data;
+}
+
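+/*
+ * Sanity-check the flash: compare a few well-known offsets against the
+ * same offsets 1, 2 and 3 MB higher; identical contents indicate address
+ * aliasing, i.e. a flash smaller than expected, which is not supported.
+ */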
+int netxen_is_flash_supported(struct netxen_adapter *adapter)
+{
+ const int locs[] = { 0, 0x4, 0x100, 0x4000, 0x4128 };
+ int addr, val01, val02, i, j;
+
+	/* if the flash size is less than 4Mb, make a huge war cry and die */
+ for (j = 1; j < 4; j++) {
+ addr = j * NETXEN_NIC_WINDOW_MARGIN;
+ for (i = 0; i < (sizeof(locs) / sizeof(locs[0])); i++) {
+ if (netxen_rom_fast_read(adapter, locs[i], &val01) == 0
+ && netxen_rom_fast_read(adapter, (addr + locs[i]),
+ &val02) == 0) {
+ if (val01 == val02)
+ return -1;
+ } else
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
+ int size, u32 * buf)
+{
+ int i, addr;
+ u32 *ptr32;
+
+ addr = base;
+ ptr32 = buf;
+ for (i = 0; i < size / sizeof(u32); i++) {
+ if (netxen_rom_fast_read(adapter, addr, ptr32) == -1)
+ return -1;
+ ptr32++;
+ addr += sizeof(u32);
+ }
+ if ((char *)buf + size > (char *)ptr32) {
+ u32 local;
+
+ if (netxen_rom_fast_read(adapter, addr, &local) == -1)
+ return -1;
+ memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
+ }
+
+ return 0;
+}
+
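+/*
+ * Read the per-port MAC addresses from the user region of flash, falling
+ * back to the old-format user region if the new one is unprogrammed.
+ */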
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 mac[])
+{
+ u32 *pmac = (u32 *) & mac[0];
+
+ if (netxen_get_flash_block(adapter,
+ USER_START +
+ offsetof(struct netxen_new_user_info,
+ mac_addr),
+ FLASH_NUM_PORTS * sizeof(u64), pmac) == -1) {
+ return -1;
+ }
+ if (*mac == ~0ULL) {
+ if (netxen_get_flash_block(adapter,
+ USER_START_OLD +
+ offsetof(struct netxen_user_old_info,
+ mac_addr),
+ FLASH_NUM_PORTS * sizeof(u64),
+ pmac) == -1)
+ return -1;
+ if (*mac == ~0ULL)
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Changes the CRB window to the specified window.
+ */
+void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw)
+{
+ void __iomem *offset;
+ u32 tmp;
+ int count = 0;
+
+ if (adapter->curr_window == wndw)
+ return;
+
+ /*
+ * Move the CRB window.
+ * We need to write to the "direct access" region of PCI
+ * to avoid a race condition where the window register has
+ * not been successfully written across CRB before the target
+ * register address is received by PCI. The direct region bypasses
+ * the CRB bus.
+ */
+ offset =
+ PCI_OFFSET_SECOND_RANGE(adapter,
+ NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW));
+
+ if (wndw & 0x1)
+ wndw = NETXEN_WINDOW_ONE;
+
+ writel(wndw, offset);
+
+ /* MUST make sure window is set before we forge on... */
+ while ((tmp = readl(offset)) != wndw) {
+ printk(KERN_WARNING "%s: %s WARNING: CRB window value not "
+ "registered properly: 0x%08x.\n",
+ netxen_nic_driver_name, __FUNCTION__, tmp);
+ mdelay(1);
+ if (count >= 10)
+ break;
+ count++;
+ }
+
+ adapter->curr_window = wndw;
+}
+
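+/*
+ * Copy the firmware image from flash into Phantom memory one word at a
+ * time, asserting the Casper reset (CAS_RST) for the duration of the copy.
+ */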
+void netxen_load_firmware(struct netxen_adapter *adapter)
+{
+ int i;
+ long data, size = 0;
+ long flashaddr = NETXEN_FLASH_BASE, memaddr = NETXEN_PHANTOM_MEM_BASE;
+ u64 off;
+ void __iomem *addr;
+
+ size = NETXEN_FIRMWARE_LEN;
+ writel(1, NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CAS_RST));
+
+ for (i = 0; i < size; i++) {
+ if (netxen_rom_fast_read(adapter, flashaddr, (int *)&data) != 0) {
+ DPRINTK(ERR,
+				"Error in netxen_rom_fast_read(). Will skip "
+				"loading flash image\n");
+ return;
+ }
+ off = netxen_nic_pci_set_window(adapter, memaddr);
+ addr = pci_base_offset(adapter, off);
+ writel(data, addr);
+ flashaddr += 4;
+ memaddr += 4;
+ }
+ udelay(100);
+ /* make sure Casper is powered on */
+ writel(0x3fff,
+ NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL));
+ writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CAS_RST));
+
+ udelay(100);
+}
+
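+/*
+ * Write a value of `len' bytes to CRB offset `off', switching to window 0
+ * for addresses outside window 1 and restoring window 1 afterwards.
+ */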
+int
+netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data,
+ int len)
+{
+ void __iomem *addr;
+
+ if (ADDR_IN_WINDOW1(off)) {
+ addr = NETXEN_CRB_NORMALIZE(adapter, off);
+ } else { /* Window 0 */
+ addr = pci_base_offset(adapter, off);
+ netxen_nic_pci_change_crbwindow(adapter, 0);
+ }
+
+ DPRINTK(INFO, "writing to base %lx offset %llx addr %p"
+ " data %llx len %d\n",
+ pci_base(adapter, off), off, addr,
+ *(unsigned long long *)data, len);
+ if (!addr) {
+ netxen_nic_pci_change_crbwindow(adapter, 1);
+ return 1;
+ }
+
+ switch (len) {
+ case 1:
+ writeb(*(u8 *) data, addr);
+ break;
+ case 2:
+ writew(*(u16 *) data, addr);
+ break;
+ case 4:
+ writel(*(u32 *) data, addr);
+ break;
+ case 8:
+ writeq(*(u64 *) data, addr);
+ break;
+ default:
+ DPRINTK(INFO,
+ "writing data %lx to offset %llx, num words=%d\n",
+ *(unsigned long *)data, off, (len >> 3));
+
+ netxen_nic_hw_block_write64((u64 __iomem *) data, addr,
+ (len >> 3));
+ break;
+ }
+ if (!ADDR_IN_WINDOW1(off))
+ netxen_nic_pci_change_crbwindow(adapter, 1);
+
+ return 0;
+}
+
+int
+netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data,
+ int len)
+{
+ void __iomem *addr;
+
+ if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
+ addr = NETXEN_CRB_NORMALIZE(adapter, off);
+ } else { /* Window 0 */
+ addr = pci_base_offset(adapter, off);
+ netxen_nic_pci_change_crbwindow(adapter, 0);
+ }
+
+ DPRINTK(INFO, "reading from base %lx offset %llx addr %p\n",
+ pci_base(adapter, off), off, addr);
+ if (!addr) {
+ netxen_nic_pci_change_crbwindow(adapter, 1);
+ return 1;
+ }
+ switch (len) {
+ case 1:
+ *(u8 *) data = readb(addr);
+ break;
+ case 2:
+ *(u16 *) data = readw(addr);
+ break;
+ case 4:
+ *(u32 *) data = readl(addr);
+ break;
+ case 8:
+ *(u64 *) data = readq(addr);
+ break;
+ default:
+ netxen_nic_hw_block_read64((u64 __iomem *) data, addr,
+ (len >> 3));
+ break;
+ }
+ DPRINTK(INFO, "read %lx\n", *(unsigned long *)data);
+
+ if (!ADDR_IN_WINDOW1(off))
+ netxen_nic_pci_change_crbwindow(adapter, 1);
+
+ return 0;
+}
+
+void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val)
+{ /* Only for window 1 */
+ void __iomem *addr;
+
+ addr = NETXEN_CRB_NORMALIZE(adapter, off);
+ DPRINTK(INFO, "writing to base %lx offset %llx addr %p data %x\n",
+ pci_base(adapter, off), off, addr);
+ writel(val, addr);
+
+}
+
+int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off)
+{ /* Only for window 1 */
+ void __iomem *addr;
+ int val;
+
+ addr = NETXEN_CRB_NORMALIZE(adapter, off);
+ DPRINTK(INFO, "reading from base %lx offset %llx addr %p\n",
+ adapter->ahw.pci_base, off, addr);
+ val = readl(addr);
+ writel(val, addr);
+
+ return val;
+}
+
+/* Change the window to 0, write and change back to window 1. */
+void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value)
+{
+ void __iomem *addr;
+
+ netxen_nic_pci_change_crbwindow(adapter, 0);
+ addr = pci_base_offset(adapter, index);
+ writel(value, addr);
+ netxen_nic_pci_change_crbwindow(adapter, 1);
+}
+
+/* Change the window to 0, read and change back to window 1. */
+void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 * value)
+{
+ void __iomem *addr;
+
+ addr = pci_base_offset(adapter, index);
+
+ netxen_nic_pci_change_crbwindow(adapter, 0);
+ *value = readl(addr);
+ netxen_nic_pci_change_crbwindow(adapter, 1);
+}
+
+int netxen_pci_set_window_warning_count = 0;
+
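+/*
+ * Translate a 64-bit card-side address (DDR net, OCM0/1 or QDR net)
+ * into an address reachable through the PCI BARs, reprogramming the
+ * 32MB MN or 4MB SN window register when the window changes.  The
+ * read-back after each window write makes sure it has taken effect.
+ * Unknown ranges are reported (rate limited) and returned unchanged.
+ */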
+unsigned long
+netxen_nic_pci_set_window(struct netxen_adapter *adapter,
+ unsigned long long addr)
+{
+ static int ddr_mn_window = -1;
+ static int qdr_sn_window = -1;
+ int window;
+
+ if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ /* DDR network side */
+ addr -= NETXEN_ADDR_DDR_NET;
+ window = (addr >> 25) & 0x3ff;
+ if (ddr_mn_window != window) {
+ ddr_mn_window = window;
+ writel(window, PCI_OFFSET_SECOND_RANGE(adapter,
+ NETXEN_PCIX_PH_REG
+ (PCIX_MN_WINDOW)));
+ /* MUST make sure window is set before we forge on... */
+ readl(PCI_OFFSET_SECOND_RANGE(adapter,
+ NETXEN_PCIX_PH_REG
+ (PCIX_MN_WINDOW)));
+ }
+ addr -= (window * NETXEN_WINDOW_ONE);
+ addr += NETXEN_PCI_DDR_NET;
+ } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+ addr -= NETXEN_ADDR_OCM0;
+ addr += NETXEN_PCI_OCM0;
+ } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+ addr -= NETXEN_ADDR_OCM1;
+ addr += NETXEN_PCI_OCM1;
+ } else
+ if (ADDR_IN_RANGE
+ (addr, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX)) {
+ /* QDR network side */
+ addr -= NETXEN_ADDR_QDR_NET;
+ window = (addr >> 22) & 0x3f;
+ if (qdr_sn_window != window) {
+ qdr_sn_window = window;
+ writel((window << 22),
+ PCI_OFFSET_SECOND_RANGE(adapter,
+ NETXEN_PCIX_PH_REG
+ (PCIX_SN_WINDOW)));
+ /* MUST make sure window is set before we forge on... */
+ readl(PCI_OFFSET_SECOND_RANGE(adapter,
+ NETXEN_PCIX_PH_REG
+ (PCIX_SN_WINDOW)));
+ }
+ addr -= (window * 0x400000);
+ addr += NETXEN_PCI_QDR_NET;
+ } else {
+ /*
+ * peg gdb frequently accesses memory that doesn't exist,
+ * this limits the chit chat so debugging isn't slowed down.
+ */
+ if ((netxen_pci_set_window_warning_count++ < 8)
+ || (netxen_pci_set_window_warning_count % 64 == 0))
+ printk("%s: Warning: netxen_nic_pci_set_window()"
+ " Unknown address range!\n",
+ netxen_nic_driver_name);
+
+ }
+ return addr;
+}
+
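+/*
+ * Read the board configuration block from flash (starting at
+ * BRDCFG_START) one 32-bit word at a time, validate its magic and
+ * header version, and set ahw.board_type to GbE or XGbE accordingly.
+ */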
+int netxen_nic_get_board_info(struct netxen_adapter *adapter)
+{
+ int rv = 0;
+ int addr = BRDCFG_START;
+ struct netxen_board_info *boardinfo;
+ int index;
+ u32 *ptr32;
+
+ boardinfo = &adapter->ahw.boardcfg;
+ ptr32 = (u32 *) boardinfo;
+
+ for (index = 0; index < sizeof(struct netxen_board_info) / sizeof(u32);
+ index++) {
+ if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) {
+ return -EIO;
+ }
+ ptr32++;
+ addr += sizeof(u32);
+ }
+ if (boardinfo->magic != NETXEN_BDINFO_MAGIC) {
+ printk("%s: ERROR reading %s board config."
+ " Read %x, expected %x\n", netxen_nic_driver_name,
+ netxen_nic_driver_name,
+ boardinfo->magic, NETXEN_BDINFO_MAGIC);
+ rv = -1;
+ }
+ if (boardinfo->header_version != NETXEN_BDINFO_VERSION) {
+ printk("%s: Unknown board config version."
+ " Read %x, expected %x\n", netxen_nic_driver_name,
+ boardinfo->header_version, NETXEN_BDINFO_VERSION);
+ rv = -1;
+ }
+
+ DPRINTK(INFO, "Discovered board type:0x%x ", boardinfo->board_type);
+ switch ((netxen_brdtype_t) boardinfo->board_type) {
+ case NETXEN_BRDTYPE_P2_SB35_4G:
+ adapter->ahw.board_type = NETXEN_NIC_GBE;
+ break;
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ adapter->ahw.board_type = NETXEN_NIC_XGBE;
+ break;
+ case NETXEN_BRDTYPE_P1_BD:
+ case NETXEN_BRDTYPE_P1_SB:
+ case NETXEN_BRDTYPE_P1_SMAX:
+ case NETXEN_BRDTYPE_P1_SOCK:
+ adapter->ahw.board_type = NETXEN_NIC_GBE;
+ break;
+ default:
+ printk("%s: Unknown(%x)\n", netxen_nic_driver_name,
+ boardinfo->board_type);
+ break;
+ }
+
+ return rv;
+}
+
+/* NIU access sections */
+
+int netxen_nic_set_mtu_gb(struct netxen_port *port, int new_mtu)
+{
+ struct netxen_adapter *adapter = port->adapter;
+ netxen_nic_write_w0(adapter,
+ NETXEN_NIU_GB_MAX_FRAME_SIZE(port->portnum),
+ new_mtu);
+ return 0;
+}
+
+int netxen_nic_set_mtu_xgb(struct netxen_port *port, int new_mtu)
+{
+ struct netxen_adapter *adapter = port->adapter;
+ new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE;
+ netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu);
+ return 0;
+}
+
+void netxen_nic_init_niu_gb(struct netxen_adapter *adapter)
+{
+ int portno;
+ for (portno = 0; portno < NETXEN_NIU_MAX_GBE_PORTS; portno++)
+ netxen_niu_gbe_init_port(adapter, portno);
+}
+
+void netxen_nic_stop_all_ports(struct netxen_adapter *adapter)
+{
+ int port_nr;
+ struct netxen_port *port;
+
+ for (port_nr = 0; port_nr < adapter->ahw.max_ports; port_nr++) {
+ port = adapter->port[port_nr];
+ if (adapter->ops->stop_port)
+ adapter->ops->stop_port(adapter, port->portnum);
+ }
+}
+
+void
+netxen_crb_writelit_adapter(struct netxen_adapter *adapter, unsigned long off,
+ int data)
+{
+ void __iomem *addr;
+
+ if (ADDR_IN_WINDOW1(off)) {
+ writel(data, NETXEN_CRB_NORMALIZE(adapter, off));
+ } else {
+ netxen_nic_pci_change_crbwindow(adapter, 0);
+ addr = pci_base_offset(adapter, off);
+ writel(data, addr);
+ netxen_nic_pci_change_crbwindow(adapter, 1);
+ }
+}
+
+void netxen_nic_set_link_parameters(struct netxen_port *port)
+{
+ struct netxen_adapter *adapter = port->adapter;
+ __le32 status;
+ u16 autoneg;
+ __le32 mode;
+
+ netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
+ if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */
+ if (adapter->ops->phy_read
+ && adapter->ops->
+ phy_read(adapter, port->portnum,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ &status) == 0) {
+ if (netxen_get_phy_link(status)) {
+ switch (netxen_get_phy_speed(status)) {
+ case 0:
+ port->link_speed = SPEED_10;
+ break;
+ case 1:
+ port->link_speed = SPEED_100;
+ break;
+ case 2:
+ port->link_speed = SPEED_1000;
+ break;
+ default:
+ port->link_speed = -1;
+ break;
+ }
+ switch (netxen_get_phy_duplex(status)) {
+ case 0:
+ port->link_duplex = DUPLEX_HALF;
+ break;
+ case 1:
+ port->link_duplex = DUPLEX_FULL;
+ break;
+ default:
+ port->link_duplex = -1;
+ break;
+ }
+ if (adapter->ops->phy_read
+ && adapter->ops->
+ phy_read(adapter, port->portnum,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+ (__le32 *) &autoneg) == 0)
+ port->link_autoneg = autoneg;
+ } else
+ goto link_down;
+ } else {
+ link_down:
+ port->link_speed = -1;
+ port->link_duplex = -1;
+ }
+ }
+}
+
+void netxen_nic_flash_print(struct netxen_adapter *adapter)
+{
+ int valid = 1;
+ u32 fw_major = 0;
+ u32 fw_minor = 0;
+ u32 fw_build = 0;
+ char brd_name[NETXEN_MAX_SHORT_NAME];
+ struct netxen_new_user_info user_info;
+ int i, addr = USER_START;
+ u32 *ptr32;
+
+ struct netxen_board_info *board_info = &(adapter->ahw.boardcfg);
+ if (board_info->magic != NETXEN_BDINFO_MAGIC) {
+ printk
+ ("NetXen Unknown board config. Read 0x%x, expected 0x%x\n",
+ board_info->magic, NETXEN_BDINFO_MAGIC);
+ valid = 0;
+ }
+ if (board_info->header_version != NETXEN_BDINFO_VERSION) {
+ printk("NetXen Unknown board config version."
+ " Read %x, expected %x\n",
+ board_info->header_version, NETXEN_BDINFO_VERSION);
+ valid = 0;
+ }
+ if (valid) {
+ ptr32 = (u32 *) & user_info;
+ for (i = 0;
+ i < sizeof(struct netxen_new_user_info) / sizeof(u32);
+ i++) {
+ if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) {
+ printk("%s: ERROR reading %s board userarea.\n",
+ netxen_nic_driver_name,
+ netxen_nic_driver_name);
+ return;
+ }
+ ptr32++;
+ addr += sizeof(u32);
+ }
+ get_brd_name_by_type(board_info->board_type, brd_name);
+
+ printk("NetXen %s Board S/N %s Chip id 0x%x\n",
+ brd_name, user_info.serial_num, board_info->chip_id);
+
+ printk("NetXen %s Board #%d, Chip id 0x%x\n",
+ board_info->board_type == 0x0b ? "XGB" : "GBE",
+ board_info->board_num, board_info->chip_id);
+ fw_major = readl(NETXEN_CRB_NORMALIZE(adapter,
+ NETXEN_FW_VERSION_MAJOR));
+ fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter,
+ NETXEN_FW_VERSION_MINOR));
+ fw_build =
+ readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB));
+
+ printk("NetXen Firmware version %d.%d.%d\n", fw_major, fw_minor,
+ fw_build);
+ }
+ if (fw_major != _NETXEN_NIC_LINUX_MAJOR) {
+ printk(KERN_ERR "Driver and firmware major version numbers "
+ "do not match\n"
+ "Driver major version = %d \t"
+ "Firmware major version = %d\n",
+ _NETXEN_NIC_LINUX_MAJOR, fw_major);
+ adapter->driver_mismatch = 1;
+ }
+ if (fw_minor != _NETXEN_NIC_LINUX_MINOR) {
+ printk(KERN_ERR "Driver and firmware minor version numbers "
+ "do not match\n"
+ "Driver minor version = %d \t"
+ "Firmware minor version = %d\n",
+ _NETXEN_NIC_LINUX_MINOR, fw_minor);
+ adapter->driver_mismatch = 1;
+ }
+ if (adapter->driver_mismatch)
+ printk(KERN_INFO "Please use a driver with version %d.%d.xxx\n",
+ fw_major, fw_minor);
+}
+
+int netxen_crb_read_val(struct netxen_adapter *adapter, unsigned long off)
+{
+ int data;
+ netxen_nic_hw_read_wx(adapter, off, &data, 4);
+ return data;
+}
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
new file mode 100644
index 000000000000..201a636b7ab8
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ * info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ *
+ * Structures, enums, and macros for the MAC
+ *
+ */
+
+#ifndef __NETXEN_NIC_HW_H_
+#define __NETXEN_NIC_HW_H_
+
+#include "netxen_nic_hdr.h"
+
+/* Hardware memory size of 128 meg */
+#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024)
+
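+/*
+ * Fallback 64-bit MMIO accessors for platforms that do not provide
+ * readq/writeq; note that the two 32-bit halves are not atomic.
+ */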
+#ifndef readq
+static inline u64 readq(void __iomem * addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem * addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+static inline void netxen_nic_hw_block_write64(u64 __iomem * data_ptr,
+ u64 __iomem * addr,
+ int num_words)
+{
+ int num;
+ for (num = 0; num < num_words; num++) {
+ writeq(readq((void __iomem *)data_ptr), addr);
+ addr++;
+ data_ptr++;
+ }
+}
+
+static inline void netxen_nic_hw_block_read64(u64 __iomem * data_ptr,
+ u64 __iomem * addr, int num_words)
+{
+ int num;
+ for (num = 0; num < num_words; num++) {
+ writeq(readq((void __iomem *)addr), data_ptr);
+ addr++;
+ data_ptr++;
+ }
+
+}
+
+struct netxen_adapter;
+
+#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20)
+
+#define NETXEN_NIC_LOCKED_READ_REG(X, Y) \
+ addr = pci_base_offset(adapter, (X)); \
+ *(u32 *)Y = readl(addr);
+
+struct netxen_port;
+void netxen_nic_set_link_parameters(struct netxen_port *port);
+void netxen_nic_flash_print(struct netxen_adapter *adapter);
+int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off,
+ void *data, int len);
+void netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
+ unsigned long off, int data);
+int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off,
+ void *data, int len);
+
+typedef u8 netxen_ethernet_macaddr_t[6];
+
+/* Nibble or Byte mode for phy interface (GbE mode only) */
+typedef enum {
+ NETXEN_NIU_10_100_MB = 0,
+ NETXEN_NIU_1000_MB
+} netxen_niu_gbe_ifmode_t;
+
+#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+
+#define netxen_gb_enable_tx(config_word) \
+ set_bit(0, (unsigned long*)(&config_word))
+#define netxen_gb_enable_rx(config_word) \
+ set_bit(2, (unsigned long*)(&config_word))
+#define netxen_gb_tx_flowctl(config_word) \
+ set_bit(4, (unsigned long*)(&config_word))
+#define netxen_gb_rx_flowctl(config_word) \
+ set_bit(5, (unsigned long*)(&config_word))
+#define netxen_gb_tx_reset_pb(config_word) \
+ set_bit(16, (unsigned long*)(&config_word))
+#define netxen_gb_rx_reset_pb(config_word) \
+ set_bit(17, (unsigned long*)(&config_word))
+#define netxen_gb_tx_reset_mac(config_word) \
+ set_bit(18, (unsigned long*)(&config_word))
+#define netxen_gb_rx_reset_mac(config_word) \
+ set_bit(19, (unsigned long*)(&config_word))
+#define netxen_gb_soft_reset(config_word) \
+ set_bit(31, (unsigned long*)(&config_word))
+
+#define netxen_gb_unset_tx_flowctl(config_word) \
+ clear_bit(4, (unsigned long *)(&config_word))
+#define netxen_gb_unset_rx_flowctl(config_word) \
+ clear_bit(5, (unsigned long*)(&config_word))
+
+#define netxen_gb_get_tx_synced(config_word) \
+ _netxen_crb_get_bit((config_word), 1)
+#define netxen_gb_get_rx_synced(config_word) \
+ _netxen_crb_get_bit((config_word), 3)
+#define netxen_gb_get_tx_flowctl(config_word) \
+ _netxen_crb_get_bit((config_word), 4)
+#define netxen_gb_get_rx_flowctl(config_word) \
+ _netxen_crb_get_bit((config_word), 5)
+#define netxen_gb_get_soft_reset(config_word) \
+ _netxen_crb_get_bit((config_word), 31)
+
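+/*
+ * The helpers above operate on a register value held in a local word:
+ * read the register, set or clear bits, then write it back, e.g.
+ *	netxen_gb_enable_tx(mac_cfg0);
+ *	netxen_gb_enable_rx(mac_cfg0);
+ */
+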
+/*
+ * NIU GB MAC Config Register 1 (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : duplex => 1:full duplex mode, 0:half duplex
+ * Bit 1 : crc_enable => 1:append CRC to xmit frames, 0:dont append
+ * Bit 2 : padshort => 1:pad short frames and add CRC, 0:dont pad
+ * Bit 4 : checklength => 1:check framelen with actual,0:dont check
+ * Bit 5 : hugeframes => 1:allow oversize xmit frames, 0:dont allow
+ * Bits 8-9 : intfmode => 01:nibble (10/100), 10:byte (1000)
+ * Bits 12-15 : preamblelen => preamble field length in bytes, default 7
+ */
+
+#define netxen_gb_set_duplex(config_word) \
+ set_bit(0, (unsigned long*)&config_word)
+#define netxen_gb_set_crc_enable(config_word) \
+ set_bit(1, (unsigned long*)&config_word)
+#define netxen_gb_set_padshort(config_word) \
+ set_bit(2, (unsigned long*)&config_word)
+#define netxen_gb_set_checklength(config_word) \
+ set_bit(4, (unsigned long*)&config_word)
+#define netxen_gb_set_hugeframes(config_word) \
+ set_bit(5, (unsigned long*)&config_word)
+#define netxen_gb_set_preamblelen(config_word, val) \
+ ((config_word) |= ((val) << 12) & 0xF000)
+#define netxen_gb_set_intfmode(config_word, val) \
+ ((config_word) |= ((val) << 8) & 0x300)
+
+#define netxen_gb_get_stationaddress_low(config_word) ((config_word) >> 16)
+
+#define netxen_gb_set_mii_mgmt_clockselect(config_word, val) \
+ ((config_word) |= ((val) & 0x07))
+#define netxen_gb_mii_mgmt_reset(config_word) \
+ set_bit(31, (unsigned long*)&config_word)
+#define netxen_gb_mii_mgmt_unset(config_word) \
+ clear_bit(31, (unsigned long*)&config_word)
+
+/*
+ * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3)
+ * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op
+ * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op
+ */
+
+#define netxen_gb_mii_mgmt_set_read_cycle(config_word) \
+ set_bit(0, (unsigned long*)&config_word)
+#define netxen_gb_mii_mgmt_reg_addr(config_word, val) \
+ ((config_word) |= ((val) & 0x1F))
+#define netxen_gb_mii_mgmt_phy_addr(config_word, val) \
+ ((config_word) |= (((val) & 0x1F) << 8))
+
+/*
+ * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3)
+ * Read-only register.
+ * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle
+ * Bit 1 : scanning => 1:scan operation in progress, 0:idle
+ * Bit 2 : notvalid => 1:mgmt result data not yet valid, 0:idle
+ */
+#define netxen_get_gb_mii_mgmt_busy(config_word) \
+ _netxen_crb_get_bit(config_word, 0)
+#define netxen_get_gb_mii_mgmt_scanning(config_word) \
+ _netxen_crb_get_bit(config_word, 1)
+#define netxen_get_gb_mii_mgmt_notvalid(config_word) \
+ _netxen_crb_get_bit(config_word, 2)
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+typedef enum {
+ NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL = 0,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS = 1,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 = 2,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 = 3,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG = 4,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART = 5,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE = 6,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT = 7,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE = 8,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL = 9,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS = 10,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS = 15,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL = 16,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS = 17,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE = 18,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS = 19,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE = 20,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT = 21,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL = 24,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE = 25,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET = 26,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE = 27
+} netxen_niu_phy_register_t;
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0 : jabber => 1:jabber detected, 0:not
+ * Bit 1 : polarity => 1:polarity reversed, 0:normal
+ * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4 : energydetect => 1:sleep, 0:active
+ * Bit 5 : downshift => 1:downshift, 0:no downshift
+ * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9 : cablelen => not valid in 10Mb/s mode
+ * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10 : link => 1:link up, 0:link down
+ * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12 : pagercvd => 1:page received, 0:page not received
+ * Bit 13 : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define netxen_get_phy_cablelen(config_word) (((config_word) >> 7) & 0x07)
+#define netxen_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define netxen_set_phy_speed(config_word, val) \
+ ((config_word) |= ((val & 0x03) << 14))
+#define netxen_set_phy_duplex(config_word) \
+ set_bit(13, (unsigned long*)&config_word)
+#define netxen_clear_phy_duplex(config_word) \
+ clear_bit(13, (unsigned long*)&config_word)
+
+#define netxen_get_phy_jabber(config_word) \
+ _netxen_crb_get_bit(config_word, 0)
+#define netxen_get_phy_polarity(config_word) \
+ _netxen_crb_get_bit(config_word, 1)
+#define netxen_get_phy_recvpause(config_word) \
+ _netxen_crb_get_bit(config_word, 2)
+#define netxen_get_phy_xmitpause(config_word) \
+ _netxen_crb_get_bit(config_word, 3)
+#define netxen_get_phy_energydetect(config_word) \
+ _netxen_crb_get_bit(config_word, 4)
+#define netxen_get_phy_downshift(config_word) \
+ _netxen_crb_get_bit(config_word, 5)
+#define netxen_get_phy_crossover(config_word) \
+ _netxen_crb_get_bit(config_word, 6)
+#define netxen_get_phy_link(config_word) \
+ _netxen_crb_get_bit(config_word, 10)
+#define netxen_get_phy_resolved(config_word) \
+ _netxen_crb_get_bit(config_word, 11)
+#define netxen_get_phy_pagercvd(config_word) \
+ _netxen_crb_get_bit(config_word, 12)
+#define netxen_get_phy_duplex(config_word) \
+ _netxen_crb_get_bit(config_word, 13)
+
+/*
+ * Interrupt Register definition
+ * This definition applies to registers 18 and 19 (int enable and int status).
+ * Bit 0 : jabber
+ * Bit 1 : polarity_changed
+ * Bit 4 : energy_detect
+ * Bit 5 : downshift
+ * Bit 6 : mdi_xover_changed
+ * Bit 7 : fifo_over_underflow
+ * Bit 8 : false_carrier
+ * Bit 9 : symbol_error
+ * Bit 10: link_status_changed
+ * Bit 11: autoneg_completed
+ * Bit 12: page_received
+ * Bit 13: duplex_changed
+ * Bit 14: speed_changed
+ * Bit 15: autoneg_error
+ */
+
+#define netxen_get_phy_int_jabber(config_word) \
+ _netxen_crb_get_bit(config_word, 0)
+#define netxen_get_phy_int_polarity_changed(config_word) \
+ _netxen_crb_get_bit(config_word, 1)
+#define netxen_get_phy_int_energy_detect(config_word) \
+ _netxen_crb_get_bit(config_word, 4)
+#define netxen_get_phy_int_downshift(config_word) \
+ _netxen_crb_get_bit(config_word, 5)
+#define netxen_get_phy_int_mdi_xover_changed(config_word) \
+ _netxen_crb_get_bit(config_word, 6)
+#define netxen_get_phy_int_fifo_over_underflow(config_word) \
+ _netxen_crb_get_bit(config_word, 7)
+#define netxen_get_phy_int_false_carrier(config_word) \
+ _netxen_crb_get_bit(config_word, 8)
+#define netxen_get_phy_int_symbol_error(config_word) \
+ _netxen_crb_get_bit(config_word, 9)
+#define netxen_get_phy_int_link_status_changed(config_word) \
+ _netxen_crb_get_bit(config_word, 10)
+#define netxen_get_phy_int_autoneg_completed(config_word) \
+ _netxen_crb_get_bit(config_word, 11)
+#define netxen_get_phy_int_page_received(config_word) \
+ _netxen_crb_get_bit(config_word, 12)
+#define netxen_get_phy_int_duplex_changed(config_word) \
+ _netxen_crb_get_bit(config_word, 13)
+#define netxen_get_phy_int_speed_changed(config_word) \
+ _netxen_crb_get_bit(config_word, 14)
+#define netxen_get_phy_int_autoneg_error(config_word) \
+ _netxen_crb_get_bit(config_word, 15)
+
+#define netxen_set_phy_int_link_status_changed(config_word) \
+ set_bit(10, (unsigned long*)&config_word)
+#define netxen_set_phy_int_autoneg_completed(config_word) \
+ set_bit(11, (unsigned long*)&config_word)
+#define netxen_set_phy_int_speed_changed(config_word) \
+ set_bit(14, (unsigned long*)&config_word)
+
+/*
+ * NIU Mode Register.
+ * Bit 0 : enable FibreChannel
+ * Bit 1 : enable 10/100/1000 Ethernet
+ * Bit 2 : enable 10Gb Ethernet
+ */
+
+#define netxen_get_niu_enable_ge(config_word) \
+ _netxen_crb_get_bit(config_word, 1)
+
+/* Promiscuous mode options (GbE mode only) */
+typedef enum {
+ NETXEN_NIU_PROMISC_MODE = 0,
+ NETXEN_NIU_NON_PROMISC_MODE
+} netxen_niu_prom_mode_t;
+
+/*
+ * NIU GB Drop CRC Register
+ *
+ * Bit 0 : drop_gb0 => 1:drop pkts with bad CRCs, 0:pass them on
+ * Bit 1 : drop_gb1 => 1:drop pkts with bad CRCs, 0:pass them on
+ * Bit 2 : drop_gb2 => 1:drop pkts with bad CRCs, 0:pass them on
+ * Bit 3 : drop_gb3 => 1:drop pkts with bad CRCs, 0:pass them on
+ */
+
+#define netxen_set_gb_drop_gb0(config_word) \
+ set_bit(0, (unsigned long*)&config_word)
+#define netxen_set_gb_drop_gb1(config_word) \
+ set_bit(1, (unsigned long*)&config_word)
+#define netxen_set_gb_drop_gb2(config_word) \
+ set_bit(2, (unsigned long*)&config_word)
+#define netxen_set_gb_drop_gb3(config_word) \
+ set_bit(3, (unsigned long*)&config_word)
+
+#define netxen_clear_gb_drop_gb0(config_word) \
+ clear_bit(0, (unsigned long*)&config_word)
+#define netxen_clear_gb_drop_gb1(config_word) \
+ clear_bit(1, (unsigned long*)&config_word)
+#define netxen_clear_gb_drop_gb2(config_word) \
+ clear_bit(2, (unsigned long*)&config_word)
+#define netxen_clear_gb_drop_gb3(config_word) \
+ clear_bit(3, (unsigned long*)&config_word)
+
+/*
+ * NIU XG MAC Config Register
+ *
+ * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable
+ * Bit 2 : rx_enable => 1:enable frame recv, 0:disable
+ * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op
+ * Bit 27: xaui_framer_reset
+ * Bit 28: xaui_rx_reset
+ * Bit 29: xaui_tx_reset
+ * Bit 30: xg_ingress_afifo_reset
+ * Bit 31: xg_egress_afifo_reset
+ */
+
+#define netxen_xg_soft_reset(config_word) \
+ set_bit(4, (unsigned long*)&config_word)
+
+/*
+ * MAC Control Register
+ *
+ * Bit 0-1 : id_pool0
+ * Bit 2 : enable_xtnd0
+ * Bit 4-5 : id_pool1
+ * Bit 6 : enable_xtnd1
+ * Bit 8-9 : id_pool2
+ * Bit 10 : enable_xtnd2
+ * Bit 12-13 : id_pool3
+ * Bit 14 : enable_xtnd3
+ * Bit 24-25 : mode_select
+ * Bit 28-31 : enable_pool
+ */
+
+#define netxen_nic_mcr_set_id_pool0(config, val) \
+ ((config) |= ((val) &0x03))
+#define netxen_nic_mcr_set_enable_xtnd0(config) \
+ (set_bit(3, (unsigned long *)&(config)))
+#define netxen_nic_mcr_set_id_pool1(config, val) \
+ ((config) |= (((val) & 0x03) << 4))
+#define netxen_nic_mcr_set_enable_xtnd1(config) \
+ (set_bit(6, (unsigned long *)&(config)))
+#define netxen_nic_mcr_set_id_pool2(config, val) \
+ ((config) |= (((val) & 0x03) << 8))
+#define netxen_nic_mcr_set_enable_xtnd2(config) \
+ (set_bit(10, (unsigned long *)&(config)))
+#define netxen_nic_mcr_set_id_pool3(config, val) \
+ ((config) |= (((val) & 0x03) << 12))
+#define netxen_nic_mcr_set_enable_xtnd3(config) \
+ (set_bit(14, (unsigned long *)&(config)))
+#define netxen_nic_mcr_set_mode_select(config, val) \
+ ((config) |= (((val) & 0x03) << 24))
+#define netxen_nic_mcr_set_enable_pool(config, val) \
+ ((config) |= (((val) & 0x0f) << 28))
+
+/* Set promiscuous mode for a GbE interface */
+int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, int port,
+ netxen_niu_prom_mode_t mode);
+int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
+ int port, netxen_niu_prom_mode_t mode);
+
+/* get/set the MAC address for a given MAC */
+int netxen_niu_macaddr_get(struct netxen_adapter *adapter, int port,
+ netxen_ethernet_macaddr_t * addr);
+int netxen_niu_macaddr_set(struct netxen_port *port,
+ netxen_ethernet_macaddr_t addr);
+
+/* XG versions */
+int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, int port,
+ netxen_ethernet_macaddr_t * addr);
+int netxen_niu_xg_macaddr_set(struct netxen_port *port,
+ netxen_ethernet_macaddr_t addr);
+
+/* Generic enable for GbE ports. Will detect the speed of the link. */
+int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port);
+
+int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port);
+
+/* Disable a GbE interface */
+int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter, int port);
+
+int netxen_niu_disable_xg_port(struct netxen_adapter *adapter, int port);
+
+#endif /* __NETXEN_NIC_HW_H_ */
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
new file mode 100644
index 000000000000..0dca029bc3e5
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -0,0 +1,1304 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ * info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ *
+ * Source file for NIC routines to initialize the Phantom Hardware
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+#include "netxen_nic_ioctl.h"
+#include "netxen_nic_phan_reg.h"
+
+struct crb_addr_pair {
+ long addr;
+ long data;
+};
+
+#define NETXEN_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
+#define NETXEN_ADDR_ERROR ((unsigned long ) 0xffffffff )
+
+#define crb_addr_transform(name) \
+ crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
+ NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
+
+#define NETXEN_NIC_XDMA_RESET 0x8000ff
+
+static inline void
+netxen_nic_locked_write_reg(struct netxen_adapter *adapter,
+ unsigned long off, int *data)
+{
+ void __iomem *addr = pci_base_offset(adapter, off);
+ writel(*data, addr);
+}
+
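+/*
+ * Populate crb_addr_xform[]: for every CRB block, record the internal
+ * hub agent base (agent number << 20) indexed by its PCI CRB slot.
+ * netxen_decode_crb_addr() walks this table in reverse to turn an
+ * internal Phantom CRB address into a PCI CRB offset.
+ */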
+static void crb_addr_transform_setup(void)
+{
+ crb_addr_transform(XDMA);
+ crb_addr_transform(TIMR);
+ crb_addr_transform(SRE);
+ crb_addr_transform(SQN3);
+ crb_addr_transform(SQN2);
+ crb_addr_transform(SQN1);
+ crb_addr_transform(SQN0);
+ crb_addr_transform(SQS3);
+ crb_addr_transform(SQS2);
+ crb_addr_transform(SQS1);
+ crb_addr_transform(SQS0);
+ crb_addr_transform(RPMX7);
+ crb_addr_transform(RPMX6);
+ crb_addr_transform(RPMX5);
+ crb_addr_transform(RPMX4);
+ crb_addr_transform(RPMX3);
+ crb_addr_transform(RPMX2);
+ crb_addr_transform(RPMX1);
+ crb_addr_transform(RPMX0);
+ crb_addr_transform(ROMUSB);
+ crb_addr_transform(SN);
+ crb_addr_transform(QMN);
+ crb_addr_transform(QMS);
+ crb_addr_transform(PGNI);
+ crb_addr_transform(PGND);
+ crb_addr_transform(PGN3);
+ crb_addr_transform(PGN2);
+ crb_addr_transform(PGN1);
+ crb_addr_transform(PGN0);
+ crb_addr_transform(PGSI);
+ crb_addr_transform(PGSD);
+ crb_addr_transform(PGS3);
+ crb_addr_transform(PGS2);
+ crb_addr_transform(PGS1);
+ crb_addr_transform(PGS0);
+ crb_addr_transform(PS);
+ crb_addr_transform(PH);
+ crb_addr_transform(NIU);
+ crb_addr_transform(I2Q);
+ crb_addr_transform(EG);
+ crb_addr_transform(MN);
+ crb_addr_transform(MS);
+ crb_addr_transform(CAS2);
+ crb_addr_transform(CAS1);
+ crb_addr_transform(CAS0);
+ crb_addr_transform(CAM);
+ crb_addr_transform(C2C1);
+ crb_addr_transform(C2C0);
+}
+
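+/*
+ * Handshake with the command peg: if it has not already been
+ * acknowledged, poll CRB_CMDPEG_STATE (up to 2000 iterations of 100us)
+ * for PHAN_INITIALIZE_COMPLETE and acknowledge it with
+ * PHAN_INITIALIZE_ACK.  Returns -EIO if the peg never comes up.
+ */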
+int netxen_init_firmware(struct netxen_adapter *adapter)
+{
+ u32 state = 0, loops = 0, err = 0;
+
+ /* Window 1 call */
+ state = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+
+ if (state == PHAN_INITIALIZE_ACK)
+ return 0;
+
+ while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) {
+ udelay(100);
+ /* Window 1 call */
+ state = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+
+ loops++;
+ }
+ if (loops >= 2000) {
+ printk(KERN_ERR "Cmd Peg initialization not complete: %x.\n",
+ state);
+ err = -EIO;
+ return err;
+ }
+ /* Window 1 call */
+ writel(PHAN_INITIALIZE_ACK,
+ NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+
+ return err;
+}
+
+#define NETXEN_ADDR_LIMIT 0xffffffffULL
+
+void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr,
+ struct pci_dev **used_dev)
+{
+ void *addr;
+
+ addr = pci_alloc_consistent(pdev, sz, ptr);
+ if ((unsigned long long)(*ptr) < NETXEN_ADDR_LIMIT) {
+ *used_dev = pdev;
+ return addr;
+ }
+ pci_free_consistent(pdev, sz, addr, *ptr);
+ addr = pci_alloc_consistent(NULL, sz, ptr);
+ *used_dev = NULL;
+ return addr;
+}
+
+void netxen_initialize_adapter_sw(struct netxen_adapter *adapter)
+{
+ int ctxid, ring;
+ u32 i;
+ u32 num_rx_bufs = 0;
+ struct netxen_rcv_desc_ctx *rcv_desc;
+
+ DPRINTK(INFO, "initializing some queues: %p\n", adapter);
+ for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
+ for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+ struct netxen_rx_buffer *rx_buf;
+ rcv_desc = &adapter->recv_ctx[ctxid].rcv_desc[ring];
+ rcv_desc->rcv_free = rcv_desc->max_rx_desc_count;
+ rcv_desc->begin_alloc = 0;
+ rx_buf = rcv_desc->rx_buf_arr;
+ num_rx_bufs = rcv_desc->max_rx_desc_count;
+ /*
+ * Now go through all of them, set reference handles
+ * and put them in the queues.
+ */
+ for (i = 0; i < num_rx_bufs; i++) {
+ rx_buf->ref_handle = i;
+ rx_buf->state = NETXEN_BUFFER_FREE;
+
+ DPRINTK(INFO, "Rx buf:ctx%d i(%d) rx_buf:"
+ "%p\n", ctxid, i, rx_buf);
+ rx_buf++;
+ }
+ }
+ }
+ DPRINTK(INFO, "initialized buffers for %s and %s\n",
+ "adapter->free_cmd_buf_list", "adapter->free_rxbuf");
+}
+
+void netxen_initialize_adapter_hw(struct netxen_adapter *adapter)
+{
+ int ports = 0;
+ struct netxen_board_info *board_info = &(adapter->ahw.boardcfg);
+
+ if (netxen_nic_get_board_info(adapter) != 0)
+ printk("%s: Error getting board config info.\n",
+ netxen_nic_driver_name);
+ get_brd_port_by_type(board_info->board_type, &ports);
+ if (ports == 0)
+ printk(KERN_ERR "%s: Unknown board type\n",
+ netxen_nic_driver_name);
+ adapter->ahw.max_ports = ports;
+}
+
+void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
+{
+ struct netxen_drvops *ops = adapter->ops;
+ switch (adapter->ahw.board_type) {
+ case NETXEN_NIC_GBE:
+ ops->enable_phy_interrupts =
+ netxen_niu_gbe_enable_phy_interrupts;
+ ops->disable_phy_interrupts =
+ netxen_niu_gbe_disable_phy_interrupts;
+ ops->handle_phy_intr = netxen_nic_gbe_handle_phy_intr;
+ ops->macaddr_set = netxen_niu_macaddr_set;
+ ops->set_mtu = netxen_nic_set_mtu_gb;
+ ops->set_promisc = netxen_niu_set_promiscuous_mode;
+ ops->unset_promisc = netxen_niu_set_promiscuous_mode;
+ ops->phy_read = netxen_niu_gbe_phy_read;
+ ops->phy_write = netxen_niu_gbe_phy_write;
+ ops->init_port = netxen_niu_gbe_init_port;
+ ops->init_niu = netxen_nic_init_niu_gb;
+ ops->stop_port = netxen_niu_disable_gbe_port;
+ break;
+
+ case NETXEN_NIC_XGBE:
+ ops->enable_phy_interrupts =
+ netxen_niu_xgbe_enable_phy_interrupts;
+ ops->disable_phy_interrupts =
+ netxen_niu_xgbe_disable_phy_interrupts;
+ ops->handle_phy_intr = netxen_nic_xgbe_handle_phy_intr;
+ ops->macaddr_set = netxen_niu_xg_macaddr_set;
+ ops->set_mtu = netxen_nic_set_mtu_xgb;
+ ops->init_port = netxen_niu_xg_init_port;
+ ops->set_promisc = netxen_niu_xg_set_promiscuous_mode;
+ ops->unset_promisc = netxen_niu_xg_set_promiscuous_mode;
+ ops->stop_port = netxen_niu_disable_xg_port;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
+ * address to external PCI CRB address.
+ */
+unsigned long netxen_decode_crb_addr(unsigned long addr)
+{
+ int i;
+ unsigned long base_addr, offset, pci_base;
+
+ crb_addr_transform_setup();
+
+ pci_base = NETXEN_ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == NETXEN_ADDR_ERROR)
+ return pci_base;
+ else
+ return (pci_base + offset);
+}
+
+static long rom_max_timeout = 10000;
+static long rom_lock_timeout = 1000000;
+
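+/*
+ * Serialize flash access: spin on PCIe semaphore 2 (yielding the CPU
+ * when not in atomic context) until it is granted, then record the
+ * driver as the owner in NETXEN_ROM_LOCK_ID.
+ */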
+static inline int rom_lock(struct netxen_adapter *adapter)
+{
+ int iter;
+ u32 done = 0;
+ int timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore2 from PCI HW block */
+ netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK),
+ &done);
+ if (done == 1)
+ break;
+ if (timeout >= rom_lock_timeout)
+ return -EIO;
+
+ timeout++;
+ /*
+ * Yield CPU
+ */
+ if (!in_atomic())
+ schedule();
+ else {
+ for (iter = 0; iter < 20; iter++)
+ cpu_relax(); /* This is a nop instr on i386 */
+ }
+ }
+ netxen_nic_reg_write(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+ return 0;
+}
+
+int netxen_wait_rom_done(struct netxen_adapter *adapter)
+{
+ long timeout = 0;
+ long done = 0;
+
+ while (done == 0) {
+ done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS);
+ done &= 2;
+ timeout++;
+ if (timeout >= rom_max_timeout) {
+ printk("Timeout reached waiting for rom done\n");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static inline int netxen_rom_wren(struct netxen_adapter *adapter)
+{
+ /* Set write enable latch in ROM status register */
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE,
+ M25P_INSTR_WREN);
+ if (netxen_wait_rom_done(adapter)) {
+ return -1;
+ }
+ return 0;
+}
+
+static inline unsigned int netxen_rdcrbreg(struct netxen_adapter *adapter,
+ unsigned int addr)
+{
+ unsigned int data = 0xdeaddead;
+ data = netxen_nic_reg_read(adapter, addr);
+ return data;
+}
+
+static inline int netxen_do_rom_rdsr(struct netxen_adapter *adapter)
+{
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE,
+ M25P_INSTR_RDSR);
+ if (netxen_wait_rom_done(adapter)) {
+ return -1;
+ }
+ return netxen_rdcrbreg(adapter, NETXEN_ROMUSB_ROM_RDATA);
+}
+
+static inline void netxen_rom_unlock(struct netxen_adapter *adapter)
+{
+ u32 val;
+
+ /* release semaphore2 */
+ netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK), &val);
+
+}
+
+int netxen_rom_wip_poll(struct netxen_adapter *adapter)
+{
+ long timeout = 0;
+ long wip = 1;
+ int val;
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+ while (wip != 0) {
+ val = netxen_do_rom_rdsr(adapter);
+ wip = val & 1;
+ timeout++;
+ if (timeout > rom_max_timeout) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static inline int do_rom_fast_write(struct netxen_adapter *adapter,
+ int addr, int data)
+{
+ if (netxen_rom_wren(adapter)) {
+ return -1;
+ }
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_WDATA, data);
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE,
+ M25P_INSTR_PP);
+ if (netxen_wait_rom_done(adapter)) {
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+ return -1;
+ }
+
+ return netxen_rom_wip_poll(adapter);
+}
+
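+/*
+ * Issue a serial flash fast-read (opcode 0xb) through the ROMUSB
+ * registers for the given address and return the 32-bit result via
+ * @valp.  The udelay()s keep consecutive CRB accesses from bursting.
+ */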
+static inline int
+do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
+{
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
+ udelay(100); /* prevent bursting on CRB */
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (netxen_wait_rom_done(adapter)) {
+ printk("Error waiting for rom done\n");
+ return -EIO;
+ }
+ /* reset abyte_cnt and dummy_byte_cnt */
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+ udelay(100); /* prevent bursting on CRB */
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+ *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
+{
+ int ret;
+
+ if (rom_lock(adapter) != 0)
+ return -EIO;
+
+ ret = do_rom_fast_read(adapter, addr, valp);
+ netxen_rom_unlock(adapter);
+ return ret;
+}
+
+int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data)
+{
+ int ret = 0;
+
+ if (rom_lock(adapter) != 0) {
+ return -1;
+ }
+ ret = do_rom_fast_write(adapter, addr, data);
+ netxen_rom_unlock(adapter);
+ return ret;
+}
+
+int netxen_do_rom_se(struct netxen_adapter *adapter, int addr)
+{
+ netxen_rom_wren(adapter);
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE,
+ M25P_INSTR_SE);
+ if (netxen_wait_rom_done(adapter)) {
+ netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+ return -1;
+ }
+ return netxen_rom_wip_poll(adapter);
+}
+
+int netxen_rom_se(struct netxen_adapter *adapter, int addr)
+{
+ int ret = 0;
+ if (rom_lock(adapter) != 0) {
+ return -1;
+ }
+ ret = netxen_do_rom_se(adapter, addr);
+ netxen_rom_unlock(adapter);
+ return ret;
+}
+
+#define NETXEN_BOARDTYPE 0x4008
+#define NETXEN_BOARDNUM 0x400c
+#define NETXEN_CHIPNUM 0x4010
+#define NETXEN_ROMBUS_RESET 0xFFFFFFFF
+#define NETXEN_ROM_FIRST_BARRIER 0x800000000ULL
+#define NETXEN_ROM_FOUND_INIT 0x400
+
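+/*
+ * Reset the chip and replay the CRB (address, value) init pairs stored
+ * in flash: skip the cold-reboot magic entry, hold XDMA in reset while
+ * the global SW reset entry is written, then un-reset the net cache,
+ * disable the peg caches and clear the peg scratch registers.
+ */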
+int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
+{
+ int addr, val, status;
+ int n, i;
+ int init_delay = 0;
+ struct crb_addr_pair *buf;
+ unsigned long off;
+
+ /* resetall */
+ status = netxen_nic_get_board_info(adapter);
+ if (status)
+ printk("%s: netxen_pinit_from_rom: Error getting board info\n",
+ netxen_nic_driver_name);
+
+ netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
+ NETXEN_ROMBUS_RESET);
+
+ if (verbose) {
+ int val;
+ if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
+ printk("P2 ROM board type: 0x%08x\n", val);
+ else
+ printk("Could not read board type\n");
+ if (netxen_rom_fast_read(adapter, NETXEN_BOARDNUM, &val) == 0)
+ printk("P2 ROM board num: 0x%08x\n", val);
+ else
+ printk("Could not read board number\n");
+ if (netxen_rom_fast_read(adapter, NETXEN_CHIPNUM, &val) == 0)
+ printk("P2 ROM chip num: 0x%08x\n", val);
+ else
+ printk("Could not read chip number\n");
+ }
+
+ if (netxen_rom_fast_read(adapter, 0, &n) == 0
+ && (n & NETXEN_ROM_FIRST_BARRIER)) {
+ n &= ~NETXEN_ROM_ROUNDUP;
+ if (n < NETXEN_ROM_FOUND_INIT) {
+ if (verbose)
+ printk("%s: %d CRB init values found"
+ " in ROM.\n", netxen_nic_driver_name, n);
+ } else {
+ printk("%s: n=0x%x Error! NetXen card flash not"
+ " initialized.\n", __FUNCTION__, n);
+ return -EIO;
+ }
+ buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+ printk("%s: netxen_pinit_from_rom: Unable to allocate "
+ "memory.\n", netxen_nic_driver_name);
+ return -ENOMEM;
+ }
+ for (i = 0; i < n; i++) {
+ if (netxen_rom_fast_read(adapter, 8 * i + 4, &val) != 0
+ || netxen_rom_fast_read(adapter, 8 * i + 8,
+ &addr) != 0)
+ return -EIO;
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+
+ if (verbose)
+ printk("%s: PCI: 0x%08x == 0x%08x\n",
+ netxen_nic_driver_name, (unsigned int)
+ netxen_decode_crb_addr((unsigned long)
+ addr), val);
+ }
+ for (i = 0; i < n; i++) {
+
+ off =
+ netxen_decode_crb_addr((unsigned long)buf[i].addr) +
+ NETXEN_PCI_CRBSPACE;
+ /* skipping cold reboot MAGIC */
+ if (off == NETXEN_CAM_RAM(0x1fc))
+ continue;
+
+ /* After writing this register, HW needs time for CRB */
+ /* to quiet down (else crb_window returns 0xffffffff) */
+ if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
+ init_delay = 1;
+ /* hold xdma in reset also */
+ buf[i].data = NETXEN_NIC_XDMA_RESET;
+ }
+
+ if (ADDR_IN_WINDOW1(off)) {
+ writel(buf[i].data,
+ NETXEN_CRB_NORMALIZE(adapter, off));
+ } else {
+ netxen_nic_pci_change_crbwindow(adapter, 0);
+ writel(buf[i].data,
+ pci_base_offset(adapter, off));
+
+ netxen_nic_pci_change_crbwindow(adapter, 1);
+ }
+ if (init_delay == 1) {
+ ssleep(1);
+ init_delay = 0;
+ }
+ msleep(1);
+ }
+ kfree(buf);
+
+ /* disable_peg_cache_all */
+
+ /* unreset_net_cache */
+ netxen_nic_hw_read_wx(adapter, NETXEN_ROMUSB_GLB_SW_RESET, &val,
+ 4);
+ netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
+ (val & 0xffffff0f));
+ /* p2dn replyCount */
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
+ /* disable_peg_cache 0 */
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_CRB_PEG_NET_D + 0x4c, 8);
+ /* disable_peg_cache 1 */
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_CRB_PEG_NET_I + 0x4c, 8);
+
+ /* peg_clr_all */
+
+ /* peg_clr 0 */
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8,
+ 0);
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc,
+ 0);
+ /* peg_clr 1 */
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8,
+ 0);
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc,
+ 0);
+ /* peg_clr 2 */
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8,
+ 0);
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc,
+ 0);
+ /* peg_clr 3 */
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8,
+ 0);
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc,
+ 0);
+ }
+ return 0;
+}
+
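+/*
+ * Unless a pegtune value was supplied, poll CRB_CMDPEG_STATE until the
+ * firmware reports PHAN_INITIALIZE_COMPLETE (bounded at 200000 loops)
+ * and warn if the boot handshake never completes.
+ */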
+void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
+{
+ u32 val = 0;
+ int loops = 0;
+
+ if (!pegtune_val) {
+ while (val != PHAN_INITIALIZE_COMPLETE && loops < 200000) {
+ udelay(100);
+ schedule();
+ val =
+ readl(NETXEN_CRB_NORMALIZE
+ (adapter, CRB_CMDPEG_STATE));
+ loops++;
+ }
+ if (val != PHAN_INITIALIZE_COMPLETE)
+ printk("WARNING: Initial boot wait loop failed...\n");
+ }
+}
+
+int netxen_nic_rx_has_work(struct netxen_adapter *adapter)
+{
+ int ctx;
+
+ for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+ struct netxen_recv_context *recv_ctx =
+ &(adapter->recv_ctx[ctx]);
+ u32 consumer;
+ struct status_desc *desc_head;
+ struct status_desc *desc;
+
+ consumer = recv_ctx->status_rx_consumer;
+ desc_head = recv_ctx->rcv_status_desc_head;
+ desc = &desc_head[consumer];
+
+ if (((le16_to_cpu(desc->owner)) & STATUS_OWNER_HOST))
+ return 1;
+ }
+
+ return 0;
+}
+
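+/*
+ * Check the device temperature reported in CRB_TEMP_STATE.  On a panic
+ * condition stop all ports and return 1 so the watchdog task does not
+ * rearm itself; warn on over-temperature and note recovery to normal.
+ */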
+static inline int netxen_nic_check_temp(struct netxen_adapter *adapter)
+{
+ int port_num;
+ struct netxen_port *port;
+ struct net_device *netdev;
+ uint32_t temp, temp_state, temp_val;
+ int rv = 0;
+
+ temp = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_TEMP_STATE));
+
+ temp_state = nx_get_temp_state(temp);
+ temp_val = nx_get_temp_val(temp);
+
+ if (temp_state == NX_TEMP_PANIC) {
+ printk(KERN_ALERT
+ "%s: Device temperature %d degrees C exceeds"
+ " maximum allowed. Hardware has been shut down.\n",
+ netxen_nic_driver_name, temp_val);
+ for (port_num = 0; port_num < adapter->ahw.max_ports;
+ port_num++) {
+ port = adapter->port[port_num];
+ netdev = port->netdev;
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ rv = 1;
+ } else if (temp_state == NX_TEMP_WARN) {
+ if (adapter->temp == NX_TEMP_NORMAL) {
+ printk(KERN_ALERT
+ "%s: Device temperature %d degrees C "
+ "exceeds operating range."
+ " Immediate action needed.\n",
+ netxen_nic_driver_name, temp_val);
+ }
+ } else {
+ if (adapter->temp == NX_TEMP_WARN) {
+ printk(KERN_INFO
+ "%s: Device temperature is now %d degrees C"
+ " in normal range.\n", netxen_nic_driver_name,
+ temp_val);
+ }
+ }
+ adapter->temp = temp_state;
+ return rv;
+}
+
+void netxen_watchdog_task(unsigned long v)
+{
+ int port_num;
+ struct netxen_port *port;
+ struct net_device *netdev;
+ struct netxen_adapter *adapter = (struct netxen_adapter *)v;
+
+ if (netxen_nic_check_temp(adapter))
+ return;
+
+ for (port_num = 0; port_num < adapter->ahw.max_ports; port_num++) {
+ port = adapter->port[port_num];
+ netdev = port->netdev;
+
+ if ((netif_running(netdev)) && !netif_carrier_ok(netdev)) {
+ printk(KERN_INFO "%s port %d, %s carrier is now ok\n",
+ netxen_nic_driver_name, port_num, netdev->name);
+ netif_carrier_on(netdev);
+ }
+
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+ }
+
+ if (adapter->ops->handle_phy_intr)
+ adapter->ops->handle_phy_intr(adapter);
+ mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+}
+
+/*
+ * netxen_process_rcv() send the received packet to the protocol stack.
+ * and if the number of receives exceeds RX_BUFFERS_REFILL, then we
+ * invoke the routine to send more rx buffers to the Phantom...
+ */
+void
+netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
+ struct status_desc *desc)
+{
+ struct netxen_port *port = adapter->port[STATUS_DESC_PORT(desc)];
+ struct pci_dev *pdev = port->pdev;
+ struct net_device *netdev = port->netdev;
+ int index = le16_to_cpu(desc->reference_handle);
+ struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
+ struct netxen_rx_buffer *buffer;
+ struct sk_buff *skb;
+ u32 length = le16_to_cpu(desc->total_length);
+ u32 desc_ctx;
+ struct netxen_rcv_desc_ctx *rcv_desc;
+ int ret;
+
+ desc_ctx = STATUS_DESC_TYPE(desc);
+ if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) {
+ printk("%s: %s Bad Rcv descriptor ring\n",
+ netxen_nic_driver_name, netdev->name);
+ return;
+ }
+
+ rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
+ buffer = &rcv_desc->rx_buf_arr[index];
+
+ pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb = (struct sk_buff *)buffer->skb;
+
+ if (likely(STATUS_DESC_STATUS(desc) == STATUS_CKSUM_OK)) {
+ port->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->dev = netdev;
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ ret = netif_receive_skb(skb);
+
+ /*
+ * RH: Do we need these stats on a regular basis? Can we get them
+ * from the Linux stats?
+ */
+ switch (ret) {
+ case NET_RX_SUCCESS:
+ port->stats.uphappy++;
+ break;
+
+ case NET_RX_CN_LOW:
+ port->stats.uplcong++;
+ break;
+
+ case NET_RX_CN_MOD:
+ port->stats.upmcong++;
+ break;
+
+ case NET_RX_CN_HIGH:
+ port->stats.uphcong++;
+ break;
+
+ case NET_RX_DROP:
+ port->stats.updropped++;
+ break;
+
+ default:
+ port->stats.updunno++;
+ break;
+ }
+
+ netdev->last_rx = jiffies;
+
+ rcv_desc->rcv_free++;
+ rcv_desc->rcv_pending--;
+
+ /*
+ * We just consumed one buffer so post a buffer.
+ */
+ adapter->stats.post_called++;
+ buffer->skb = NULL;
+ buffer->state = NETXEN_BUFFER_FREE;
+
+ port->stats.no_rcv++;
+ port->stats.rxbytes += length;
+}
+
+/* Process Receive status ring */
+u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
+{
+ struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
+ struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
+ struct status_desc *desc; /* used to read status desc here */
+ u32 consumer = recv_ctx->status_rx_consumer;
+ int count = 0, ring;
+
+ DPRINTK(INFO, "processing receive\n");
+ /*
+ * we assume in this case that there is only one port and that is
+ * port #1...changes need to be done in firmware to indicate port
+ * number as part of the descriptor. This way we will be able to get
+ * the netdev which is associated with that device.
+ */
+ while (count < max) {
+ desc = &desc_head[consumer];
+ if (!((le16_to_cpu(desc->owner)) & STATUS_OWNER_HOST)) {
+ DPRINTK(ERR, "desc %p owned by %x\n", desc, desc->owner);
+ break;
+ }
+ netxen_process_rcv(adapter, ctxid, desc);
+ desc->owner = STATUS_OWNER_PHANTOM;
+ consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1);
+ count++;
+ }
+ if (count) {
+ for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+ netxen_post_rx_buffers(adapter, ctxid, ring);
+ }
+ }
+
+ /* update the consumer index in phantom */
+ if (count) {
+ adapter->stats.process_rcv++;
+ recv_ctx->status_rx_consumer = consumer;
+
+ /* Window = 1 */
+ writel(consumer,
+ NETXEN_CRB_NORMALIZE(adapter,
+ recv_crb_registers[ctxid].
+ crb_rcv_status_consumer));
+ }
+
+ return count;
+}
+
+/* Process Command status ring */
+void netxen_process_cmd_ring(unsigned long data)
+{
+ u32 last_consumer;
+ u32 consumer;
+ struct netxen_adapter *adapter = (struct netxen_adapter *)data;
+ int count = 0;
+ struct netxen_cmd_buffer *buffer;
+ struct netxen_port *port; /* port #1 */
+ struct netxen_port *nport;
+ struct pci_dev *pdev;
+ struct netxen_skb_frag *frag;
+ u32 i;
+ struct sk_buff *skb = NULL;
+ int p;
+
+ spin_lock(&adapter->tx_lock);
+ last_consumer = adapter->last_cmd_consumer;
+ DPRINTK(INFO, "processing xmit complete\n");
+ /* we assume in this case that there is only one port and that is
+ * port #1...changes need to be done in firmware to indicate port
+ * number as part of the descriptor. This way we will be able to get
+ * the netdev which is associated with that device.
+ */
+ consumer =
+ readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET));
+
+ if (last_consumer == consumer) { /* Ring is empty */
+ DPRINTK(INFO, "last_consumer %d == consumer %d\n",
+ last_consumer, consumer);
+ spin_unlock(&adapter->tx_lock);
+ return;
+ }
+
+ adapter->proc_cmd_buf_counter++;
+ adapter->stats.process_xmit++;
+ /*
+ * Not needed - does not seem to be used anywhere.
+ * adapter->cmd_consumer = consumer;
+ */
+ spin_unlock(&adapter->tx_lock);
+
+ while ((last_consumer != consumer) && (count < MAX_STATUS_HANDLE)) {
+ buffer = &adapter->cmd_buf_arr[last_consumer];
+ port = adapter->port[buffer->port];
+ pdev = port->pdev;
+ frag = &buffer->frag_array[0];
+ skb = buffer->skb;
+ if (skb && (cmpxchg(&buffer->skb, skb, 0) == skb)) {
+ pci_unmap_single(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ for (i = 1; i < buffer->frag_count; i++) {
+ DPRINTK(INFO, "getting fragment no %d\n", i);
+ frag++; /* Get the next frag */
+ pci_unmap_page(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ }
+
+ port->stats.skbfreed++;
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ } else if (adapter->proc_cmd_buf_counter == 1) {
+ port->stats.txnullskb++;
+ }
+ if (unlikely(netif_queue_stopped(port->netdev)
+ && netif_carrier_ok(port->netdev))
+ && ((jiffies - port->netdev->trans_start) >
+ port->netdev->watchdog_timeo)) {
+ schedule_work(&port->adapter->tx_timeout_task);
+ }
+
+ last_consumer = get_next_index(last_consumer,
+ adapter->max_tx_desc_count);
+ count++;
+ }
+ adapter->stats.noxmitdone += count;
+
+ count = 0;
+ spin_lock(&adapter->tx_lock);
+ if ((--adapter->proc_cmd_buf_counter) == 0) {
+ adapter->last_cmd_consumer = last_consumer;
+ while ((adapter->last_cmd_consumer != consumer)
+ && (count < MAX_STATUS_HANDLE)) {
+ buffer =
+ &adapter->cmd_buf_arr[adapter->last_cmd_consumer];
+ count++;
+ if (buffer->skb)
+ break;
+ else
+ adapter->last_cmd_consumer =
+ get_next_index(adapter->last_cmd_consumer,
+ adapter->max_tx_desc_count);
+ }
+ }
+ if (count) {
+ for (p = 0; p < adapter->ahw.max_ports; p++) {
+ nport = adapter->port[p];
+ if (netif_queue_stopped(nport->netdev)
+ && (nport->flags & NETXEN_NETDEV_STATUS)) {
+ netif_wake_queue(nport->netdev);
+ nport->flags &= ~NETXEN_NETDEV_STATUS;
+ }
+ }
+ }
+
+ spin_unlock(&adapter->tx_lock);
+ DPRINTK(INFO, "last consumer is %d in %s\n", last_consumer,
+ __FUNCTION__);
+}
+
+/*
+ * netxen_post_rx_buffers puts buffers into the Phantom memory
+ */
+void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
+{
+ struct pci_dev *pdev = adapter->ahw.pdev;
+ struct sk_buff *skb;
+ struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
+ struct netxen_rcv_desc_ctx *rcv_desc = NULL;
+ struct netxen_recv_crb *crbarea = &recv_crb_registers[ctx];
+ struct netxen_rcv_desc_crb *rcv_desc_crb = NULL;
+ u32 producer;
+ struct rcv_desc *pdesc;
+ struct netxen_rx_buffer *buffer;
+ int count = 0;
+ int index = 0;
+
+ adapter->stats.post_called++;
+ rcv_desc = &recv_ctx->rcv_desc[ringid];
+ rcv_desc_crb = &crbarea->rcv_desc_crb[ringid];
+
+ producer = rcv_desc->producer;
+ index = rcv_desc->begin_alloc;
+ buffer = &rcv_desc->rx_buf_arr[index];
+ /* We can start writing rx descriptors into the phantom memory. */
+ while (buffer->state == NETXEN_BUFFER_FREE) {
+ skb = dev_alloc_skb(rcv_desc->skb_size);
+ if (unlikely(!skb)) {
+ /*
+ * We need to schedule the posting of buffers to the pegs.
+ */
+ rcv_desc->begin_alloc = index;
+ DPRINTK(ERR, "netxen_post_rx_buffers: "
+ " allocated only %d buffers\n", count);
+ break;
+ }
+ count++; /* now there should be no failure */
+ pdesc = &rcv_desc->desc_head[producer];
+ skb_reserve(skb, NET_IP_ALIGN);
+ /*
+ * This will be setup when we receive the
+ * buffer after it has been filled
+ * skb->dev = netdev;
+ */
+ buffer->skb = skb;
+ buffer->state = NETXEN_BUFFER_BUSY;
+ buffer->dma = pci_map_single(pdev, skb->data,
+ rcv_desc->dma_size,
+ PCI_DMA_FROMDEVICE);
+ /* make a rcv descriptor */
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le16(rcv_desc->dma_size);
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ DPRINTK(INFO, "done writing descriptor\n");
+ producer =
+ get_next_index(producer, rcv_desc->max_rx_desc_count);
+ index = get_next_index(index, rcv_desc->max_rx_desc_count);
+ buffer = &rcv_desc->rx_buf_arr[index];
+ }
+
+ /* if we did allocate buffers, then write the count to Phantom */
+ if (count) {
+ rcv_desc->begin_alloc = index;
+ rcv_desc->rcv_pending += count;
+ adapter->stats.lastposted = count;
+ adapter->stats.posted += count;
+ rcv_desc->producer = producer;
+ if (rcv_desc->rcv_free >= 32) {
+ rcv_desc->rcv_free = 0;
+ /* Window = 1 */
+ writel((producer - 1) &
+ (rcv_desc->max_rx_desc_count - 1),
+ NETXEN_CRB_NORMALIZE(adapter,
+ rcv_desc_crb->
+ crb_rcv_producer_offset));
+ wmb();
+ }
+ }
+}
+
+int netxen_nic_tx_has_work(struct netxen_adapter *adapter)
+{
+ if (find_diff_among(adapter->last_cmd_consumer,
+ adapter->cmd_producer,
+ adapter->max_tx_desc_count) > 0)
+ return 1;
+
+ return 0;
+}
+
+int
+netxen_nic_fill_statistics(struct netxen_adapter *adapter,
+ struct netxen_port *port,
+ struct netxen_statistics *netxen_stats)
+{
+ void __iomem *addr;
+
+ if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
+ netxen_nic_pci_change_crbwindow(adapter, 0);
+ NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_TX_BYTE_CNT,
+ &(netxen_stats->tx_bytes));
+ NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_TX_FRAME_CNT,
+ &(netxen_stats->tx_packets));
+ NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_RX_BYTE_CNT,
+ &(netxen_stats->rx_bytes));
+ NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_RX_FRAME_CNT,
+ &(netxen_stats->rx_packets));
+ NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_AGGR_ERROR_CNT,
+ &(netxen_stats->rx_errors));
+ NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_CRC_ERROR_CNT,
+ &(netxen_stats->rx_crc_errors));
+ NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR,
+ &(netxen_stats->
+ rx_long_length_error));
+ NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR,
+ &(netxen_stats->
+ rx_short_length_error));
+
+ netxen_nic_pci_change_crbwindow(adapter, 1);
+ } else {
+ spin_lock_bh(&adapter->tx_lock);
+ netxen_stats->tx_bytes = port->stats.txbytes;
+ netxen_stats->tx_packets = port->stats.xmitedframes +
+ port->stats.xmitfinished;
+ netxen_stats->rx_bytes = port->stats.rxbytes;
+ netxen_stats->rx_packets = port->stats.no_rcv;
+ netxen_stats->rx_errors = port->stats.rcvdbadskb;
+ netxen_stats->tx_errors = port->stats.nocmddescriptor;
+ netxen_stats->rx_short_length_error = port->stats.uplcong;
+ netxen_stats->rx_long_length_error = port->stats.uphcong;
+ netxen_stats->rx_crc_errors = 0;
+ netxen_stats->rx_mac_errors = 0;
+ spin_unlock_bh(&adapter->tx_lock);
+ }
+ return 0;
+}
+
+void netxen_nic_clear_stats(struct netxen_adapter *adapter)
+{
+ struct netxen_port *port;
+ int port_num;
+
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+ for (port_num = 0; port_num < adapter->ahw.max_ports; port_num++) {
+ port = adapter->port[port_num];
+ memset(&port->stats, 0, sizeof(port->stats));
+ }
+}
+
+int
+netxen_nic_clear_statistics(struct netxen_adapter *adapter,
+ struct netxen_port *port)
+{
+ int data = 0;
+
+ netxen_nic_pci_change_crbwindow(adapter, 0);
+
+ netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_TX_BYTE_CNT, &data);
+ netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_TX_FRAME_CNT,
+ &data);
+ netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_RX_BYTE_CNT, &data);
+ netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_RX_FRAME_CNT,
+ &data);
+ netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_AGGR_ERROR_CNT,
+ &data);
+ netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_CRC_ERROR_CNT,
+ &data);
+ netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR,
+ &data);
+ netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR,
+ &data);
+
+ netxen_nic_pci_change_crbwindow(adapter, 1);
+ netxen_nic_clear_stats(adapter);
+ return 0;
+}
+
+int
+netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
+ struct netxen_port *port)
+{
+ struct netxen_nic_ioctl_data data;
+ struct netxen_nic_ioctl_data *up_data;
+ int retval = 0;
+ struct netxen_statistics netxen_stats;
+
+ up_data = (void *)u_data;
+
+ DPRINTK(INFO, "doing ioctl for %p\n", adapter);
+ if (copy_from_user(&data, (void __user *)up_data, sizeof(data))) {
+ /* evil user tried to crash the kernel */
+ DPRINTK(ERR, "bad copy from userland: %d\n", (int)sizeof(data));
+ retval = -EFAULT;
+ goto error_out;
+ }
+
+ /* Shouldn't access beyond legal limits of "char u[64];" member */
+ if (!data.ptr && (data.size > sizeof(data.u))) {
+ /* evil user tried to crash the kernel */
+ DPRINTK(ERR, "bad size: %d\n", data.size);
+ retval = -EFAULT;
+ goto error_out;
+ }
+
+ switch (data.cmd) {
+ case netxen_nic_cmd_pci_read:
+ if ((retval = netxen_nic_hw_read_wx(adapter, data.off,
+ &(data.u), data.size)))
+ goto error_out;
+ if (copy_to_user
+ ((void __user *)&(up_data->u), &(data.u), data.size)) {
+ DPRINTK(ERR, "bad copy to userland: %d\n",
+ (int)sizeof(data));
+ retval = -EFAULT;
+ goto error_out;
+ }
+ data.rv = 0;
+ break;
+
+ case netxen_nic_cmd_pci_write:
+ data.rv = netxen_nic_hw_write_wx(adapter, data.off, &(data.u),
+ data.size);
+ break;
+
+ case netxen_nic_cmd_pci_config_read:
+ switch (data.size) {
+ case 1:
+ data.rv = pci_read_config_byte(adapter->ahw.pdev,
+ data.off,
+ (char *)&(data.u));
+ break;
+ case 2:
+ data.rv = pci_read_config_word(adapter->ahw.pdev,
+ data.off,
+ (short *)&(data.u));
+ break;
+ case 4:
+ data.rv = pci_read_config_dword(adapter->ahw.pdev,
+ data.off,
+ (u32 *) & (data.u));
+ break;
+ }
+ if (copy_to_user
+ ((void __user *)&(up_data->u), &(data.u), data.size)) {
+ DPRINTK(ERR, "bad copy to userland: %d\n",
+ (int)sizeof(data));
+ retval = -EFAULT;
+ goto error_out;
+ }
+ break;
+
+ case netxen_nic_cmd_pci_config_write:
+ switch (data.size) {
+ case 1:
+ data.rv = pci_write_config_byte(adapter->ahw.pdev,
+ data.off,
+ *(char *)&(data.u));
+ break;
+ case 2:
+ data.rv = pci_write_config_word(adapter->ahw.pdev,
+ data.off,
+ *(short *)&(data.u));
+ break;
+ case 4:
+ data.rv = pci_write_config_dword(adapter->ahw.pdev,
+ data.off,
+ *(u32 *) & (data.u));
+ break;
+ }
+ break;
+
+ case netxen_nic_cmd_get_stats:
+ data.rv =
+ netxen_nic_fill_statistics(adapter, port, &netxen_stats);
+ if (copy_to_user
+ ((void __user *)(data.ptr), (void *)&netxen_stats,
+ sizeof(struct netxen_statistics))) {
+ DPRINTK(ERR, "bad copy to userland: %d\n",
+ (int)sizeof(netxen_stats));
+ retval = -EFAULT;
+ goto error_out;
+ }
+ up_data->rv = data.rv;
+ break;
+
+ case netxen_nic_cmd_clear_stats:
+ data.rv = netxen_nic_clear_statistics(adapter, port);
+ up_data->rv = data.rv;
+ break;
+
+ case netxen_nic_cmd_get_version:
+ if (copy_to_user
+ ((void __user *)&(up_data->u), NETXEN_NIC_LINUX_VERSIONID,
+ sizeof(NETXEN_NIC_LINUX_VERSIONID))) {
+ DPRINTK(ERR, "bad copy to userland: %d\n",
+ (int)sizeof(data));
+ retval = -EFAULT;
+ goto error_out;
+ }
+ break;
+
+ default:
+ DPRINTK(INFO, "bad command %d for %p\n", data.cmd, adapter);
+ retval = -EOPNOTSUPP;
+ goto error_out;
+ }
+ put_user(data.rv, (u32 __user *) (&(up_data->rv)));
+ DPRINTK(INFO, "ioctl for %p done\n", adapter);
+
+ error_out:
+ return retval;
+}
diff --git a/drivers/net/netxen/netxen_nic_ioctl.h b/drivers/net/netxen/netxen_nic_ioctl.h
new file mode 100644
index 000000000000..23e53adbf123
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_ioctl.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ * info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ */
+
+#ifndef __NETXEN_NIC_IOCTL_H__
+#define __NETXEN_NIC_IOCTL_H__
+
+#include <linux/sockios.h>
+
+#define NETXEN_CMD_START SIOCDEVPRIVATE
+#define NETXEN_NIC_CMD (NETXEN_CMD_START + 1)
+#define NETXEN_NIC_NAME (NETXEN_CMD_START + 2)
+#define NETXEN_NIC_NAME_LEN 16
+#define NETXEN_NIC_NAME_RSP "NETXEN"
+
+typedef enum {
+ netxen_nic_cmd_none = 0,
+ netxen_nic_cmd_pci_read,
+ netxen_nic_cmd_pci_write,
+ netxen_nic_cmd_pci_mem_read,
+ netxen_nic_cmd_pci_mem_write,
+ netxen_nic_cmd_pci_config_read,
+ netxen_nic_cmd_pci_config_write,
+ netxen_nic_cmd_get_stats,
+ netxen_nic_cmd_clear_stats,
+ netxen_nic_cmd_get_version
+} netxen_nic_ioctl_cmd_t;
+
+struct netxen_nic_ioctl_data {
+ u32 cmd;
+ u32 unused1;
+ u64 off;
+ u32 size;
+ u32 rv;
+ char u[64];
+ void *ptr;
+};
+
+struct netxen_statistics {
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 rx_errors;
+ u64 tx_bytes;
+ u64 tx_errors;
+ u64 rx_crc_errors;
+ u64 rx_short_length_error;
+ u64 rx_long_length_error;
+ u64 rx_mac_errors;
+};
+
+#endif /* __NETXEN_NIC_IOCTL_H__ */
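
Since this header defines the private-ioctl ABI consumed by netxen_nic_do_ioctl(), a userspace sketch may help show how it is meant to be driven. Everything below is illustrative and not part of the patch: the interface name, the error handling, and the assumption that the kernel-style u32/u64 types in netxen_nic_ioctl.h resolve in userspace (for example via <linux/types.h>) are all hypothetical.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include "netxen_nic_ioctl.h"	/* assumes u32/u64 are visible in userspace */

/* Query the driver/firmware version string through NETXEN_NIC_CMD. */
static int netxen_query_version(const char *ifname)
{
	struct netxen_nic_ioctl_data data;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&data, 0, sizeof(data));
	data.cmd = netxen_nic_cmd_get_version;	/* driver fills data.u */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&data;

	if (ioctl(fd, NETXEN_NIC_CMD, &ifr) < 0) {
		close(fd);
		return -1;
	}
	printf("%s version: %s\n", ifname, data.u);
	close(fd);
	return 0;
}
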
diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c
new file mode 100644
index 000000000000..ae180fee8008
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_isr.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ * info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+#include "netxen_nic_phan_reg.h"
+
+/*
+ * netxen_nic_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ */
+struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
+{
+ struct netxen_port *port = netdev_priv(netdev);
+ struct net_device_stats *stats = &port->net_stats;
+
+ memset(stats, 0, sizeof(*stats));
+
+ /* total packets received */
+ stats->rx_packets = port->stats.no_rcv;
+ /* total packets transmitted */
+ stats->tx_packets = port->stats.xmitedframes + port->stats.xmitfinished;
+ /* total bytes received */
+ stats->rx_bytes = port->stats.rxbytes;
+ /* total bytes transmitted */
+ stats->tx_bytes = port->stats.txbytes;
+ /* bad packets received */
+ stats->rx_errors = port->stats.rcvdbadskb;
+ /* packet transmit problems */
+ stats->tx_errors = port->stats.nocmddescriptor;
+ /* no space in linux buffers */
+ stats->rx_dropped = port->stats.updropped;
+ /* no space available in linux */
+ stats->tx_dropped = port->stats.txdropped;
+
+ return stats;
+}
+
+void netxen_indicate_link_status(struct netxen_adapter *adapter, u32 portno,
+ u32 link)
+{
+ struct netxen_port *pport = adapter->port[portno];
+ struct net_device *netdev = pport->netdev;
+
+ if (link)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
+}
+
+void netxen_handle_port_int(struct netxen_adapter *adapter, u32 portno,
+ u32 enable)
+{
+ __le32 int_src = 0; /* stays 0 if no phy_read op is available */
+ struct netxen_port *port;
+
+ /* This should clear the interrupt source */
+ if (adapter->ops->phy_read)
+ adapter->ops->phy_read(adapter, portno,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS,
+ &int_src);
+ if (int_src == 0) {
+ DPRINTK(INFO, "No phy interrupts for port #%d\n", portno);
+ return;
+ }
+ if (adapter->ops->disable_phy_interrupts)
+ adapter->ops->disable_phy_interrupts(adapter, portno);
+
+ port = adapter->port[portno];
+
+ if (netxen_get_phy_int_jabber(int_src))
+ DPRINTK(INFO, "NetXen: %s Jabber interrupt \n",
+ port->netdev->name);
+
+ if (netxen_get_phy_int_polarity_changed(int_src))
+ DPRINTK(INFO, "NetXen: %s POLARITY CHANGED int \n",
+ port->netdev->name);
+
+ if (netxen_get_phy_int_energy_detect(int_src))
+ DPRINTK(INFO, "NetXen: %s ENERGY DETECT INT \n",
+ port->netdev->name);
+
+ if (netxen_get_phy_int_downshift(int_src))
+ DPRINTK(INFO, "NetXen: %s DOWNSHIFT INT \n",
+ port->netdev->name);
+ /* write it down later.. */
+ if ((netxen_get_phy_int_speed_changed(int_src))
+ || (netxen_get_phy_int_link_status_changed(int_src))) {
+ __le32 status;
+
+ DPRINTK(INFO, "NetXen: %s SPEED CHANGED OR"
+ " LINK STATUS CHANGED \n", port->netdev->name);
+
+ if (adapter->ops->phy_read
+ && adapter->ops->phy_read(adapter, portno,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ &status) == 0) {
+ if (netxen_get_phy_int_link_status_changed(int_src)) {
+ if (netxen_get_phy_link(status)) {
+ netxen_niu_gbe_init_port(adapter,
+ portno);
+ printk("%s: %s Link UP\n",
+ netxen_nic_driver_name,
+ port->netdev->name);
+
+ } else {
+ printk("%s: %s Link DOWN\n",
+ netxen_nic_driver_name,
+ port->netdev->name);
+ }
+ netxen_indicate_link_status(adapter, portno,
+ netxen_get_phy_link
+ (status));
+ }
+ }
+ }
+ if (adapter->ops->enable_phy_interrupts)
+ adapter->ops->enable_phy_interrupts(adapter, portno);
+}
+
+void netxen_nic_isr_other(struct netxen_adapter *adapter)
+{
+ u32 portno;
+ u32 val, linkup, qg_linksup;
+
+ /* verify the offset */
+ val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
+ if (val == adapter->ahw.qg_linksup)
+ return;
+
+ qg_linksup = adapter->ahw.qg_linksup;
+ adapter->ahw.qg_linksup = val;
+ DPRINTK(INFO, "%s: link update 0x%08x\n", netxen_nic_driver_name, val);
+ for (portno = 0; portno < NETXEN_NIU_MAX_GBE_PORTS; portno++) {
+ linkup = val & 1;
+ if (linkup != (qg_linksup & 1)) {
+ printk(KERN_INFO "%s: PORT %d link %s\n",
+ netxen_nic_driver_name, portno,
+ ((linkup == 0) ? "down" : "up"));
+ netxen_indicate_link_status(adapter, portno, linkup);
+ if (linkup)
+ netxen_nic_set_link_parameters(adapter->
+ port[portno]);
+
+ }
+ val = val >> 1;
+ qg_linksup = qg_linksup >> 1;
+ }
+
+ adapter->stats.otherints++;
+
+}
+
+void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter)
+{
+ netxen_nic_isr_other(adapter);
+}
+
+void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->port[0]->netdev;
+ u32 val;
+
+ /* WINDOW = 1 */
+ val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
+
+ if (adapter->ahw.xg_linkup == 1 && val != XG_LINK_UP) {
+ printk(KERN_INFO "%s: %s NIC Link is down\n",
+ netxen_nic_driver_name, netdev->name);
+ adapter->ahw.xg_linkup = 0;
+ /* read twice to clear sticky bits */
+ /* WINDOW = 0 */
+ netxen_nic_read_w0(adapter, NETXEN_NIU_XG_STATUS, &val);
+ netxen_nic_read_w0(adapter, NETXEN_NIU_XG_STATUS, &val);
+
+ if ((val & 0xffb) != 0xffb) {
+ printk(KERN_INFO "%s ISR: Sync/Align BAD: 0x%08x\n",
+ netxen_nic_driver_name, val);
+ }
+ } else if (adapter->ahw.xg_linkup == 0 && val == XG_LINK_UP) {
+ printk(KERN_INFO "%s: %s NIC Link is up\n",
+ netxen_nic_driver_name, netdev->name);
+ adapter->ahw.xg_linkup = 1;
+ }
+}
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
new file mode 100644
index 000000000000..1cb662d5bd76
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -0,0 +1,1161 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ * info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ *
+ * Main source file for NetXen NIC Driver on Linux
+ *
+ */
+
+#include <linux/vmalloc.h>
+#include "netxen_nic_hw.h"
+
+#include "netxen_nic.h"
+#define DEFINE_GLOBAL_RECV_CRB
+#include "netxen_nic_phan_reg.h"
+#include "netxen_nic_ioctl.h"
+
+#include <linux/dma-mapping.h>
+
+#define PHAN_VENDOR_ID 0x4040
+
+MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
+
+char netxen_nic_driver_name[] = "netxen";
+static char netxen_nic_driver_string[] = "NetXen Network Driver version "
+ NETXEN_NIC_LINUX_VERSIONID;
+
+#define NETXEN_NETDEV_WEIGHT 120
+#define NETXEN_ADAPTER_UP_MAGIC 777
+#define NETXEN_NIC_PEG_TUNE 0
+
+/* Local functions to NetXen NIC driver */
+static int __devinit netxen_nic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void __devexit netxen_nic_remove(struct pci_dev *pdev);
+static int netxen_nic_open(struct net_device *netdev);
+static int netxen_nic_close(struct net_device *netdev);
+static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
+static void netxen_tx_timeout(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct net_device *netdev);
+static void netxen_watchdog(unsigned long);
+static int netxen_handle_int(struct netxen_adapter *, struct net_device *);
+static int netxen_nic_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd);
+static int netxen_nic_poll(struct net_device *dev, int *budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev);
+#endif
+static irqreturn_t netxen_intr(int irq, void *data);
+
+/* PCI Device ID Table */
+static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
+ {PCI_DEVICE(0x4040, 0x0001)},
+ {PCI_DEVICE(0x4040, 0x0002)},
+ {PCI_DEVICE(0x4040, 0x0003)},
+ {PCI_DEVICE(0x4040, 0x0004)},
+ {PCI_DEVICE(0x4040, 0x0005)},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
+
+/*
+ * netxen_nic_probe()
+ *
+ * The Linux system will invoke this after identifying the vendor ID and
+ * device ID in the pci_tbl supported by this module.
+ *
+ * A quad port card has one operational PCI config space, (function 0),
+ * which is used to access all four ports.
+ *
+ * This routine will initialize the adapter and set up the global parameters
+ * along with the per-port structures.
+ */
+static int __devinit
+netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct netxen_adapter *adapter = NULL;
+ struct netxen_port *port = NULL;
+ u8 *mem_ptr0 = NULL;
+ u8 *mem_ptr1 = NULL;
+ u8 *mem_ptr2 = NULL;
+
+ unsigned long mem_base, mem_len;
+ int pci_using_dac, i, err;
+ int ring;
+ struct netxen_recv_context *recv_ctx = NULL;
+ struct netxen_rcv_desc_ctx *rcv_desc = NULL;
+ struct netxen_cmd_buffer *cmd_buf_arr = NULL;
+ u64 mac_addr[FLASH_NUM_PORTS + 1];
+ int valid_mac;
+
+ printk(KERN_INFO "%s \n", netxen_nic_driver_string);
+ if ((err = pci_enable_device(pdev)))
+ return err;
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
+ goto err_out_disable_pdev;
+
+ pci_set_master(pdev);
+ if ((pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) &&
+ (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) == 0))
+ pci_using_dac = 1;
+ else {
+ if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
+ (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)))
+ goto err_out_free_res;
+
+ pci_using_dac = 0;
+ }
+
+ /* remap phys address */
+ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
+ mem_len = pci_resource_len(pdev, 0);
+
+ /* 128 Meg of memory */
+ mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
+ mem_ptr1 =
+ ioremap(mem_base + SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_SIZE);
+ mem_ptr2 =
+ ioremap(mem_base + THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
+
+ if ((mem_ptr0 == 0UL) || (mem_ptr1 == 0UL) || (mem_ptr2 == 0UL)) {
+ DPRINTK(ERR,
+ "Cannot remap adapter memory, aborting: "
+ "0 -> %p, 1 -> %p, 2 -> %p\n",
+ mem_ptr0, mem_ptr1, mem_ptr2);
+
+ err = -EIO;
+ if (mem_ptr0)
+ iounmap(mem_ptr0);
+ if (mem_ptr1)
+ iounmap(mem_ptr1);
+ if (mem_ptr2)
+ iounmap(mem_ptr2);
+
+ goto err_out_free_res;
+ }
+
+/*
+ * Allocate an adapter structure which will manage all the initialization
+ * as well as the common resources for all ports.
+ * All the ports will have a pointer to this adapter, and the adapter
+ * will have pointers to all the port structures.
+ */
+
+ /* One adapter structure for all 4 ports.... */
+ adapter = kzalloc(sizeof(struct netxen_adapter), GFP_KERNEL);
+ if (adapter == NULL) {
+ printk(KERN_ERR "%s: Could not allocate adapter memory:%d\n",
+ netxen_nic_driver_name,
+ (int)sizeof(struct netxen_adapter));
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+
+ adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS;
+ adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS;
+ adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;
+
+ pci_set_drvdata(pdev, adapter);
+
+ cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
+ if (cmd_buf_arr == NULL) {
+ err = -ENOMEM;
+ goto err_out_free_adapter;
+ }
+ memset(cmd_buf_arr, 0, TX_RINGSIZE);
+
+ for (i = 0; i < MAX_RCV_CTX; ++i) {
+ recv_ctx = &adapter->recv_ctx[i];
+ for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+ rcv_desc = &recv_ctx->rcv_desc[ring];
+ switch (RCV_DESC_TYPE(ring)) {
+ case RCV_DESC_NORMAL:
+ rcv_desc->max_rx_desc_count =
+ adapter->max_rx_desc_count;
+ rcv_desc->flags = RCV_DESC_NORMAL;
+ rcv_desc->dma_size = RX_DMA_MAP_LEN;
+ rcv_desc->skb_size = MAX_RX_BUFFER_LENGTH;
+ break;
+
+ case RCV_DESC_JUMBO:
+ rcv_desc->max_rx_desc_count =
+ adapter->max_jumbo_rx_desc_count;
+ rcv_desc->flags = RCV_DESC_JUMBO;
+ rcv_desc->dma_size = RX_JUMBO_DMA_MAP_LEN;
+ rcv_desc->skb_size = MAX_RX_JUMBO_BUFFER_LENGTH;
+ break;
+
+ }
+ rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *)
+ vmalloc(RCV_BUFFSIZE);
+
+ if (rcv_desc->rx_buf_arr == NULL) {
+ err = -ENOMEM;
+ goto err_out_free_rx_buffer;
+ }
+ memset(rcv_desc->rx_buf_arr, 0, RCV_BUFFSIZE);
+ }
+
+ }
+
+ adapter->ops = kzalloc(sizeof(struct netxen_drvops), GFP_KERNEL);
+ if (adapter->ops == NULL) {
+ printk(KERN_ERR
+ "%s: Could not allocate memory for adapter->ops:%d\n",
+ netxen_nic_driver_name,
+ (int)sizeof(struct netxen_adapter));
+ err = -ENOMEM;
+ goto err_out_free_rx_buffer;
+ }
+
+ adapter->cmd_buf_arr = cmd_buf_arr;
+ adapter->ahw.pci_base0 = mem_ptr0;
+ adapter->ahw.pci_base1 = mem_ptr1;
+ adapter->ahw.pci_base2 = mem_ptr2;
+ spin_lock_init(&adapter->tx_lock);
+ spin_lock_init(&adapter->lock);
+#ifdef CONFIG_IA64
+ netxen_pinit_from_rom(adapter, 0);
+ udelay(500);
+ netxen_load_firmware(adapter);
+#endif
+
+ /* initialize the buffers in adapter */
+ netxen_initialize_adapter_sw(adapter);
+ /*
+ * Set the CRB window to invalid. If any register in window 0 is
+ * accessed it should set the window to 0 and then reset it to 1.
+ */
+ adapter->curr_window = 255;
+ /*
+ * Adapter in our case is quad port so initialize it before
+ * initializing the ports
+ */
+ netxen_initialize_adapter_hw(adapter); /* initialize the adapter */
+
+ netxen_initialize_adapter_ops(adapter);
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->ahw.xg_linkup = 0;
+ adapter->watchdog_timer.function = &netxen_watchdog;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+ INIT_WORK(&adapter->watchdog_task,
+ (void (*)(void *))netxen_watchdog_task, adapter);
+ adapter->ahw.pdev = pdev;
+ adapter->proc_cmd_buf_counter = 0;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id);
+
+ if (pci_enable_msi(pdev)) {
+ adapter->flags &= ~NETXEN_NIC_MSI_ENABLED;
+ printk(KERN_WARNING "%s: unable to allocate MSI interrupt"
+ " error\n", netxen_nic_driver_name);
+ } else
+ adapter->flags |= NETXEN_NIC_MSI_ENABLED;
+
+ if (netxen_is_flash_supported(adapter) == 0 &&
+ netxen_get_flash_mac_addr(adapter, mac_addr) == 0)
+ valid_mac = 1;
+ else
+ valid_mac = 0;
+
+ /*
+ * Initialize all the CRB registers here.
+ */
+ writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET));
+ writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET));
+ writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO));
+
+ /* Unlock the HW, prompting the boot sequence */
+ writel(1,
+ NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE));
+
+ /* Handshake with the card before we register the devices. */
+ netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
+
+ /* initialize all the ports */
+
+ for (i = 0; i < adapter->ahw.max_ports; i++) {
+ netdev = alloc_etherdev(sizeof(struct netxen_port));
+ if (!netdev) {
+ printk(KERN_ERR "%s: could not allocate netdev for port"
+ " %d\n", netxen_nic_driver_name, i + 1);
+ goto err_out_free_dev;
+ }
+
+ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ port = netdev_priv(netdev);
+ port->netdev = netdev;
+ port->pdev = pdev;
+ port->adapter = adapter;
+ port->portnum = i; /* Gigabit port number from 0-3 */
+
+ netdev->open = netxen_nic_open;
+ netdev->stop = netxen_nic_close;
+ netdev->hard_start_xmit = netxen_nic_xmit_frame;
+ netdev->get_stats = netxen_nic_get_stats;
+ netdev->set_multicast_list = netxen_nic_set_multi;
+ netdev->set_mac_address = netxen_nic_set_mac;
+ netdev->change_mtu = netxen_nic_change_mtu;
+ netdev->do_ioctl = netxen_nic_ioctl;
+ netdev->tx_timeout = netxen_tx_timeout;
+ netdev->watchdog_timeo = HZ;
+
+ SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
+ netdev->poll = netxen_nic_poll;
+ netdev->weight = NETXEN_NETDEV_WEIGHT;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = netxen_nic_poll_controller;
+#endif
+ /* ScatterGather support */
+ netdev->features = NETIF_F_SG;
+ netdev->features |= NETIF_F_IP_CSUM;
+ netdev->features |= NETIF_F_TSO;
+
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ if (valid_mac) {
+ unsigned char *p = (unsigned char *)&mac_addr[i];
+ netdev->dev_addr[0] = *(p + 5);
+ netdev->dev_addr[1] = *(p + 4);
+ netdev->dev_addr[2] = *(p + 3);
+ netdev->dev_addr[3] = *(p + 2);
+ netdev->dev_addr[4] = *(p + 1);
+ netdev->dev_addr[5] = *(p + 0);
+
+ memcpy(netdev->perm_addr, netdev->dev_addr,
+ netdev->addr_len);
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+ printk(KERN_ERR "%s: Bad MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x.\n",
+ netxen_nic_driver_name,
+ netdev->dev_addr[0],
+ netdev->dev_addr[1],
+ netdev->dev_addr[2],
+ netdev->dev_addr[3],
+ netdev->dev_addr[4],
+ netdev->dev_addr[5]);
+ } else {
+ if (adapter->ops->macaddr_set)
+ adapter->ops->macaddr_set(port,
+ netdev->
+ dev_addr);
+ }
+ }
+ INIT_WORK(&adapter->tx_timeout_task,
+ (void (*)(void *))netxen_tx_timeout_task, netdev);
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ if ((err = register_netdev(netdev))) {
+ printk(KERN_ERR "%s: register_netdev failed port #%d"
+ " aborting\n", netxen_nic_driver_name, i + 1);
+ err = -EIO;
+ free_netdev(netdev);
+ goto err_out_free_dev;
+ }
+ adapter->port_count++;
+ adapter->active_ports = 0;
+ adapter->port[i] = port;
+ }
+
+ /*
+ * delay a while to ensure that the Pegs are up & running.
+ * Otherwise, we might see some flaky behaviour.
+ */
+ udelay(100);
+
+ switch (adapter->ahw.board_type) {
+ case NETXEN_NIC_GBE:
+ printk("%s: QUAD GbE board initialized\n",
+ netxen_nic_driver_name);
+ break;
+
+ case NETXEN_NIC_XGBE:
+ printk("%s: XGbE board initialized\n", netxen_nic_driver_name);
+ break;
+ }
+
+ adapter->driver_mismatch = 0;
+
+ return 0;
+
+ err_out_free_dev:
+ if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
+ pci_disable_msi(pdev);
+ for (i = 0; i < adapter->port_count; i++) {
+ port = adapter->port[i];
+ if ((port) && (port->netdev)) {
+ unregister_netdev(port->netdev);
+ free_netdev(port->netdev);
+ }
+ }
+ kfree(adapter->ops);
+
+ err_out_free_rx_buffer:
+ for (i = 0; i < MAX_RCV_CTX; ++i) {
+ recv_ctx = &adapter->recv_ctx[i];
+ for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+ rcv_desc = &recv_ctx->rcv_desc[ring];
+ if (rcv_desc->rx_buf_arr != NULL) {
+ vfree(rcv_desc->rx_buf_arr);
+ rcv_desc->rx_buf_arr = NULL;
+ }
+ }
+ }
+
+ vfree(cmd_buf_arr);
+
+ kfree(adapter->port);
+
+ err_out_free_adapter:
+ pci_set_drvdata(pdev, NULL);
+ kfree(adapter);
+
+ err_out_iounmap:
+ iounmap(mem_ptr0);
+ iounmap(mem_ptr1);
+ iounmap(mem_ptr2);
+
+ err_out_free_res:
+ pci_release_regions(pdev);
+ err_out_disable_pdev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void __devexit netxen_nic_remove(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter;
+ struct netxen_port *port;
+ struct netxen_rx_buffer *buffer;
+ struct netxen_recv_context *recv_ctx;
+ struct netxen_rcv_desc_ctx *rcv_desc;
+ int i;
+ int ctxid, ring;
+
+ adapter = pci_get_drvdata(pdev);
+ if (adapter == NULL)
+ return;
+
+ netxen_nic_stop_all_ports(adapter);
+ /* leave the hw in the same state as reboot */
+ netxen_pinit_from_rom(adapter, 0);
+ udelay(500);
+ netxen_load_firmware(adapter);
+
+ if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
+ netxen_nic_disable_int(adapter);
+
+ udelay(500); /* Delay for a while to drain the DMA engines */
+ for (i = 0; i < adapter->port_count; i++) {
+ port = adapter->port[i];
+ if ((port) && (port->netdev)) {
+ unregister_netdev(port->netdev);
+ free_netdev(port->netdev);
+ }
+ }
+
+ if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
+ pci_disable_msi(pdev);
+ pci_set_drvdata(pdev, NULL);
+ if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
+ netxen_free_hw_resources(adapter);
+
+ iounmap(adapter->ahw.pci_base0);
+ iounmap(adapter->ahw.pci_base1);
+ iounmap(adapter->ahw.pci_base2);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
+ recv_ctx = &adapter->recv_ctx[ctxid];
+ for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+ rcv_desc = &recv_ctx->rcv_desc[ring];
+ for (i = 0; i < rcv_desc->max_rx_desc_count; ++i) {
+ buffer = &(rcv_desc->rx_buf_arr[i]);
+ if (buffer->state == NETXEN_BUFFER_FREE)
+ continue;
+ pci_unmap_single(pdev, buffer->dma,
+ rcv_desc->dma_size,
+ PCI_DMA_FROMDEVICE);
+ if (buffer->skb != NULL)
+ dev_kfree_skb_any(buffer->skb);
+ }
+ vfree(rcv_desc->rx_buf_arr);
+ }
+ }
+
+ vfree(adapter->cmd_buf_arr);
+ kfree(adapter->ops);
+ kfree(adapter);
+}
+
+/*
+ * Called when a network interface is made active
+ * @returns 0 on success, negative value on failure
+ */
+static int netxen_nic_open(struct net_device *netdev)
+{
+ struct netxen_port *port = netdev_priv(netdev);
+ struct netxen_adapter *adapter = port->adapter;
+ int err = 0;
+ int ctx, ring;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
+ err = netxen_init_firmware(adapter);
+ if (err != 0) {
+ printk(KERN_ERR "Failed to init firmware\n");
+ return -EIO;
+ }
+ netxen_nic_flash_print(adapter);
+
+ /* set up all the resources for the Phantom... */
+ /* this includes the descriptors for rcv, tx, and status */
+ netxen_nic_clear_stats(adapter);
+ err = netxen_nic_hw_resources(adapter);
+ if (err) {
+ printk(KERN_ERR "Error in setting hw resources:%d\n",
+ err);
+ return err;
+ }
+ if (adapter->ops->init_port
+ && adapter->ops->init_port(adapter, port->portnum) != 0) {
+ printk(KERN_ERR "%s: Failed to initialize port %d\n",
+ netxen_nic_driver_name, port->portnum);
+ netxen_free_hw_resources(adapter);
+ return -EIO;
+ }
+ if (adapter->ops->init_niu)
+ adapter->ops->init_niu(adapter);
+ for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+ for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++)
+ netxen_post_rx_buffers(adapter, ctx, ring);
+ }
+ adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
+ }
+ adapter->active_ports++;
+ if (adapter->active_ports == 1) {
+ err = request_irq(adapter->ahw.pdev->irq, &netxen_intr,
+ SA_SHIRQ | SA_SAMPLE_RANDOM, netdev->name,
+ adapter);
+ if (err) {
+ printk(KERN_ERR "request_irq failed with: %d\n", err);
+ adapter->active_ports--;
+ return err;
+ }
+ adapter->irq = adapter->ahw.pdev->irq;
+ if (!adapter->driver_mismatch)
+ mod_timer(&adapter->watchdog_timer, jiffies);
+
+ netxen_nic_enable_int(adapter);
+ }
+
+ /* Done here again so that even if the Phantom firmware
+ * overwrote it, we set it back */
+ if (adapter->ops->macaddr_set)
+ adapter->ops->macaddr_set(port, netdev->dev_addr);
+ netxen_nic_set_link_parameters(port);
+
+ netxen_nic_set_multi(netdev);
+ if (!adapter->driver_mismatch)
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+/*
+ * netxen_nic_close - Disables a network interface entry point
+ */
+static int netxen_nic_close(struct net_device *netdev)
+{
+ struct netxen_port *port = netdev_priv(netdev);
+ struct netxen_adapter *adapter = port->adapter;
+ int i, j;
+ struct netxen_cmd_buffer *cmd_buff;
+ struct netxen_skb_frag *buffrag;
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ adapter->active_ports--;
+
+ if (!adapter->active_ports) {
+ netxen_nic_disable_int(adapter);
+ if (adapter->irq)
+ free_irq(adapter->irq, adapter);
+ cmd_buff = adapter->cmd_buf_arr;
+ for (i = 0; i < adapter->max_tx_desc_count; i++) {
+ buffrag = cmd_buff->frag_array;
+ if (buffrag->dma) {
+ pci_unmap_single(port->pdev, buffrag->dma,
+ buffrag->length,
+ PCI_DMA_TODEVICE);
+ buffrag->dma = 0;
+ }
+ for (j = 0; j < cmd_buff->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(port->pdev,
+ buffrag->dma,
+ buffrag->length,
+ PCI_DMA_TODEVICE);
+ buffrag->dma = 0;
+ }
+ }
+ /* Free the skb we received in netxen_nic_xmit_frame */
+ if (cmd_buff->skb) {
+ dev_kfree_skb_any(cmd_buff->skb);
+ cmd_buff->skb = NULL;
+ }
+ cmd_buff++;
+ }
+ del_timer_sync(&adapter->watchdog_timer);
+ }
+
+ return 0;
+}
+
+static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct netxen_port *port = netdev_priv(netdev);
+ struct netxen_adapter *adapter = port->adapter;
+ struct netxen_hardware_context *hw = &adapter->ahw;
+ unsigned int first_seg_len = skb->len - skb->data_len;
+ struct netxen_skb_frag *buffrag;
+ unsigned int i;
+
+ u32 producer = 0;
+ u32 saved_producer = 0;
+ struct cmd_desc_type0 *hwdesc;
+ int k;
+ struct netxen_cmd_buffer *pbuf = NULL;
+ unsigned int tries = 0;
+ static int dropped_packet = 0;
+ int frag_count;
+ u32 local_producer = 0;
+ u32 max_tx_desc_count = 0;
+ u32 last_cmd_consumer = 0;
+ int no_of_desc;
+
+ port->stats.xmitcalled++;
+ frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+ if (unlikely(skb->len <= 0)) {
+ dev_kfree_skb_any(skb);
+ port->stats.badskblen++;
+ return NETDEV_TX_OK;
+ }
+
+ if (frag_count > MAX_BUFFERS_PER_CMD) {
+ printk("%s: %s netxen_nic_xmit_frame: frag_count (%d)"
+ "too large, can handle only %d frags\n",
+ netxen_nic_driver_name, netdev->name,
+ frag_count, MAX_BUFFERS_PER_CMD);
+ port->stats.txdropped++;
+ if ((++dropped_packet & 0xff) == 0xff)
+ printk("%s: %s droppped packets = %d\n",
+ netxen_nic_driver_name, netdev->name,
+ dropped_packet);
+
+ return NETDEV_TX_OK;
+ }
+
+ /*
+ * Everything is set up. Now, we just need to transmit it out.
+ * Note that we have to copy the contents of the buffer over to the
+ * right place. Later on, this can be optimized out by de-coupling the
+ * producer index from the buffer index.
+ */
+ retry_getting_window:
+ spin_lock_bh(&adapter->tx_lock);
+ if (adapter->total_threads == MAX_XMIT_PRODUCERS) {
+ spin_unlock_bh(&adapter->tx_lock);
+ /*
+ * Yield CPU
+ */
+ if (!in_atomic())
+ schedule();
+ else {
+ for (i = 0; i < 20; i++)
+ cpu_relax(); /* This is a nop instruction on i386 */
+ }
+ goto retry_getting_window;
+ }
+ local_producer = adapter->cmd_producer;
+ /* There are 4 fragments per descriptor */
+ no_of_desc = (frag_count + 3) >> 2;
+ if (netdev->features & NETIF_F_TSO) {
+ if (skb_shinfo(skb)->gso_size > 0) {
+
+ no_of_desc++;
+ if (((skb->nh.iph)->ihl * sizeof(u32)) +
+ ((skb->h.th)->doff * sizeof(u32)) +
+ sizeof(struct ethhdr) >
+ (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN)) {
+ no_of_desc++;
+ }
+ }
+ }
+ k = adapter->cmd_producer;
+ max_tx_desc_count = adapter->max_tx_desc_count;
+ last_cmd_consumer = adapter->last_cmd_consumer;
+ if ((k + no_of_desc) >=
+ ((last_cmd_consumer <= k) ? last_cmd_consumer + max_tx_desc_count :
+ last_cmd_consumer)) {
+ spin_unlock_bh(&adapter->tx_lock);
+ if (tries == 0) {
+ local_bh_disable();
+ netxen_process_cmd_ring((unsigned long)adapter);
+ local_bh_enable();
+ ++tries;
+ goto retry_getting_window;
+ } else {
+ port->stats.nocmddescriptor++;
+ DPRINTK(ERR, "No command descriptors available,"
+ " producer = %d, consumer = %d count=%llu,"
+ " dropping packet\n", producer,
+ adapter->last_cmd_consumer,
+ port->stats.nocmddescriptor);
+
+ spin_lock_bh(&adapter->tx_lock);
+ netif_stop_queue(netdev);
+ port->flags |= NETXEN_NETDEV_STATUS;
+ spin_unlock_bh(&adapter->tx_lock);
+ return NETDEV_TX_BUSY;
+ }
+ }
+ k = get_index_range(k, max_tx_desc_count, no_of_desc);
+ adapter->cmd_producer = k;
+ adapter->total_threads++;
+ adapter->num_threads++;
+
+ spin_unlock_bh(&adapter->tx_lock);
+ /* Copy the descriptors into the hardware */
+ producer = local_producer;
+ saved_producer = producer;
+ hwdesc = &hw->cmd_desc_head[producer];
+ memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
+ /* Take skb->data itself */
+ pbuf = &adapter->cmd_buf_arr[producer];
+ if ((netdev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size > 0) {
+ pbuf->mss = skb_shinfo(skb)->gso_size;
+ hwdesc->mss = skb_shinfo(skb)->gso_size;
+ } else {
+ pbuf->mss = 0;
+ hwdesc->mss = 0;
+ }
+ pbuf->no_of_descriptors = no_of_desc;
+ pbuf->total_length = skb->len;
+ pbuf->skb = skb;
+ pbuf->cmd = TX_ETHER_PKT;
+ pbuf->frag_count = frag_count;
+ pbuf->port = port->portnum;
+ buffrag = &pbuf->frag_array[0];
+ buffrag->dma = pci_map_single(port->pdev, skb->data, first_seg_len,
+ PCI_DMA_TODEVICE);
+ buffrag->length = first_seg_len;
+ CMD_DESC_TOTAL_LENGTH_WRT(hwdesc, skb->len);
+ hwdesc->num_of_buffers = frag_count;
+ hwdesc->opcode = TX_ETHER_PKT;
+
+ CMD_DESC_PORT_WRT(hwdesc, port->portnum);
+ hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
+ hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+
+ for (i = 1, k = 1; i < frag_count; i++, k++) {
+ struct skb_frag_struct *frag;
+ int len, temp_len;
+ unsigned long offset;
+ dma_addr_t temp_dma;
+
+ /* move to the next descriptor if needed */
+ if ((i & 0x3) == 0) {
+ k = 0;
+ producer = get_next_index(producer,
+ adapter->max_tx_desc_count);
+ hwdesc = &hw->cmd_desc_head[producer];
+ memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
+ }
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ len = frag->size;
+ offset = frag->page_offset;
+
+ temp_len = len;
+ temp_dma = pci_map_page(port->pdev, frag->page, offset,
+ len, PCI_DMA_TODEVICE);
+
+ buffrag++;
+ buffrag->dma = temp_dma;
+ buffrag->length = temp_len;
+
+ DPRINTK(INFO, "for loop. i=%d k=%d\n", i, k);
+ switch (k) {
+ case 0:
+ hwdesc->buffer1_length = cpu_to_le16(temp_len);
+ hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
+ break;
+ case 1:
+ hwdesc->buffer2_length = cpu_to_le16(temp_len);
+ hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
+ break;
+ case 2:
+ hwdesc->buffer3_length = cpu_to_le16(temp_len);
+ hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
+ break;
+ case 3:
+ hwdesc->buffer4_length = cpu_to_le16(temp_len);
+ hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
+ break;
+ }
+ frag++;
+ }
+ producer = get_next_index(producer, adapter->max_tx_desc_count);
+
+ /* might change opcode to TX_TCP_LSO */
+ netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb);
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring
+ */
+ if (hw->cmd_desc_head[saved_producer].opcode == TX_TCP_LSO) {
+ int hdr_len, first_hdr_len, more_hdr;
+ hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length;
+ if (hdr_len > (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN)) {
+ first_hdr_len =
+ sizeof(struct cmd_desc_type0) - NET_IP_ALIGN;
+ more_hdr = 1;
+ } else {
+ first_hdr_len = hdr_len;
+ more_hdr = 0;
+ }
+ /* copy the MAC/IP/TCP headers to the cmd descriptor list */
+ hwdesc = &hw->cmd_desc_head[producer];
+
+ /* copy the first 64 bytes */
+ memcpy(((void *)hwdesc) + NET_IP_ALIGN,
+ (void *)(skb->data), first_hdr_len);
+ producer = get_next_index(producer, max_tx_desc_count);
+
+ if (more_hdr) {
+ hwdesc = &hw->cmd_desc_head[producer];
+ /* copy the next 64 bytes - should be enough except
+ * for the pathological case
+ */
+ memcpy((void *)hwdesc, (void *)(skb->data) +
+ first_hdr_len, hdr_len - first_hdr_len);
+ producer = get_next_index(producer, max_tx_desc_count);
+ }
+ }
+ spin_lock_bh(&adapter->tx_lock);
+ port->stats.txbytes +=
+ CMD_DESC_TOTAL_LENGTH(&hw->cmd_desc_head[saved_producer]);
+ /* Code to update the adapter considering how many producer threads
+ are currently working */
+ if ((--adapter->num_threads) == 0) {
+ /* This is the last thread */
+ u32 crb_producer = adapter->cmd_producer;
+ writel(crb_producer,
+ NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET));
+ wmb();
+ adapter->total_threads = 0;
+ } else {
+ u32 crb_producer = 0;
+ crb_producer =
+ readl(NETXEN_CRB_NORMALIZE
+ (adapter, CRB_CMD_PRODUCER_OFFSET));
+ if (crb_producer == local_producer) {
+ crb_producer = get_index_range(crb_producer,
+ max_tx_desc_count,
+ no_of_desc);
+ writel(crb_producer,
+ NETXEN_CRB_NORMALIZE(adapter,
+ CRB_CMD_PRODUCER_OFFSET));
+ wmb();
+ }
+ }
+
+ port->stats.xmitfinished++;
+ spin_unlock_bh(&adapter->tx_lock);
+
+ netdev->trans_start = jiffies;
+
+ DPRINTK(INFO, "wrote CMD producer %x to phantom\n", producer);
+
+ DPRINTK(INFO, "Done. Send\n");
+ return NETDEV_TX_OK;
+}
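
The descriptor accounting near the top of netxen_nic_xmit_frame() packs up to four buffer pointers into each command descriptor, so no_of_desc = (frag_count + 3) >> 2 is just a ceiling division by four, with extra descriptors reserved when the TSO headers have to be copied into the ring. A small standalone illustration of the rounding:

/* ceil(frag_count / 4): 1..4 frags need 1 descriptor, 5..8 need 2, and so on. */
static inline int descs_for_frags(int frag_count)
{
	return (frag_count + 3) >> 2;
}

For the TSO path, assuming struct cmd_desc_type0 is 64 bytes (as the "copy the first 64 bytes" comment suggests) and NET_IP_ALIGN is 2, the header-copy threshold works out to 62 bytes, so a plain 14 + 20 + 20 byte Ethernet/IPv4/TCP header fits in one extra descriptor while headers with options can spill into a second.
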
+
+static void netxen_watchdog(unsigned long v)
+{
+ struct netxen_adapter *adapter = (struct netxen_adapter *)v;
+ schedule_work(&adapter->watchdog_task);
+}
+
+static void netxen_tx_timeout(struct net_device *netdev)
+{
+ struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
+ struct netxen_adapter *adapter = port->adapter;
+
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+static void netxen_tx_timeout_task(struct net_device *netdev)
+{
+ struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
+ unsigned long flags;
+
+ printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
+ netxen_nic_driver_name, netdev->name);
+
+ spin_lock_irqsave(&port->adapter->lock, flags);
+ netxen_nic_close(netdev);
+ netxen_nic_open(netdev);
+ spin_unlock_irqrestore(&port->adapter->lock, flags);
+ netdev->trans_start = jiffies;
+ netif_wake_queue(netdev);
+}
+
+static int
+netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ u32 ret = 0;
+
+ DPRINTK(INFO, "Entered handle ISR\n");
+
+ adapter->stats.ints++;
+
+ if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
+ int count = 0;
+ u32 mask;
+ netxen_nic_disable_int(adapter);
+ /* Window = 0 or 1 */
+ do {
+ writel(0xffffffff, PCI_OFFSET_SECOND_RANGE(adapter,
+ ISR_INT_TARGET_STATUS));
+ mask = readl(pci_base_offset(adapter, ISR_INT_VECTOR));
+ } while (((mask & 0x80) != 0) && (++count < 32));
+ if ((mask & 0x80) != 0)
+ printk("Could not disable interrupt completely\n");
+
+ }
+ adapter->stats.hostints++;
+
+ if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) {
+ if (netif_rx_schedule_prep(netdev)) {
+ /*
+ * Interrupts are already disabled.
+ */
+ __netif_rx_schedule(netdev);
+ } else {
+ static unsigned int intcount = 0;
+ if ((++intcount & 0xfff) == 0xfff)
+ printk(KERN_ERR
+ "%s: %s interrupt %d while in poll\n",
+ netxen_nic_driver_name, netdev->name,
+ intcount);
+ }
+ ret = 1;
+ }
+
+ if (ret == 0) {
+ netxen_nic_enable_int(adapter);
+ }
+
+ return ret;
+}
+
+/*
+ * netxen_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: points to the adapter structure (which may be handling more than one port)
+ */
+irqreturn_t netxen_intr(int irq, void *data)
+{
+ struct netxen_adapter *adapter;
+ struct netxen_port *port;
+ struct net_device *netdev;
+ int i;
+
+ if (unlikely(!irq)) {
+ return IRQ_NONE; /* Not our interrupt */
+ }
+
+ adapter = (struct netxen_adapter *)data;
+ for (i = 0; i < adapter->ahw.max_ports; i++) {
+ port = adapter->port[i];
+ netdev = port->netdev;
+
+ /* process our status queue (for all 4 ports) */
+ netxen_handle_int(adapter, netdev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int netxen_nic_poll(struct net_device *netdev, int *budget)
+{
+ struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
+ struct netxen_adapter *adapter = port->adapter;
+ int work_to_do = min(*budget, netdev->quota);
+ int done = 1;
+ int ctx;
+ int this_work_done;
+
+ DPRINTK(INFO, "polling for %d descriptors\n", *budget);
+ port->stats.polled++;
+
+ adapter->work_done = 0;
+ for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+ /*
+ * Fairness issue. This will give undue weight to the
+ * receive context 0.
+ */
+
+ /*
+ * To avoid starvation, we give each of our receivers,
+ * a fraction of the quota. Sometimes, it might happen that we
+ * have enough quota to process every packet, but since all the
+ * packets are on one context, it gets only half of the quota,
+ * and ends up not processing it.
+ */
+ this_work_done = netxen_process_rcv_ring(adapter, ctx,
+ work_to_do /
+ MAX_RCV_CTX);
+ adapter->work_done += this_work_done;
+ }
+
+ netdev->quota -= adapter->work_done;
+ *budget -= adapter->work_done;
+
+ if (adapter->work_done >= work_to_do
+ && netxen_nic_rx_has_work(adapter) != 0)
+ done = 0;
+
+ netxen_process_cmd_ring((unsigned long)adapter);
+
+ DPRINTK(INFO, "new work_done: %d work_to_do: %d\n",
+ adapter->work_done, work_to_do);
+ if (done) {
+ netif_rx_complete(netdev);
+ netxen_nic_enable_int(adapter);
+ }
+
+ return !done;
+}
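
The fairness caveat spelled out in the comments above stems from the flat split of the poll budget: every receive context is offered work_to_do / MAX_RCV_CTX descriptors per pass, regardless of where the traffic actually lands, so a single busy context may need several poll passes to drain. A trivial illustration of the split (the numbers in the comment are hypothetical, not taken from the driver):

/* Hypothetical numbers: a budget of 64 across 4 contexts gives 16 each. */
static inline int per_context_quota(int budget, int nr_contexts)
{
	return budget / nr_contexts;
}
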
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev)
+{
+ struct netxen_port *port = netdev_priv(netdev);
+ struct netxen_adapter *adapter = port->adapter;
+ disable_irq(adapter->irq);
+ netxen_intr(adapter->irq, adapter);
+ enable_irq(adapter->irq);
+}
+#endif
+/*
+ * netxen_nic_ioctl() - provides the tcl/phanmon support through these
+ * ioctls.
+ */
+static int
+netxen_nic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ int err = 0;
+ unsigned long nr_bytes = 0;
+ struct netxen_port *port = netdev_priv(netdev);
+ struct netxen_adapter *adapter = port->adapter;
+ char dev_name[NETXEN_NIC_NAME_LEN];
+
+ DPRINTK(INFO, "doing ioctl for %s\n", netdev->name);
+ switch (cmd) {
+ case NETXEN_NIC_CMD:
+ err = netxen_nic_do_ioctl(adapter, (void *)ifr->ifr_data, port);
+ break;
+
+ case NETXEN_NIC_NAME:
+ DPRINTK(INFO, "ioctl cmd for NetXen\n");
+ if (ifr->ifr_data) {
+ sprintf(dev_name, "%s-%d", NETXEN_NIC_NAME_RSP,
+ port->portnum);
+ nr_bytes = copy_to_user((char *)ifr->ifr_data, dev_name,
+ NETXEN_NIC_NAME_LEN);
+ if (nr_bytes)
+ err = -EIO;
+
+ }
+ break;
+
+ default:
+ DPRINTK(INFO, "ioctl cmd %x not supported\n", cmd);
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static struct pci_driver netxen_driver = {
+ .name = netxen_nic_driver_name,
+ .id_table = netxen_pci_tbl,
+ .probe = netxen_nic_probe,
+ .remove = __devexit_p(netxen_nic_remove)
+};
+
+/* Driver Registration on NetXen card */
+
+static int __init netxen_init_module(void)
+{
+ return pci_module_init(&netxen_driver);
+}
+
+module_init(netxen_init_module);
+
+static void __exit netxen_exit_module(void)
+{
+ /*
+ * Wait for some time to allow the dma to drain, if any.
+ */
+ mdelay(5);
+ pci_unregister_driver(&netxen_driver);
+}
+
+module_exit(netxen_exit_module);
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
new file mode 100644
index 000000000000..7950a04532e6
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -0,0 +1,894 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ * info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ *
+ * Provides access to the Network Interface Unit h/w block.
+ *
+ */
+
+#include "netxen_nic.h"
+
+#define NETXEN_GB_MAC_SOFT_RESET 0x80000000
+#define NETXEN_GB_MAC_RESET_PROT_BLK 0x000F0000
+#define NETXEN_GB_MAC_ENABLE_TX_RX 0x00000005
+#define NETXEN_GB_MAC_PAUSED_FRMS 0x00000020
+
+static long phy_lock_timeout = 100000000;
+
+static inline int phy_lock(void)
+{
+ int i;
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ done = readl((void __iomem *)NETXEN_PCIE_REG(PCIE_SEM3_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= phy_lock_timeout) {
+ return -1;
+ }
+ timeout++;
+ if (!in_atomic())
+ schedule();
+ else {
+ for (i = 0; i < 20; i++)
+ cpu_relax();
+ }
+ }
+
+ writel(NETXEN_PHY_LOCK_ID, (void __iomem *)PHY_LOCK_DRIVER);
+ return 0;
+}
+
+static inline int phy_unlock(void)
+{
+ readl((void __iomem *)NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK));
+ return 0;
+}
+
+/*
+ * netxen_niu_gbe_phy_read - read a register from the GbE PHY via
+ * mii management interface.
+ *
+ * Note: The MII management interface goes through port 0.
+ * Individual phys are addressed as follows:
+ * @param phy [15:8] phy id
+ * @param reg [7:0] register number
+ *
+ * @returns 0 on success
+ * -1 on error
+ *
+ */
+int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy,
+ long reg, __le32 * readval)
+{
+ long timeout = 0;
+ long result = 0;
+ long restore = 0;
+ __le32 address;
+ __le32 command;
+ __le32 status;
+ __le32 mac_cfg0;
+
+ if (phy_lock() != 0) {
+ return -1;
+ }
+
+ /*
+ * MII mgmt all goes through port 0 MAC interface,
+ * so it cannot be in reset
+ */
+
+ if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
+ &mac_cfg0, 4))
+ return -EIO;
+ if (netxen_gb_get_soft_reset(mac_cfg0)) {
+ __le32 temp;
+ temp = 0;
+ netxen_gb_tx_reset_pb(temp);
+ netxen_gb_rx_reset_pb(temp);
+ netxen_gb_tx_reset_mac(temp);
+ netxen_gb_rx_reset_mac(temp);
+ if (netxen_nic_hw_write_wx(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_0(0),
+ &temp, 4))
+ return -EIO;
+ restore = 1;
+ }
+
+ address = 0;
+ netxen_gb_mii_mgmt_reg_addr(address, reg);
+ netxen_gb_mii_mgmt_phy_addr(address, phy);
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
+ &address, 4))
+ return -EIO;
+ command = 0; /* turn off any prior activity */
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
+ &command, 4))
+ return -EIO;
+ /* send read command */
+ netxen_gb_mii_mgmt_set_read_cycle(command);
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
+ &command, 4))
+ return -EIO;
+
+ status = 0;
+ do {
+ if (netxen_nic_hw_read_wx(adapter,
+ NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
+ &status, 4))
+ return -EIO;
+ timeout++;
+ } while ((netxen_get_gb_mii_mgmt_busy(status)
+ || netxen_get_gb_mii_mgmt_notvalid(status))
+ && (timeout++ < NETXEN_NIU_PHY_WAITMAX));
+
+ if (timeout < NETXEN_NIU_PHY_WAITMAX) {
+ if (netxen_nic_hw_read_wx(adapter,
+ NETXEN_NIU_GB_MII_MGMT_STATUS(0),
+ readval, 4))
+ return -EIO;
+ result = 0;
+ } else
+ result = -1;
+
+ if (restore)
+ if (netxen_nic_hw_write_wx(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_0(0),
+ &mac_cfg0, 4))
+ return -EIO;
+ phy_unlock();
+ return result;
+}
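
The address word programmed into NETXEN_NIU_GB_MII_MGMT_ADDR follows the layout described in the comment above this function: phy id in bits [15:8] and register number in bits [7:0]. The packing presumably amounts to the following; this is a sketch only, and the driver's netxen_gb_mii_mgmt_reg_addr()/netxen_gb_mii_mgmt_phy_addr() macros remain the authoritative definition.

/* Illustrative packing of the MII management address word. */
static inline unsigned int sketch_mii_mgmt_addr(unsigned int phy,
						unsigned int reg)
{
	return ((phy & 0xff) << 8) | (reg & 0xff);
}
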
+
+/*
+ * netxen_niu_gbe_phy_write - write a register to the GbE PHY via
+ * mii management interface.
+ *
+ * Note: The MII management interface goes through port 0.
+ * Individual phys are addressed as follows:
+ * @param phy [15:8] phy id
+ * @param reg [7:0] register number
+ *
+ * @returns 0 on success
+ * -1 on error
+ *
+ */
+int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter,
+ long phy, long reg, __le32 val)
+{
+ long timeout = 0;
+ long result = 0;
+ long restore = 0;
+ __le32 address;
+ __le32 command;
+ __le32 status;
+ __le32 mac_cfg0;
+
+ /*
+ * MII mgmt all goes through port 0 MAC interface, so it
+ * cannot be in reset
+ */
+
+ if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
+ &mac_cfg0, 4))
+ return -EIO;
+ if (netxen_gb_get_soft_reset(mac_cfg0)) {
+ __le32 temp;
+ temp = 0;
+ netxen_gb_tx_reset_pb(temp);
+ netxen_gb_rx_reset_pb(temp);
+ netxen_gb_tx_reset_mac(temp);
+ netxen_gb_rx_reset_mac(temp);
+
+ if (netxen_nic_hw_write_wx(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_0(0),
+ &temp, 4))
+ return -EIO;
+ restore = 1;
+ }
+
+ command = 0; /* turn off any prior activity */
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
+ &command, 4))
+ return -EIO;
+
+ address = 0;
+ netxen_gb_mii_mgmt_reg_addr(address, reg);
+ netxen_gb_mii_mgmt_phy_addr(address, phy);
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
+ &address, 4))
+ return -EIO;
+
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0),
+ &val, 4))
+ return -EIO;
+
+ status = 0;
+ do {
+ if (netxen_nic_hw_read_wx(adapter,
+ NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
+ &status, 4))
+ return -EIO;
+ timeout++;
+ } while ((netxen_get_gb_mii_mgmt_busy(status))
+ && (timeout++ < NETXEN_NIU_PHY_WAITMAX));
+
+ if (timeout < NETXEN_NIU_PHY_WAITMAX)
+ result = 0;
+ else
+ result = -EIO;
+
+ /* restore the state of port 0 MAC in case we tampered with it */
+ if (restore)
+ if (netxen_nic_hw_write_wx(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_0(0),
+ &mac_cfg0, 4))
+ return -EIO;
+
+ return result;
+}
+
+int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter,
+ int port)
+{
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_INT_MASK, 0x3f);
+ return 0;
+}
+
+int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter,
+ int port)
+{
+ int result = 0;
+ __le32 enable = 0;
+ netxen_set_phy_int_link_status_changed(enable);
+ netxen_set_phy_int_autoneg_completed(enable);
+ netxen_set_phy_int_speed_changed(enable);
+
+ if (0 !=
+ netxen_niu_gbe_phy_write(adapter, port,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE,
+ enable))
+ result = -EIO;
+
+ return result;
+}
+
+int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter,
+ int port)
+{
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_INT_MASK, 0x7f);
+ return 0;
+}
+
+int netxen_niu_gbe_disable_phy_interrupts(struct netxen_adapter *adapter,
+ int port)
+{
+ int result = 0;
+ if (0 !=
+ netxen_niu_gbe_phy_write(adapter, port,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE, 0))
+ result = -EIO;
+
+ return result;
+}
+
+int netxen_niu_xgbe_clear_phy_interrupts(struct netxen_adapter *adapter,
+ int port)
+{
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_ACTIVE_INT, -1);
+ return 0;
+}
+
+int netxen_niu_gbe_clear_phy_interrupts(struct netxen_adapter *adapter,
+ int port)
+{
+ int result = 0;
+ if (0 !=
+ netxen_niu_gbe_phy_write(adapter, port,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS,
+ -EIO))
+ result = -EIO;
+
+ return result;
+}
+
+/*
+ * netxen_niu_gbe_set_mii_mode- Set 10/100 Mbit Mode for GbE MAC
+ *
+ */
+void netxen_niu_gbe_set_mii_mode(struct netxen_adapter *adapter,
+ int port, long enable)
+{
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_MODE, 0x2);
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ 0x80000000);
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ 0x0000f0025);
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port),
+ 0xf1ff);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_GB0_GMII_MODE + (port << 3), 0);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_GB0_MII_MODE + (port << 3), 1);
+ netxen_crb_writelit_adapter(adapter,
+ (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7);
+
+ if (enable) {
+ /*
+ * Do NOT enable flow control until a suitable solution for
+ * shutting down pause frames is found.
+ */
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ 0x5);
+ }
+
+ if (netxen_niu_gbe_enable_phy_interrupts(adapter, port))
+ printk(KERN_ERR PFX "ERROR enabling PHY interrupts\n");
+ if (netxen_niu_gbe_clear_phy_interrupts(adapter, port))
+ printk(KERN_ERR PFX "ERROR clearing PHY interrupts\n");
+}
+
+/*
+ * netxen_niu_gbe_set_gmii_mode- Set GbE Mode for GbE MAC
+ */
+void netxen_niu_gbe_set_gmii_mode(struct netxen_adapter *adapter,
+ int port, long enable)
+{
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_MODE, 0x2);
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ 0x80000000);
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ 0x0000f0025);
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port),
+ 0xf2ff);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_GB0_MII_MODE + (port << 3), 0);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_GB0_GMII_MODE + (port << 3), 1);
+ netxen_crb_writelit_adapter(adapter,
+ (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7);
+
+ if (enable) {
+ /*
+ * Do NOT enable flow control until a suitable solution for
+ * shutting down pause frames is found.
+ */
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ 0x5);
+ }
+
+ if (netxen_niu_gbe_enable_phy_interrupts(adapter, port))
+ printk(KERN_ERR PFX "ERROR enabling PHY interrupts\n");
+ if (netxen_niu_gbe_clear_phy_interrupts(adapter, port))
+ printk(KERN_ERR PFX "ERROR clearing PHY interrupts\n");
+}
+
+int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
+{
+ int result = 0;
+ __le32 status;
+ if (adapter->ops->disable_phy_interrupts)
+ adapter->ops->disable_phy_interrupts(adapter, port);
+ mdelay(2);
+
+ if (0 ==
+ netxen_niu_gbe_phy_read(adapter, port,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ (__le32 *) & status)) {
+ if (netxen_get_phy_link(status)) {
+ if (netxen_get_phy_speed(status) == 2) {
+ netxen_niu_gbe_set_gmii_mode(adapter, port, 1);
+ } else if ((netxen_get_phy_speed(status) == 1)
+ || (netxen_get_phy_speed(status) == 0)) {
+ netxen_niu_gbe_set_mii_mode(adapter, port, 1);
+ } else {
+ result = -1;
+ }
+
+ } else {
+ /*
+				 * We don't have a link.  The cable must be
+				 * disconnected.  Enable PHY interrupts so we
+				 * take action when it is plugged back in.
+ */
+
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_0
+ (port),
+ NETXEN_GB_MAC_SOFT_RESET);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_0
+ (port),
+ NETXEN_GB_MAC_RESET_PROT_BLK
+ | NETXEN_GB_MAC_ENABLE_TX_RX
+ |
+ NETXEN_GB_MAC_PAUSED_FRMS);
+ if (netxen_niu_gbe_clear_phy_interrupts(adapter, port))
+ printk(KERN_ERR PFX
+ "ERROR clearing PHY interrupts\n");
+ if (netxen_niu_gbe_enable_phy_interrupts(adapter, port))
+ printk(KERN_ERR PFX
+ "ERROR enabling PHY interrupts\n");
+ if (netxen_niu_gbe_clear_phy_interrupts(adapter, port))
+ printk(KERN_ERR PFX
+ "ERROR clearing PHY interrupts\n");
+ result = -1;
+ }
+ } else {
+ result = -EIO;
+ }
+ return result;
+}
+
+int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
+{
+ long reg = 0, ret = 0;
+
+ if (adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) {
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_XG1_CONFIG_0, 0x5);
+ /* XXX hack for Mez cards: both ports in promisc mode */
+ netxen_nic_hw_read_wx(adapter,
+ NETXEN_NIU_XGE_CONFIG_1, &reg, 4);
+ reg = (reg | 0x2000UL);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_XGE_CONFIG_1, reg);
+ reg = 0;
+ netxen_nic_hw_read_wx(adapter,
+ NETXEN_NIU_XG1_CONFIG_1, &reg, 4);
+ reg = (reg | 0x2000UL);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_XG1_CONFIG_1, reg);
+ }
+
+ return ret;
+}
+
+/*
+ * netxen_niu_gbe_handle_phy_interrupt - Handles GbE PHY interrupts
+ * @param enable 0 means don't enable the port
+ * 1 means enable (or re-enable) the port
+ */
+int netxen_niu_gbe_handle_phy_interrupt(struct netxen_adapter *adapter,
+ int port, long enable)
+{
+ int result = 0;
+ __le32 int_src;
+
+ printk(KERN_INFO PFX "NETXEN: Handling PHY interrupt on port %d"
+ " (device enable = %d)\n", (int)port, (int)enable);
+
+ /*
+ * The read of the PHY INT status will clear the pending
+ * interrupt status
+ */
+ if (netxen_niu_gbe_phy_read(adapter, port,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS,
+ &int_src) != 0)
+ result = -EINVAL;
+ else {
+ printk(KERN_INFO PFX "PHY Interrupt source = 0x%x \n", int_src);
+ if (netxen_get_phy_int_jabber(int_src))
+ printk(KERN_INFO PFX "jabber Interrupt ");
+ if (netxen_get_phy_int_polarity_changed(int_src))
+ printk(KERN_INFO PFX "polarity changed ");
+ if (netxen_get_phy_int_energy_detect(int_src))
+ printk(KERN_INFO PFX "energy detect \n");
+ if (netxen_get_phy_int_downshift(int_src))
+ printk(KERN_INFO PFX "downshift \n");
+ if (netxen_get_phy_int_mdi_xover_changed(int_src))
+ printk(KERN_INFO PFX "mdi_xover_changed ");
+ if (netxen_get_phy_int_fifo_over_underflow(int_src))
+ printk(KERN_INFO PFX "fifo_over_underflow ");
+ if (netxen_get_phy_int_false_carrier(int_src))
+ printk(KERN_INFO PFX "false_carrier ");
+ if (netxen_get_phy_int_symbol_error(int_src))
+ printk(KERN_INFO PFX "symbol_error ");
+ if (netxen_get_phy_int_autoneg_completed(int_src))
+ printk(KERN_INFO PFX "autoneg_completed ");
+ if (netxen_get_phy_int_page_received(int_src))
+ printk(KERN_INFO PFX "page_received ");
+ if (netxen_get_phy_int_duplex_changed(int_src))
+ printk(KERN_INFO PFX "duplex_changed ");
+ if (netxen_get_phy_int_autoneg_error(int_src))
+ printk(KERN_INFO PFX "autoneg_error ");
+ if ((netxen_get_phy_int_speed_changed(int_src))
+ || (netxen_get_phy_int_link_status_changed(int_src))) {
+ __le32 status;
+
+ printk(KERN_INFO PFX
+ "speed_changed or link status changed");
+ if (netxen_niu_gbe_phy_read
+ (adapter, port,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ &status) == 0) {
+ if (netxen_get_phy_speed(status) == 2) {
+ printk
+ (KERN_INFO PFX "Link speed changed"
+ " to 1000 Mbps\n");
+ netxen_niu_gbe_set_gmii_mode(adapter,
+ port,
+ enable);
+ } else if (netxen_get_phy_speed(status) == 1) {
+ printk
+ (KERN_INFO PFX "Link speed changed"
+ " to 100 Mbps\n");
+ netxen_niu_gbe_set_mii_mode(adapter,
+ port,
+ enable);
+ } else if (netxen_get_phy_speed(status) == 0) {
+ printk
+ (KERN_INFO PFX "Link speed changed"
+ " to 10 Mbps\n");
+ netxen_niu_gbe_set_mii_mode(adapter,
+ port,
+ enable);
+ } else {
+					printk(KERN_ERR PFX "ERROR reading "
+					       "PHY status. Illegal speed.\n");
+ result = -1;
+ }
+ } else {
+ printk(KERN_ERR PFX
+ "ERROR reading PHY status.\n");
+ result = -1;
+ }
+
+ }
+ printk(KERN_INFO "\n");
+ }
+ return result;
+}
+
+/*
+ * Return the current station MAC address.
+ * Note that the address is returned in network byte order.
+ */
+int netxen_niu_macaddr_get(struct netxen_adapter *adapter,
+ int phy, netxen_ethernet_macaddr_t * addr)
+{
+ u64 result = 0;
+ __le32 stationhigh;
+ __le32 stationlow;
+
+ if (addr == NULL)
+ return -EINVAL;
+ if ((phy < 0) || (phy > 3))
+ return -EINVAL;
+
+ if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy),
+ &stationhigh, 4))
+ return -EIO;
+ if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy),
+ &stationlow, 4))
+ return -EIO;
+
+ result = (u64) netxen_gb_get_stationaddress_low(stationlow);
+ result |= (u64) stationhigh << 16;
+ memcpy(*addr, &result, sizeof(netxen_ethernet_macaddr_t));
+
+ return 0;
+}
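+
+/*
+ * Layout note: the 6-byte station address is split across two 32-bit CRB
+ * registers.  netxen_niu_macaddr_set() below writes the first two bytes of
+ * the address into the upper 16 bits of NETXEN_NIU_GB_STATION_ADDR_1 and
+ * the remaining four bytes into NETXEN_NIU_GB_STATION_ADDR_0;
+ * netxen_niu_macaddr_get() above reassembles the address from the same
+ * two registers.
+ */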
+
+/*
+ * Set the station MAC address.
+ * Note that the passed-in value must already be in network byte order.
+ */
+int netxen_niu_macaddr_set(struct netxen_port *port,
+ netxen_ethernet_macaddr_t addr)
+{
+ __le32 temp = 0;
+ struct netxen_adapter *adapter = port->adapter;
+ int phy = port->portnum;
+ unsigned char mac_addr[MAX_ADDR_LEN];
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ memcpy(&temp, addr, 2);
+ temp <<= 16;
+ if (netxen_nic_hw_write_wx
+ (adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), &temp, 4))
+ return -EIO;
+
+ temp = 0;
+
+ memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
+ if (netxen_nic_hw_write_wx
+ (adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), &temp, 4))
+ return -2;
+
+ netxen_niu_macaddr_get(adapter, phy,
+ (netxen_ethernet_macaddr_t *) mac_addr);
+		if (memcmp(mac_addr, addr, MAX_ADDR_LEN) == 0)
+ break;
+ }
+
+ if (i == 10) {
+ printk(KERN_ERR "%s: cannot set Mac addr for %s\n",
+ netxen_nic_driver_name, port->netdev->name);
+ printk(KERN_ERR "MAC address set: "
+ "%02x:%02x:%02x:%02x:%02x:%02x.\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+ printk(KERN_ERR "MAC address get: "
+ "%02x:%02x:%02x:%02x:%02x:%02x.\n",
+ mac_addr[0],
+ mac_addr[1],
+ mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
+ }
+ return 0;
+}
+
+/* Enable a GbE interface */
+int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
+ int port, netxen_niu_gbe_ifmode_t mode)
+{
+ __le32 mac_cfg0;
+ __le32 mac_cfg1;
+ __le32 mii_cfg;
+
+ if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+ return -EINVAL;
+
+ mac_cfg0 = 0;
+ netxen_gb_soft_reset(mac_cfg0);
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ &mac_cfg0, 4))
+ return -EIO;
+ mac_cfg0 = 0;
+ netxen_gb_enable_tx(mac_cfg0);
+ netxen_gb_enable_rx(mac_cfg0);
+ netxen_gb_unset_rx_flowctl(mac_cfg0);
+ netxen_gb_tx_reset_pb(mac_cfg0);
+ netxen_gb_rx_reset_pb(mac_cfg0);
+ netxen_gb_tx_reset_mac(mac_cfg0);
+ netxen_gb_rx_reset_mac(mac_cfg0);
+
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ &mac_cfg0, 4))
+ return -EIO;
+ mac_cfg1 = 0;
+ netxen_gb_set_preamblelen(mac_cfg1, 0xf);
+ netxen_gb_set_duplex(mac_cfg1);
+ netxen_gb_set_crc_enable(mac_cfg1);
+ netxen_gb_set_padshort(mac_cfg1);
+ netxen_gb_set_checklength(mac_cfg1);
+ netxen_gb_set_hugeframes(mac_cfg1);
+
+ if (mode == NETXEN_NIU_10_100_MB) {
+ netxen_gb_set_intfmode(mac_cfg1, 1);
+ if (netxen_nic_hw_write_wx(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_1(port),
+ &mac_cfg1, 4))
+ return -EIO;
+
+ /* set mii mode */
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB0_GMII_MODE +
+ (port << 3), 0);
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB0_MII_MODE +
+ (port << 3), 1);
+
+ } else if (mode == NETXEN_NIU_1000_MB) {
+ netxen_gb_set_intfmode(mac_cfg1, 2);
+ if (netxen_nic_hw_write_wx(adapter,
+ NETXEN_NIU_GB_MAC_CONFIG_1(port),
+ &mac_cfg1, 4))
+ return -EIO;
+ /* set gmii mode */
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB0_MII_MODE +
+ (port << 3), 0);
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB0_GMII_MODE +
+ (port << 3), 1);
+ }
+ mii_cfg = 0;
+ netxen_gb_set_mii_mgmt_clockselect(mii_cfg, 7);
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port),
+ &mii_cfg, 4))
+ return -EIO;
+ mac_cfg0 = 0;
+ netxen_gb_enable_tx(mac_cfg0);
+ netxen_gb_enable_rx(mac_cfg0);
+ netxen_gb_unset_rx_flowctl(mac_cfg0);
+ netxen_gb_unset_tx_flowctl(mac_cfg0);
+
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ &mac_cfg0, 4))
+ return -EIO;
+ return 0;
+}
+
+/* Disable a GbE interface */
+int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter, int port)
+{
+ __le32 mac_cfg0;
+
+ if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+ return -EINVAL;
+
+ mac_cfg0 = 0;
+ netxen_gb_soft_reset(mac_cfg0);
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ &mac_cfg0, 4))
+ return -EIO;
+ return 0;
+}
+
+/* Disable an XG interface */
+int netxen_niu_disable_xg_port(struct netxen_adapter *adapter, int port)
+{
+ __le32 mac_cfg;
+
+ if (port != 0)
+ return -EINVAL;
+
+ mac_cfg = 0;
+ netxen_xg_soft_reset(mac_cfg);
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_CONFIG_0,
+ &mac_cfg, 4))
+ return -EIO;
+ return 0;
+}
+
+/* Set promiscuous mode for a GbE interface */
+int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, int port,
+ netxen_niu_prom_mode_t mode)
+{
+ __le32 reg;
+
+ if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+ return -EINVAL;
+
+ /* save previous contents */
+ if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
+ &reg, 4))
+ return -EIO;
+ if (mode == NETXEN_NIU_PROMISC_MODE) {
+ switch (port) {
+ case 0:
+ netxen_clear_gb_drop_gb0(reg);
+ break;
+ case 1:
+ netxen_clear_gb_drop_gb1(reg);
+ break;
+ case 2:
+ netxen_clear_gb_drop_gb2(reg);
+ break;
+ case 3:
+ netxen_clear_gb_drop_gb3(reg);
+ break;
+ default:
+ return -EIO;
+ }
+ } else {
+ switch (port) {
+ case 0:
+ netxen_set_gb_drop_gb0(reg);
+ break;
+ case 1:
+ netxen_set_gb_drop_gb1(reg);
+ break;
+ case 2:
+ netxen_set_gb_drop_gb2(reg);
+ break;
+ case 3:
+ netxen_set_gb_drop_gb3(reg);
+ break;
+ default:
+ return -EIO;
+ }
+ }
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
+ &reg, 4))
+ return -EIO;
+ return 0;
+}
+
+/*
+ * Set the MAC address for an XG port
+ * Note that the passed-in value must already be in network byte order.
+ */
+int netxen_niu_xg_macaddr_set(struct netxen_port *port,
+ netxen_ethernet_macaddr_t addr)
+{
+ __le32 temp = 0;
+ struct netxen_adapter *adapter = port->adapter;
+
+ memcpy(&temp, addr, 2);
+ temp = cpu_to_le32(temp);
+ temp <<= 16;
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
+ &temp, 4))
+ return -EIO;
+
+ temp = 0;
+
+ memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
+ temp = cpu_to_le32(temp);
+ if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
+ &temp, 4))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Return the current station MAC address.
+ * Note that the address is returned in network byte order.
+ */
+int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, int phy,
+ netxen_ethernet_macaddr_t * addr)
+{
+ __le32 stationhigh;
+ __le32 stationlow;
+ u64 result;
+
+ if (addr == NULL)
+ return -EINVAL;
+ if (phy != 0)
+ return -EINVAL;
+
+ if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
+ &stationhigh, 4))
+ return -EIO;
+ if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
+ &stationlow, 4))
+ return -EIO;
+
+ result = ((u64) stationlow) >> 16;
+ result |= (u64) stationhigh << 16;
+ memcpy(*addr, &result, sizeof(netxen_ethernet_macaddr_t));
+
+ return 0;
+}
+
+int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
+ int port, netxen_niu_prom_mode_t mode)
+{
+ __le32 reg;
+
+ if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+ return -EINVAL;
+
+ if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_CONFIG_1, &reg, 4))
+ return -EIO;
+ if (mode == NETXEN_NIU_PROMISC_MODE)
+ reg = (reg | 0x2000UL);
+ else
+ reg = (reg & ~0x2000UL);
+
+ netxen_crb_writelit_adapter(adapter, NETXEN_NIU_XGE_CONFIG_1, reg);
+
+ return 0;
+}
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
new file mode 100644
index 000000000000..8181d436783f
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ * info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ */
+
+#ifndef __NIC_PHAN_REG_H_
+#define __NIC_PHAN_REG_H_
+
+/*
+ * CRB register and queue-message setup done only at initialization time.
+ */
+
+/*
+ * The following two are the base addresses for the CRB registers; the
+ * register offsets below are added to them to form the individual
+ * register addresses.
+ */
+#define NIC_CRB_BASE_PORT1 NETXEN_CAM_RAM(0x200)
+#define NIC_CRB_BASE_PORT2 NETXEN_CAM_RAM(0x250)
+
+#define NETXEN_NIC_REG(X) (NIC_CRB_BASE_PORT1+(X))
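+
+/*
+ * For example, CRB_CMD_PRODUCER_OFFSET below expands to
+ * NETXEN_CAM_RAM(0x200) + 0x08, i.e. offset 0x08 within the port 1 CRB
+ * block.
+ */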
+
+/*
+ * CrbPortPhanCntrHi/Lo is used to pass the address of the HostPhantomIndex,
+ * which can be read by the Phantom host to get producer/consumer indexes from
+ * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
+ * registers will be used for the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
+#define CRB_PHAN_CNTRL_LO_OFFSET NETXEN_NIC_REG(0x00)
+#define CRB_PHAN_CNTRL_HI_OFFSET NETXEN_NIC_REG(0x04)
+
+/* point to the indexes */
+#define CRB_CMD_PRODUCER_OFFSET NETXEN_NIC_REG(0x08)
+#define CRB_CMD_CONSUMER_OFFSET NETXEN_NIC_REG(0x0c)
+
+#define CRB_PAUSE_ADDR_LO NETXEN_NIC_REG(0x10)
+#define CRB_PAUSE_ADDR_HI NETXEN_NIC_REG(0x14)
+
+/* address of command descriptors in the host memory */
+#define CRB_HOST_CMD_ADDR_HI NETXEN_NIC_REG(0x30)
+#define CRB_HOST_CMD_ADDR_LO NETXEN_NIC_REG(0x34)
+
+/* The following 4 CRB registers are for performance coalescing */
+#define CRB_CMD_INTR_LOOP NETXEN_NIC_REG(0x38)
+#define CRB_CMD_DMA_LOOP NETXEN_NIC_REG(0x3c)
+#define CRB_RCV_INTR_LOOP NETXEN_NIC_REG(0x40)
+#define CRB_RCV_DMA_LOOP NETXEN_NIC_REG(0x44)
+
+/* Needed by the host to find out the state of Phantom's initialization */
+#define CRB_ENABLE_TX_INTR NETXEN_NIC_REG(0x4c)
+#define CRB_CMDPEG_STATE NETXEN_NIC_REG(0x50)
+#define CRB_CMDPEG_CMDRING NETXEN_NIC_REG(0x54)
+
+/* Interrupt coalescing parameters */
+#define CRB_GLOBAL_INT_COAL NETXEN_NIC_REG(0x80)
+#define CRB_INT_COAL_MODE NETXEN_NIC_REG(0x84)
+#define CRB_MAX_RCV_BUFS NETXEN_NIC_REG(0x88)
+#define CRB_TX_INT_THRESHOLD NETXEN_NIC_REG(0x8c)
+#define CRB_RX_PKT_TIMER NETXEN_NIC_REG(0x90)
+#define CRB_TX_PKT_TIMER NETXEN_NIC_REG(0x94)
+#define CRB_RX_PKT_CNT NETXEN_NIC_REG(0x98)
+#define CRB_RX_TMR_CNT NETXEN_NIC_REG(0x9c)
+#define CRB_INT_THRESH NETXEN_NIC_REG(0xa4)
+
+/* Register for communicating XG link status */
+#define CRB_XG_STATE NETXEN_NIC_REG(0xa0)
+
+/* Register for communicating card temperature */
+/* Upper 16 bits are temperature value. Lower 16 bits are the state */
+#define CRB_TEMP_STATE NETXEN_NIC_REG(0xa8)
+#define nx_get_temp_val(x) ((x) >> 16)
+#define nx_get_temp_state(x) ((x) & 0xffff)
+#define nx_encode_temp(val, state) (((val) << 16) | (state))
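+/*
+ * Example: a CRB_TEMP_STATE value of 0x002f0001 decodes to a temperature
+ * reading of 47 (presumably degrees C) in state NX_TEMP_NORMAL; see the
+ * temperature state enum at the end of this file.
+ */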
+
+/* Debug registers for controlling NIC pkt gen agent */
+#define CRB_AGENT_GO NETXEN_NIC_REG(0xb0)
+#define CRB_AGENT_TX_SIZE NETXEN_NIC_REG(0xb4)
+#define CRB_AGENT_TX_TYPE NETXEN_NIC_REG(0xb8)
+#define CRB_AGENT_TX_ADDR NETXEN_NIC_REG(0xbc)
+#define CRB_AGENT_TX_MSS NETXEN_NIC_REG(0xc0)
+
+/* Debug registers for observing NIC performance */
+#define CRB_TX_STATE NETXEN_NIC_REG(0xd0)
+#define CRB_TX_COUNT NETXEN_NIC_REG(0xd4)
+#define CRB_RX_STATE NETXEN_NIC_REG(0xd8)
+
+/* CRB registers per Rcv Descriptor ring */
+struct netxen_rcv_desc_crb {
+ u32 crb_rcv_producer_offset __attribute__ ((aligned(512)));
+ u32 crb_rcv_consumer_offset;
+ u32 crb_globalrcv_ring;
+};
+
+/*
+ * CRB registers used by the receive peg logic. One instance of these
+ * needs to be instantiated per instance of the receive peg.
+ */
+
+struct netxen_recv_crb {
+ struct netxen_rcv_desc_crb rcv_desc_crb[NUM_RCV_DESC_RINGS];
+ u32 crb_rcvstatus_ring;
+ u32 crb_rcv_status_producer;
+ u32 crb_rcv_status_consumer;
+ u32 crb_rcvpeg_state;
+};
+
+#if defined(DEFINE_GLOBAL_RECV_CRB)
+struct netxen_recv_crb recv_crb_registers[] = {
+ /*
+ * Instance 0.
+ */
+ {
+ /* rcv_desc_crb: */
+ {
+ {
+ /* crb_rcv_producer_offset: */
+ NETXEN_NIC_REG(0x18),
+ /* crb_rcv_consumer_offset: */
+ NETXEN_NIC_REG(0x1c),
+			 /* crb_globalrcv_ring: */
+ NETXEN_NIC_REG(0x20),
+ },
+ /* Jumbo frames */
+ {
+ /* crb_rcv_producer_offset: */
+ NETXEN_NIC_REG(0x100),
+ /* crb_rcv_consumer_offset: */
+ NETXEN_NIC_REG(0x104),
+			 /* crb_globalrcv_ring: */
+ NETXEN_NIC_REG(0x108),
+ }
+ },
+ /* crb_rcvstatus_ring: */
+ NETXEN_NIC_REG(0x24),
+ /* crb_rcv_status_producer: */
+ NETXEN_NIC_REG(0x28),
+ /* crb_rcv_status_consumer: */
+ NETXEN_NIC_REG(0x2c),
+ /* crb_rcvpeg_state: */
+ NETXEN_NIC_REG(0x48),
+
+ },
+ /*
+	 * Instance 1.
+ */
+ {
+ /* rcv_desc_crb: */
+ {
+ {
+ /* crb_rcv_producer_offset: */
+ NETXEN_NIC_REG(0x80),
+ /* crb_rcv_consumer_offset: */
+ NETXEN_NIC_REG(0x84),
+ /* crb_globalrcv_ring: */
+ NETXEN_NIC_REG(0x88),
+ },
+ /* Jumbo frames */
+ {
+ /* crb_rcv_producer_offset: */
+ NETXEN_NIC_REG(0x10C),
+ /* crb_rcv_consumer_offset: */
+ NETXEN_NIC_REG(0x110),
+ /* crb_globalrcv_ring: */
+ NETXEN_NIC_REG(0x114),
+ }
+ },
+ /* crb_rcvstatus_ring: */
+ NETXEN_NIC_REG(0x8c),
+ /* crb_rcv_status_producer: */
+ NETXEN_NIC_REG(0x90),
+ /* crb_rcv_status_consumer: */
+ NETXEN_NIC_REG(0x94),
+ /* crb_rcvpeg_state: */
+ NETXEN_NIC_REG(0x98),
+ },
+};
+#else
+extern struct netxen_recv_crb recv_crb_registers[];
+#endif /* DEFINE_GLOBAL_RECV_CRB */
+
+/*
+ * Temperature control.
+ */
+enum {
+ NX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ NX_TEMP_WARN, /* Sound alert, temperature getting high */
+ NX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
+
+#endif /* __NIC_PHAN_REG_H_ */
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 0c00d182e7fd..c51cc5d8789a 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1096,7 +1096,6 @@ static void ei_watchdog(u_long arg)
/* Check for pending interrupt with expired latency timer: with
this, we can limp along even if the interrupt is blocked */
- outb_p(E8390_NODMA+E8390_PAGE0, nic_base + E8390_CMD);
if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
if (!info->fast_poll)
printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index ecb61f876f27..f994f129f3d8 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -56,6 +56,12 @@ config SMSC_PHY
---help---
Currently supports the LAN83C185 PHY
+config BROADCOM_PHY
+ tristate "Drivers for Broadcom PHYs"
+ depends on PHYLIB
+ ---help---
+ Currently supports the BCM5411, BCM5421 and BCM5461 PHYs.
+
config FIXED_PHY
tristate "Drivers for PHY emulation on fixed speed/link"
depends on PHYLIB
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 320f8323123f..bcd1efbd2a18 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -10,4 +10,5 @@ obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
new file mode 100644
index 000000000000..29666c85ed55
--- /dev/null
+++ b/drivers/net/phy/broadcom.c
@@ -0,0 +1,175 @@
+/*
+ * drivers/net/phy/broadcom.c
+ *
+ * Broadcom BCM5411, BCM5421 and BCM5461 Gigabit Ethernet
+ * transceivers.
+ *
+ * Copyright (c) 2006 Maciej W. Rozycki
+ *
+ * Inspired by code written by Amy Fong.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
+#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
+#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
+
+#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
+#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
+
+#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
+#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */
+#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */
+#define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */
+#define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */
+#define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */
+#define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */
+#define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */
+#define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */
+#define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */
+#define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */
+#define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */
+#define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */
+#define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */
+#define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */
+#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */
+#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */
+
+MODULE_DESCRIPTION("Broadcom PHY driver");
+MODULE_AUTHOR("Maciej W. Rozycki");
+MODULE_LICENSE("GPL");
+
+static int bcm54xx_config_init(struct phy_device *phydev)
+{
+ int reg, err;
+
+ reg = phy_read(phydev, MII_BCM54XX_ECR);
+ if (reg < 0)
+ return reg;
+
+ /* Mask interrupts globally. */
+ reg |= MII_BCM54XX_ECR_IM;
+ err = phy_write(phydev, MII_BCM54XX_ECR, reg);
+ if (err < 0)
+ return err;
+
+ /* Unmask events we are interested in. */
+ reg = ~(MII_BCM54XX_INT_DUPLEX |
+ MII_BCM54XX_INT_SPEED |
+ MII_BCM54XX_INT_LINK);
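+	/*
+	 * A set bit in the IMR masks the corresponding event, so writing
+	 * the complement here leaves only the link, speed and duplex
+	 * change interrupts enabled (0xfff1 in the 16-bit register).
+	 */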
+ err = phy_write(phydev, MII_BCM54XX_IMR, reg);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static int bcm54xx_ack_interrupt(struct phy_device *phydev)
+{
+ int reg;
+
+ /* Clear pending interrupts. */
+ reg = phy_read(phydev, MII_BCM54XX_ISR);
+ if (reg < 0)
+ return reg;
+
+ return 0;
+}
+
+static int bcm54xx_config_intr(struct phy_device *phydev)
+{
+ int reg, err;
+
+ reg = phy_read(phydev, MII_BCM54XX_ECR);
+ if (reg < 0)
+ return reg;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ reg &= ~MII_BCM54XX_ECR_IM;
+ else
+ reg |= MII_BCM54XX_ECR_IM;
+
+ err = phy_write(phydev, MII_BCM54XX_ECR, reg);
+ return err;
+}
+
+static struct phy_driver bcm5411_driver = {
+ .phy_id = 0x00206070,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM5411",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm54xx_ack_interrupt,
+ .config_intr = bcm54xx_config_intr,
+ .driver = { .owner = THIS_MODULE },
+};
+
+static struct phy_driver bcm5421_driver = {
+ .phy_id = 0x002060e0,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM5421",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm54xx_ack_interrupt,
+ .config_intr = bcm54xx_config_intr,
+ .driver = { .owner = THIS_MODULE },
+};
+
+static struct phy_driver bcm5461_driver = {
+ .phy_id = 0x002060c0,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM5461",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm54xx_ack_interrupt,
+ .config_intr = bcm54xx_config_intr,
+ .driver = { .owner = THIS_MODULE },
+};
+
+static int __init broadcom_init(void)
+{
+ int ret;
+
+ ret = phy_driver_register(&bcm5411_driver);
+ if (ret)
+ goto out_5411;
+ ret = phy_driver_register(&bcm5421_driver);
+ if (ret)
+ goto out_5421;
+ ret = phy_driver_register(&bcm5461_driver);
+ if (ret)
+ goto out_5461;
+ return ret;
+
+out_5461:
+ phy_driver_unregister(&bcm5421_driver);
+out_5421:
+ phy_driver_unregister(&bcm5411_driver);
+out_5411:
+ return ret;
+}
+
+static void __exit broadcom_exit(void)
+{
+ phy_driver_unregister(&bcm5461_driver);
+ phy_driver_unregister(&bcm5421_driver);
+ phy_driver_unregister(&bcm5411_driver);
+}
+
+module_init(broadcom_init);
+module_exit(broadcom_exit);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3af9fcf76c81..88237bdb5255 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -7,6 +7,7 @@
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
+ * Copyright (c) 2006 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -32,6 +33,8 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -484,6 +487,9 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
struct phy_device *phydev = phy_dat;
+ if (PHY_HALTED == phydev->state)
+ return IRQ_NONE; /* It can't be ours. */
+
/* The MDIO bus is not allowed to be written in interrupt
* context, so we need to disable the irq here. A work
* queue will write the PHY to disable and clear the
@@ -577,6 +583,13 @@ int phy_stop_interrupts(struct phy_device *phydev)
if (err)
phy_error(phydev);
+ /*
+ * Finish any pending work; we might have been scheduled
+ * to be called from keventd ourselves, though.
+ */
+ if (!current_is_keventd())
+ flush_scheduled_work();
+
free_irq(phydev->irq, phydev);
return err;
@@ -603,7 +616,8 @@ static void phy_change(void *data)
enable_irq(phydev->irq);
/* Reenable interrupts */
- err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+ if (PHY_HALTED != phydev->state)
+ err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
if (err)
goto irq_enable_err;
@@ -624,18 +638,24 @@ void phy_stop(struct phy_device *phydev)
if (PHY_HALTED == phydev->state)
goto out_unlock;
- if (phydev->irq != PHY_POLL) {
- /* Clear any pending interrupts */
- phy_clear_interrupt(phydev);
+ phydev->state = PHY_HALTED;
+ if (phydev->irq != PHY_POLL) {
/* Disable PHY Interrupts */
phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
- }
- phydev->state = PHY_HALTED;
+ /* Clear any pending interrupts */
+ phy_clear_interrupt(phydev);
+ }
out_unlock:
spin_unlock(&phydev->lock);
+
+ /*
+ * Cannot call flush_scheduled_work() here as desired because
+ * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
+ * will not reenable interrupts.
+ */
}
@@ -693,60 +713,57 @@ static void phy_timer(unsigned long data)
break;
case PHY_AN:
+ err = phy_read_status(phydev);
+
+ if (err < 0)
+ break;
+
+ /* If the link is down, give up on
+ * negotiation for now */
+ if (!phydev->link) {
+ phydev->state = PHY_NOLINK;
+ netif_carrier_off(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
+ break;
+ }
+
/* Check if negotiation is done. Break
* if there's an error */
err = phy_aneg_done(phydev);
if (err < 0)
break;
- /* If auto-negotiation is done, we change to
- * either RUNNING, or NOLINK */
+ /* If AN is done, we're running */
if (err > 0) {
- err = phy_read_status(phydev);
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
+
+ } else if (0 == phydev->link_timeout--) {
+ int idx;
- if (err)
+ needs_aneg = 1;
+ /* If we have the magic_aneg bit,
+ * we try again */
+ if (phydev->drv->flags & PHY_HAS_MAGICANEG)
break;
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- } else {
- phydev->state = PHY_NOLINK;
- netif_carrier_off(phydev->attached_dev);
- }
+ /* The timer expired, and we still
+ * don't have a setting, so we try
+ * forcing it until we find one that
+ * works, starting from the fastest speed,
+ * and working our way down */
+ idx = phy_find_valid(0, phydev->supported);
- phydev->adjust_link(phydev->attached_dev);
+ phydev->speed = settings[idx].speed;
+ phydev->duplex = settings[idx].duplex;
- } else if (0 == phydev->link_timeout--) {
- /* The counter expired, so either we
- * switch to forced mode, or the
- * magic_aneg bit exists, and we try aneg
- * again */
- if (!(phydev->drv->flags & PHY_HAS_MAGICANEG)) {
- int idx;
-
- /* We'll start from the
- * fastest speed, and work
- * our way down */
- idx = phy_find_valid(0,
- phydev->supported);
-
- phydev->speed = settings[idx].speed;
- phydev->duplex = settings[idx].duplex;
-
- phydev->autoneg = AUTONEG_DISABLE;
- phydev->state = PHY_FORCING;
- phydev->link_timeout =
- PHY_FORCE_TIMEOUT;
-
- pr_info("Trying %d/%s\n",
- phydev->speed,
- DUPLEX_FULL ==
- phydev->duplex ?
- "FULL" : "HALF");
- }
+ phydev->autoneg = AUTONEG_DISABLE;
- needs_aneg = 1;
+ pr_info("Trying %d/%s\n", phydev->speed,
+ DUPLEX_FULL ==
+ phydev->duplex ?
+ "FULL" : "HALF");
}
break;
case PHY_NOLINK:
@@ -762,7 +779,7 @@ static void phy_timer(unsigned long data)
}
break;
case PHY_FORCING:
- err = phy_read_status(phydev);
+ err = genphy_update_link(phydev);
if (err)
break;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3bbd5e70c209..b01fc70a57db 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -59,6 +59,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
dev->duplex = -1;
dev->pause = dev->asym_pause = 0;
dev->link = 1;
+ dev->interface = PHY_INTERFACE_MODE_GMII;
dev->autoneg = AUTONEG_ENABLE;
@@ -137,11 +138,12 @@ void phy_prepare_link(struct phy_device *phydev,
* the desired functionality.
*/
struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
- void (*handler)(struct net_device *), u32 flags)
+ void (*handler)(struct net_device *), u32 flags,
+ u32 interface)
{
struct phy_device *phydev;
- phydev = phy_attach(dev, phy_id, flags);
+ phydev = phy_attach(dev, phy_id, flags, interface);
if (IS_ERR(phydev))
return phydev;
@@ -186,7 +188,7 @@ static int phy_compare_id(struct device *dev, void *data)
}
struct phy_device *phy_attach(struct net_device *dev,
- const char *phy_id, u32 flags)
+ const char *phy_id, u32 flags, u32 interface)
{
struct bus_type *bus = &mdio_bus_type;
struct phy_device *phydev;
@@ -231,6 +233,20 @@ struct phy_device *phy_attach(struct net_device *dev,
phydev->dev_flags = flags;
+ phydev->interface = interface;
+
+ /* Do initial configuration here, now that
+ * we have certain key parameters
+ * (dev_flags and interface) */
+ if (phydev->drv->config_init) {
+ int err;
+
+ err = phydev->drv->config_init(phydev);
+
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
return phydev;
}
EXPORT_SYMBOL(phy_attach);
@@ -427,6 +443,7 @@ int genphy_update_link(struct phy_device *phydev)
return 0;
}
+EXPORT_SYMBOL(genphy_update_link);
/* genphy_read_status
*
@@ -611,13 +628,8 @@ static int phy_probe(struct device *dev)
spin_unlock(&phydev->lock);
- if (err < 0)
- return err;
-
- if (phydev->drv->config_init)
- err = phydev->drv->config_init(phydev);
-
return err;
+
}
static int phy_remove(struct device *dev)
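
For reference, phy_connect() now takes the connection type as a fifth
argument, so a MAC driver's call would look roughly like this (hypothetical
caller; phy_name and my_adjust_link are placeholders, not part of this patch):

	phydev = phy_connect(dev, phy_name, &my_adjust_link, 0,
			     PHY_INTERFACE_MODE_GMII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);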
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index b977ed85ff39..45d3ca431957 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -571,8 +571,8 @@ static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
{
unsigned int val;
- val = (mdio_read(ioaddr, MII_BMCR) | BMCR_RESET) & 0xffff;
- mdio_write(ioaddr, MII_BMCR, val);
+ mdio_write(ioaddr, MII_BMCR, BMCR_RESET);
+ val = mdio_read(ioaddr, MII_BMCR);
}
static void rtl8169_check_link_status(struct net_device *dev,
@@ -1406,6 +1406,22 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
free_netdev(dev);
}
+static void rtl8169_phy_reset(struct net_device *dev,
+ struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ tp->phy_reset_enable(ioaddr);
+ for (i = 0; i < 100; i++) {
+ if (!tp->phy_reset_pending(ioaddr))
+ return;
+ msleep(1);
+ }
+ if (netif_msg_link(tp))
+ printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
+}
+
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -1434,6 +1450,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
+ rtl8169_phy_reset(dev, tp);
+
rtl8169_set_speed(dev, autoneg, speed, duplex);
if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
diff --git a/drivers/net/sk98lin/skethtool.c b/drivers/net/sk98lin/skethtool.c
index e5cb5b548b88..36460694eb82 100644
--- a/drivers/net/sk98lin/skethtool.c
+++ b/drivers/net/sk98lin/skethtool.c
@@ -581,6 +581,30 @@ static int setRxCsum(struct net_device *dev, u32 data)
return 0;
}
+static int getRegsLen(struct net_device *dev)
+{
+ return 0x4000;
+}
+
+/*
+ * Returns a copy of the whole control register region.
+ * Note: the RAM address register is skipped because accessing it
+ * can cause bus hangs!
+ */
+static void getRegs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ DEV_NET *pNet = netdev_priv(dev);
+ const void __iomem *io = pNet->pAC->IoBase;
+
+ regs->version = 1;
+ memset(p, 0, regs->len);
+ memcpy_fromio(p, io, B3_RAM_ADDR);
+
+ memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+ regs->len - B3_RI_WTO_R1);
+}
+
const struct ethtool_ops SkGeEthtoolOps = {
.get_settings = getSettings,
.set_settings = setSettings,
@@ -599,4 +623,6 @@ const struct ethtool_ops SkGeEthtoolOps = {
.set_tx_csum = setTxCsum,
.get_rx_csum = getRxCsum,
.set_rx_csum = setRxCsum,
+ .get_regs = getRegs,
+ .get_regs_len = getRegsLen,
};
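
With get_regs()/get_regs_len() wired up, the usual "ethtool -d ethX" register
dump should now work against sk98lin, returning the 0x4000-byte control
register region with the RAM address window skipped as noted above.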
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index d4913c3de2a1..a5d41ebc9fb4 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -113,6 +113,7 @@
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/ip.h>
+#include <linux/mii.h>
#include "h/skdrv1st.h"
#include "h/skdrv2nd.h"
@@ -2843,6 +2844,56 @@ unsigned long Flags; /* for spin lock */
return(&pAC->stats);
} /* SkGeStats */
+/*
+ * Basic MII register access
+ */
+static int SkGeMiiIoctl(struct net_device *dev,
+ struct mii_ioctl_data *data, int cmd)
+{
+ DEV_NET *pNet = netdev_priv(dev);
+ SK_AC *pAC = pNet->pAC;
+ SK_IOC IoC = pAC->IoBase;
+ int Port = pNet->PortNr;
+ SK_GEPORT *pPrt = &pAC->GIni.GP[Port];
+ unsigned long Flags;
+ int err = 0;
+ int reg = data->reg_num & 0x1f;
+ SK_U16 val = data->val_in;
+
+ if (!netif_running(dev))
+ return -ENODEV; /* Phy still in reset */
+
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+ switch(cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = pPrt->PhyAddr;
+
+ /* fallthru */
+ case SIOCGMIIREG:
+ if (pAC->GIni.GIGenesis)
+ SkXmPhyRead(pAC, IoC, Port, reg, &val);
+ else
+ SkGmPhyRead(pAC, IoC, Port, reg, &val);
+
+ data->val_out = val;
+ break;
+
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ err = -EPERM;
+
+ else if (pAC->GIni.GIGenesis)
+ SkXmPhyWrite(pAC, IoC, Port, reg, val);
+ else
+ SkGmPhyWrite(pAC, IoC, Port, reg, val);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+ return err;
+}
+
/*****************************************************************************
*
@@ -2876,6 +2927,9 @@ int HeaderLength = sizeof(SK_U32) + sizeof(SK_U32);
pNet = netdev_priv(dev);
pAC = pNet->pAC;
+ if (cmd == SIOCGMIIPHY || cmd == SIOCSMIIREG || cmd == SIOCGMIIREG)
+ return SkGeMiiIoctl(dev, if_mii(rq), cmd);
+
if(copy_from_user(&Ioctl, rq->ifr_data, sizeof(SK_GE_IOCTL))) {
return -EFAULT;
}
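
For context, SkGeMiiIoctl() services the standard MII ioctls, so PHY registers
become reachable from userspace.  A rough sketch of such a caller (illustrative
only, not part of this patch; needs <sys/ioctl.h>, <net/if.h>, <linux/mii.h>
and <linux/sockios.h>, and skfd is an open socket):

	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
	ioctl(skfd, SIOCGMIIPHY, &ifr);		/* fills mii->phy_id */
	mii->reg_num = MII_BMSR;
	ioctl(skfd, SIOCGMIIREG, &ifr);		/* BMSR value in mii->val_out */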
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b2949035f66a..27b537c8d5e3 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2154,8 +2154,6 @@ static void yukon_link_down(struct skge_port *skge)
int port = skge->port;
u16 ctrl;
- gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
-
ctrl = gma_read16(hw, port, GM_GP_CTRL);
ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
gma_write16(hw, port, GM_GP_CTRL, ctrl);
@@ -2167,7 +2165,6 @@ static void yukon_link_down(struct skge_port *skge)
gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
}
- yukon_reset(hw, port);
skge_link_down(skge);
yukon_init(hw, port);
@@ -2255,6 +2252,7 @@ static void skge_phy_reset(struct skge_port *skge)
{
struct skge_hw *hw = skge->hw;
int port = skge->port;
+ struct net_device *dev = hw->dev[port];
netif_stop_queue(skge->netdev);
netif_carrier_off(skge->netdev);
@@ -2268,6 +2266,8 @@ static void skge_phy_reset(struct skge_port *skge)
yukon_init(hw, port);
}
mutex_unlock(&hw->phy_mutex);
+
+ dev->set_multicast_list(dev);
}
/* Basic MII support */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 16616f5440d0..0ef1848b9761 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -104,6 +104,7 @@ static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
@@ -676,17 +677,15 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
/* Flush Rx MAC FIFO on any flow control or error */
sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
- /* Set threshold to 0xa (64 bytes)
- * ASF disabled so no need to do WA dev #4.30
- */
- sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
+	/* Set threshold to 0xa (64 bytes) + 1 to work around a pause bug */
+ sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
/* Configure Tx MAC FIFO */
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
- sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 512/8);
+ sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
if (hw->dev[port]->mtu > ETH_DATA_LEN) {
/* set Tx GMAC FIFO Almost Empty Threshold */
@@ -1060,7 +1059,8 @@ static int sky2_rx_start(struct sky2_port *sky2)
sky2->rx_put = sky2->rx_next = 0;
sky2_qset(hw, rxq);
- if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+ (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0)) {
/* MAC Rx RAM Read is controlled by hardware */
sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
}
@@ -1453,7 +1453,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
if (unlikely(netif_msg_tx_done(sky2)))
printk(KERN_DEBUG "%s: tx done %u\n",
dev->name, idx);
- dev_kfree_skb(re->skb);
+ dev_kfree_skb_any(re->skb);
}
le->opcode = 0; /* paranoia */
@@ -1509,7 +1509,7 @@ static int sky2_down(struct net_device *dev)
/* WA for dev. #4.209 */
if (hw->chip_id == CHIP_ID_YUKON_EC_U
- && hw->chip_rev == CHIP_REV_YU_EC_U_A1)
+ && (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
sky2->speed != SPEED_1000 ?
TX_STFW_ENA : TX_STFW_DIS);
@@ -2065,7 +2065,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
case OP_RXSTAT:
skb = sky2_receive(dev, length, status);
if (!skb)
- break;
+ goto force_update;
skb->protocol = eth_type_trans(skb, dev);
dev->last_rx = jiffies;
@@ -2081,8 +2081,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
/* Update receiver after 16 frames */
if (++buf_write[le->link] == RX_BUF_WRITE) {
- sky2_put_idx(hw, rxqaddr[le->link],
- sky2->rx_put);
+force_update:
+ sky2_put_idx(hw, rxqaddr[le->link], sky2->rx_put);
buf_write[le->link] = 0;
}
@@ -3311,7 +3311,7 @@ static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
return IRQ_NONE;
if (status & Y2_IS_IRQ_SW) {
- hw->msi_detected = 1;
+ hw->msi = 1;
wake_up(&hw->msi_wait);
sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
}
@@ -3330,7 +3330,7 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
- err = request_irq(pdev->irq, sky2_test_intr, IRQF_SHARED, DRV_NAME, hw);
+ err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
if (err) {
printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
pci_name(pdev), pdev->irq);
@@ -3340,9 +3340,9 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
sky2_read8(hw, B0_CTST);
- wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
+ wait_event_timeout(hw->msi_wait, hw->msi, HZ/10);
- if (!hw->msi_detected) {
+ if (!hw->msi) {
/* MSI test failed, go back to INTx mode */
printk(KERN_INFO PFX "%s: No interrupt generated using MSI, "
"switching to INTx mode.\n",
@@ -3475,7 +3475,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_netdev;
}
- err = request_irq(pdev->irq, sky2_intr, IRQF_SHARED, dev->name, hw);
+ err = request_irq(pdev->irq, sky2_intr, hw->msi ? 0 : IRQF_SHARED,
+ dev->name, hw);
if (err) {
printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
pci_name(pdev), pdev->irq);
@@ -3505,7 +3506,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
return 0;
err_out_unregister:
- pci_disable_msi(pdev);
+ if (hw->msi)
+ pci_disable_msi(pdev);
unregister_netdev(dev);
err_out_free_netdev:
free_netdev(dev);
@@ -3548,7 +3550,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
sky2_read8(hw, B0_CTST);
free_irq(pdev->irq, hw);
- pci_disable_msi(pdev);
+ if (hw->msi)
+ pci_disable_msi(pdev);
pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 6d2a23f66c9a..7760545edbf2 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -383,8 +383,13 @@ enum {
CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */
- CHIP_REV_YU_EC_U_A0 = 0,
- CHIP_REV_YU_EC_U_A1 = 1,
+ CHIP_REV_YU_EC_U_A0 = 1,
+ CHIP_REV_YU_EC_U_A1 = 2,
+ CHIP_REV_YU_EC_U_B0 = 3,
+
+ CHIP_REV_YU_FE_A1 = 1,
+ CHIP_REV_YU_FE_A2 = 2,
+
};
/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
@@ -1895,7 +1900,7 @@ struct sky2_hw {
dma_addr_t st_dma;
struct timer_list idle_timer;
- int msi_detected;
+ int msi;
wait_queue_head_t msi_wait;
};
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 41c503d8bac4..c06ecc8002b9 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -264,8 +264,6 @@ enum alta_offsets {
ASICCtrl = 0x30,
EEData = 0x34,
EECtrl = 0x36,
- TxStartThresh = 0x3c,
- RxEarlyThresh = 0x3e,
FlashAddr = 0x40,
FlashData = 0x44,
TxStatus = 0x46,
@@ -790,6 +788,7 @@ static int netdev_open(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
+ unsigned long flags;
int i;
/* Do we need to reset the chip??? */
@@ -834,6 +833,10 @@ static int netdev_open(struct net_device *dev)
iowrite8(0x01, ioaddr + DebugCtrl1);
netif_start_queue(dev);
+ spin_lock_irqsave(&np->lock, flags);
+ reset_tx(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+
iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
if (netif_msg_ifup(np))
@@ -1081,6 +1084,8 @@ reset_tx (struct net_device *dev)
/* free all tx skbuff */
for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].next_desc = 0;
+
skb = np->tx_skbuff[i];
if (skb) {
pci_unmap_single(np->pci_dev,
@@ -1096,6 +1101,10 @@ reset_tx (struct net_device *dev)
}
np->cur_tx = np->dirty_tx = 0;
np->cur_task = 0;
+
+ np->last_tx = NULL;
+ iowrite8(127, ioaddr + TxDMAPollPeriod);
+
iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
return 0;
}
@@ -1111,6 +1120,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
int tx_cnt;
int tx_status;
int handled = 0;
+ int i;
do {
@@ -1153,21 +1163,24 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
np->stats.tx_fifo_errors++;
if (tx_status & 0x02)
np->stats.tx_window_errors++;
+
/*
** This reset has been verified on
** DFE-580TX boards ! phdm@macqel.be.
*/
if (tx_status & 0x10) { /* TxUnderrun */
- unsigned short txthreshold;
-
- txthreshold = ioread16 (ioaddr + TxStartThresh);
/* Restart Tx FIFO and transmitter */
sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
- iowrite16 (txthreshold, ioaddr + TxStartThresh);
/* No need to reset the Tx pointer here */
}
- /* Restart the Tx. */
- iowrite16 (TxEnable, ioaddr + MACCtrl1);
+ /* Restart the Tx. Need to make sure tx enabled */
+ i = 10;
+ do {
+ iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
+ if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
+ break;
+ mdelay(1);
+ } while (--i);
}
/* Yup, this is a documentation bug. It cost me *hours*. */
iowrite16 (0, ioaddr + TxStatus);
@@ -1629,6 +1642,14 @@ static int netdev_close(struct net_device *dev)
struct sk_buff *skb;
int i;
+ /* Wait and kill tasklet */
+ tasklet_kill(&np->rx_tasklet);
+ tasklet_kill(&np->tx_tasklet);
+ np->cur_tx = 0;
+ np->dirty_tx = 0;
+ np->cur_task = 0;
+ np->last_tx = NULL;
+
netif_stop_queue(dev);
if (netif_msg_ifdown(np)) {
@@ -1643,12 +1664,26 @@ static int netdev_close(struct net_device *dev)
/* Disable interrupts by clearing the interrupt mask. */
iowrite16(0x0000, ioaddr + IntrEnable);
+	/* Disable Rx and Tx DMA so resources can be released safely */
+ iowrite32(0x500, ioaddr + DMACtrl);
+
/* Stop the chip's Tx and Rx processes. */
iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
- /* Wait and kill tasklet */
- tasklet_kill(&np->rx_tasklet);
- tasklet_kill(&np->tx_tasklet);
+ for (i = 2000; i > 0; i--) {
+ if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
+ break;
+ mdelay(1);
+ }
+
+ iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
+		  ioaddr + ASICCtrl + 2);
+
+ for (i = 2000; i > 0; i--) {
+		if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+ break;
+ mdelay(1);
+ }
#ifdef __i386__
if (netif_msg_hw(np)) {
@@ -1686,6 +1721,7 @@ static int netdev_close(struct net_device *dev)
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].next_desc = 0;
skb = np->tx_skbuff[i];
if (skb) {
pci_unmap_single(np->pci_dev,
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index cd142d0302bc..8f4ecc1109cb 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1771,7 +1771,7 @@ static struct pci_driver olympic_driver = {
static int __init olympic_pci_init(void)
{
- return pci_module_init (&olympic_driver) ;
+ return pci_register_driver(&olympic_driver) ;
}
static void __exit olympic_pci_cleanup(void)
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
new file mode 100644
index 000000000000..893808ab3742
--- /dev/null
+++ b/drivers/net/tsi108_eth.c
@@ -0,0 +1,1708 @@
+/*******************************************************************************
+
+ Copyright(c) 2006 Tundra Semiconductor Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+*******************************************************************************/
+
+/* This driver is based on the driver code originally developed
+ * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by
+ * scott.wood@timesys.com
+ * Copyright (C) 2003 TimeSys Corporation
+ *
+ * Currently changes from original version are:
+ * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com)
+ * - modifications to handle two ports independently and support for
+ * additional PHY devices (alexandre.bounine@tundra.com)
+ * - Get hardware information from platform device. (tie-fei.zang@freescale.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/tsi108.h>
+
+#include "tsi108_eth.h"
+
+#define MII_READ_DELAY 10000 /* max link wait time in msec */
+
+#define TSI108_RXRING_LEN 256
+
+/* NOTE: The driver currently does not support receiving packets
+ * larger than the buffer size, so don't decrease this (unless you
+ * want to add such support).
+ */
+#define TSI108_RXBUF_SIZE 1536
+
+#define TSI108_TXRING_LEN 256
+
+#define TSI108_TX_INT_FREQ 64
+
+/* Check the phy status every half a second. */
+#define CHECK_PHY_INTERVAL (HZ/2)
+
+static int tsi108_init_one(struct platform_device *pdev);
+static int tsi108_ether_remove(struct platform_device *pdev);
+
+struct tsi108_prv_data {
+ void __iomem *regs; /* Base of normal regs */
+ void __iomem *phyregs; /* Base of register bank used for PHY access */
+
+ unsigned int phy; /* Index of PHY for this interface */
+ unsigned int irq_num;
+ unsigned int id;
+
+ struct timer_list timer;/* Timer that triggers the check phy function */
+ unsigned int rxtail; /* Next entry in rxring to read */
+ unsigned int rxhead; /* Next entry in rxring to give a new buffer */
+ unsigned int rxfree; /* Number of free, allocated RX buffers */
+
+ unsigned int rxpending; /* Non-zero if there are still descriptors
+ * to be processed from a previous descriptor
+ * interrupt condition that has been cleared */
+
+ unsigned int txtail; /* Next TX descriptor to check status on */
+ unsigned int txhead; /* Next TX descriptor to use */
+
+ /* Number of free TX descriptors. This could be calculated from
+ * rxhead and rxtail if one descriptor were left unused to disambiguate
+ * full and empty conditions, but it's simpler to just keep track
+ * explicitly. */
+
+ unsigned int txfree;
+
+ unsigned int phy_ok; /* The PHY is currently powered on. */
+
+ /* PHY status (duplex is 1 for half, 2 for full,
+ * so that the default 0 indicates that neither has
+ * yet been configured). */
+
+ unsigned int link_up;
+ unsigned int speed;
+ unsigned int duplex;
+
+ tx_desc *txring;
+ rx_desc *rxring;
+ struct sk_buff *txskbs[TSI108_TXRING_LEN];
+ struct sk_buff *rxskbs[TSI108_RXRING_LEN];
+
+ dma_addr_t txdma, rxdma;
+
+ /* txlock nests in misclock and phy_lock */
+
+ spinlock_t txlock, misclock;
+
+ /* stats is used to hold the upper bits of each hardware counter,
+ * and tmpstats is used to hold the full values for returning
+ * to the caller of get_stats(). They must be separate in case
+ * an overflow interrupt occurs before the stats are consumed.
+ */
+
+ struct net_device_stats stats;
+ struct net_device_stats tmpstats;
+
+ /* These stats are kept separate in hardware, thus require individual
+ * fields for handling carry. They are combined in get_stats.
+ */
+
+ unsigned long rx_fcs; /* Add to rx_frame_errors */
+ unsigned long rx_short_fcs; /* Add to rx_frame_errors */
+ unsigned long rx_long_fcs; /* Add to rx_frame_errors */
+ unsigned long rx_underruns; /* Add to rx_length_errors */
+ unsigned long rx_overruns; /* Add to rx_length_errors */
+
+ unsigned long tx_coll_abort; /* Add to tx_aborted_errors/collisions */
+ unsigned long tx_pause_drop; /* Add to tx_aborted_errors */
+
+ unsigned long mc_hash[16];
+ u32 msg_enable; /* debug message level */
+ struct mii_if_info mii_if;
+ unsigned int init_media;
+};
+
+/* Structure for a device driver */
+
+static struct platform_driver tsi_eth_driver = {
+ .probe = tsi108_init_one,
+ .remove = tsi108_ether_remove,
+ .driver = {
+ .name = "tsi-ethernet",
+ },
+};
+
+static void tsi108_timed_checker(unsigned long dev_ptr);
+
+static void dump_eth_one(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+
+ printk("Dumping %s...\n", dev->name);
+ printk("intstat %x intmask %x phy_ok %d"
+ " link %d speed %d duplex %d\n",
+ TSI_READ(TSI108_EC_INTSTAT),
+ TSI_READ(TSI108_EC_INTMASK), data->phy_ok,
+ data->link_up, data->speed, data->duplex);
+
+ printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n",
+ data->txhead, data->txtail, data->txfree,
+ TSI_READ(TSI108_EC_TXSTAT),
+ TSI_READ(TSI108_EC_TXESTAT),
+ TSI_READ(TSI108_EC_TXERR));
+
+ printk("RX: head %d, tail %d, free %d, stat %x,"
+ " estat %x, err %x, pending %d\n\n",
+ data->rxhead, data->rxtail, data->rxfree,
+ TSI_READ(TSI108_EC_RXSTAT),
+ TSI_READ(TSI108_EC_RXESTAT),
+ TSI_READ(TSI108_EC_RXERR), data->rxpending);
+}
+
+/* Synchronization is needed between the thread and up/down events.
+ * Note that the PHY is accessed through the same registers for both
+ * interfaces, so this can't be made interface-specific.
+ */
+
+static DEFINE_SPINLOCK(phy_lock);
+
+static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
+{
+ unsigned i;
+
+ TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
+ (data->phy << TSI108_MAC_MII_ADDR_PHY) |
+ (reg << TSI108_MAC_MII_ADDR_REG));
+ TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0);
+ TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
+ for (i = 0; i < 100; i++) {
+ if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
+ (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
+ break;
+ udelay(10);
+ }
+
+ if (i == 100)
+ return 0xffff;
+ else
+ return (TSI_READ_PHY(TSI108_MAC_MII_DATAIN));
+}
+
+static void tsi108_write_mii(struct tsi108_prv_data *data,
+ int reg, u16 val)
+{
+ unsigned i = 100;
+ TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
+ (data->phy << TSI108_MAC_MII_ADDR_PHY) |
+ (reg << TSI108_MAC_MII_ADDR_REG));
+ TSI_WRITE_PHY(TSI108_MAC_MII_DATAOUT, val);
+ while (i--) {
+ if(!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
+ TSI108_MAC_MII_IND_BUSY))
+ break;
+ udelay(10);
+ }
+}
+
+static int tsi108_mdio_read(struct net_device *dev, int addr, int reg)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ return tsi108_read_mii(data, reg);
+}
+
+static void tsi108_mdio_write(struct net_device *dev, int addr, int reg, int val)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ tsi108_write_mii(data, reg, val);
+}
+
+static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
+ int reg, u16 val)
+{
+ unsigned i = 1000;
+ TSI_WRITE(TSI108_MAC_MII_ADDR,
+ (0x1e << TSI108_MAC_MII_ADDR_PHY)
+ | (reg << TSI108_MAC_MII_ADDR_REG));
+ TSI_WRITE(TSI108_MAC_MII_DATAOUT, val);
+ while(i--) {
+ if(!(TSI_READ(TSI108_MAC_MII_IND) & TSI108_MAC_MII_IND_BUSY))
+ return;
+ udelay(10);
+ }
+ printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+}
+
+static int mii_speed(struct mii_if_info *mii)
+{
+ int advert, lpa, val, media;
+ int lpa2 = 0;
+ int speed;
+
+ if (!mii_link_ok(mii))
+ return 0;
+
+ val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR);
+ if ((val & BMSR_ANEGCOMPLETE) == 0)
+ return 0;
+
+ advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE);
+ lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA);
+ media = mii_nway_result(advert & lpa);
+
+ if (mii->supports_gmii)
+ lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);
+
+ speed = lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
+ (media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10);
+ return speed;
+}
+
+static void tsi108_check_phy(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 mac_cfg2_reg, portctrl_reg;
+ u32 duplex;
+ u32 speed;
+ unsigned long flags;
+
+ /* Do a dummy read, as for some reason the first read
+ * after the link comes up returns link down, even if
+ * it's been a while since the link came up.
+ */
+
+ spin_lock_irqsave(&phy_lock, flags);
+
+ if (!data->phy_ok)
+ goto out;
+
+ tsi108_read_mii(data, MII_BMSR);
+
+ duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
+ data->init_media = 0;
+
+ if (netif_carrier_ok(dev)) {
+
+ speed = mii_speed(&data->mii_if);
+
+ if ((speed != data->speed) || duplex) {
+
+ mac_cfg2_reg = TSI_READ(TSI108_MAC_CFG2);
+ portctrl_reg = TSI_READ(TSI108_EC_PORTCTRL);
+
+ mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
+
+ if (speed == 1000) {
+ mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
+ portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
+ } else {
+ mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
+ portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
+ }
+
+ data->speed = speed;
+
+ if (data->mii_if.full_duplex) {
+ mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
+ portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
+ data->duplex = 2;
+ } else {
+ mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
+ portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
+ data->duplex = 1;
+ }
+
+ TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
+ TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
+
+ if (data->link_up == 0) {
+ /* The manual says it can take 3-4 usecs for the speed change
+ * to take effect.
+ */
+ udelay(5);
+
+ spin_lock(&data->txlock);
+ if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
+ netif_wake_queue(dev);
+
+ data->link_up = 1;
+ spin_unlock(&data->txlock);
+ }
+ }
+
+ } else {
+ if (data->link_up == 1) {
+ netif_stop_queue(dev);
+ data->link_up = 0;
+ printk(KERN_NOTICE "%s : link is down\n", dev->name);
+ }
+
+ goto out;
+ }
+
+
+out:
+ spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+static inline void
+tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
+ unsigned long *upper)
+{
+ if (carry & carry_bit)
+ *upper += carry_shift;
+}
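+
+/* Example of the carry handling (constants from tsi108_eth.h): the RX byte
+ * count carries at bit 24, so when TSI108_STAT_CARRY1_RXBYTES is seen,
+ * TSI108_STAT_RXBYTES_CARRY (1 << 24) is added to the software upper half;
+ * tsi108_read_stat() later combines that upper half with the live hardware
+ * register to return the full count.
+ */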
+
+static void tsi108_stat_carry(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 carry1, carry2;
+
+ spin_lock_irq(&data->misclock);
+
+ carry1 = TSI_READ(TSI108_STAT_CARRY1);
+ carry2 = TSI_READ(TSI108_STAT_CARRY2);
+
+ TSI_WRITE(TSI108_STAT_CARRY1, carry1);
+ TSI_WRITE(TSI108_STAT_CARRY2, carry2);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
+ TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
+ TSI108_STAT_RXPKTS_CARRY,
+ &data->stats.rx_packets);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
+ TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
+ TSI108_STAT_RXMCAST_CARRY,
+ &data->stats.multicast);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
+ TSI108_STAT_RXALIGN_CARRY,
+ &data->stats.rx_frame_errors);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
+ TSI108_STAT_RXLENGTH_CARRY,
+ &data->stats.rx_length_errors);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
+ TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
+ TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
+ TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
+ TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
+ TSI108_STAT_RXDROP_CARRY,
+ &data->stats.rx_missed_errors);
+
+ tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
+ TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+ tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
+ TSI108_STAT_TXPKTS_CARRY,
+ &data->stats.tx_packets);
+
+ tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
+ TSI108_STAT_TXEXDEF_CARRY,
+ &data->stats.tx_aborted_errors);
+
+ tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
+ TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
+
+ tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
+ TSI108_STAT_TXTCOL_CARRY,
+ &data->stats.collisions);
+
+ tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
+ TSI108_STAT_TXPAUSEDROP_CARRY,
+ &data->tx_pause_drop);
+
+ spin_unlock_irq(&data->misclock);
+}
+
+/* Read a stat counter atomically with respect to carries.
+ * data->misclock must be held.
+ */
+static inline unsigned long
+tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
+ int carry_shift, unsigned long *upper)
+{
+ int carryreg;
+ unsigned long val;
+
+ if (reg < 0xb0)
+ carryreg = TSI108_STAT_CARRY1;
+ else
+ carryreg = TSI108_STAT_CARRY2;
+
+ again:
+ val = TSI_READ(reg) | *upper;
+
+ /* Check to see if it overflowed, but the interrupt hasn't
+ * been serviced yet. If so, handle the carry here, and
+ * try again.
+ */
+
+ if (unlikely(TSI_READ(carryreg) & carry_bit)) {
+ *upper += carry_shift;
+ TSI_WRITE(carryreg, carry_bit);
+ goto again;
+ }
+
+ return val;
+}
+
+static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
+{
+ unsigned long excol;
+
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ spin_lock_irq(&data->misclock);
+
+ data->tmpstats.rx_packets =
+ tsi108_read_stat(data, TSI108_STAT_RXPKTS,
+ TSI108_STAT_CARRY1_RXPKTS,
+ TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
+
+ data->tmpstats.tx_packets =
+ tsi108_read_stat(data, TSI108_STAT_TXPKTS,
+ TSI108_STAT_CARRY2_TXPKTS,
+ TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
+
+ data->tmpstats.rx_bytes =
+ tsi108_read_stat(data, TSI108_STAT_RXBYTES,
+ TSI108_STAT_CARRY1_RXBYTES,
+ TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+ data->tmpstats.tx_bytes =
+ tsi108_read_stat(data, TSI108_STAT_TXBYTES,
+ TSI108_STAT_CARRY2_TXBYTES,
+ TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+ data->tmpstats.multicast =
+ tsi108_read_stat(data, TSI108_STAT_RXMCAST,
+ TSI108_STAT_CARRY1_RXMCAST,
+ TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
+
+ excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
+ TSI108_STAT_CARRY2_TXEXCOL,
+ TSI108_STAT_TXEXCOL_CARRY,
+ &data->tx_coll_abort);
+
+ data->tmpstats.collisions =
+ tsi108_read_stat(data, TSI108_STAT_TXTCOL,
+ TSI108_STAT_CARRY2_TXTCOL,
+ TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
+
+ data->tmpstats.collisions += excol;
+
+ data->tmpstats.rx_length_errors =
+ tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
+ TSI108_STAT_CARRY1_RXLENGTH,
+ TSI108_STAT_RXLENGTH_CARRY,
+ &data->stats.rx_length_errors);
+
+ data->tmpstats.rx_length_errors +=
+ tsi108_read_stat(data, TSI108_STAT_RXRUNT,
+ TSI108_STAT_CARRY1_RXRUNT,
+ TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+ data->tmpstats.rx_length_errors +=
+ tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
+ TSI108_STAT_CARRY1_RXJUMBO,
+ TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+ data->tmpstats.rx_frame_errors =
+ tsi108_read_stat(data, TSI108_STAT_RXALIGN,
+ TSI108_STAT_CARRY1_RXALIGN,
+ TSI108_STAT_RXALIGN_CARRY,
+ &data->stats.rx_frame_errors);
+
+ data->tmpstats.rx_frame_errors +=
+ tsi108_read_stat(data, TSI108_STAT_RXFCS,
+ TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
+ &data->rx_fcs);
+
+ data->tmpstats.rx_frame_errors +=
+ tsi108_read_stat(data, TSI108_STAT_RXFRAG,
+ TSI108_STAT_CARRY1_RXFRAG,
+ TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+ data->tmpstats.rx_missed_errors =
+ tsi108_read_stat(data, TSI108_STAT_RXDROP,
+ TSI108_STAT_CARRY1_RXDROP,
+ TSI108_STAT_RXDROP_CARRY,
+ &data->stats.rx_missed_errors);
+
+ /* These two are maintained by software. */
+ data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
+ data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
+
+ data->tmpstats.tx_aborted_errors =
+ tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
+ TSI108_STAT_CARRY2_TXEXDEF,
+ TSI108_STAT_TXEXDEF_CARRY,
+ &data->stats.tx_aborted_errors);
+
+ data->tmpstats.tx_aborted_errors +=
+ tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
+ TSI108_STAT_CARRY2_TXPAUSE,
+ TSI108_STAT_TXPAUSEDROP_CARRY,
+ &data->tx_pause_drop);
+
+ data->tmpstats.tx_aborted_errors += excol;
+
+ data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
+ data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
+ data->tmpstats.rx_crc_errors +
+ data->tmpstats.rx_frame_errors +
+ data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
+
+ spin_unlock_irq(&data->misclock);
+ return &data->tmpstats;
+}
+
+static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
+{
+ TSI_WRITE(TSI108_EC_RXQ_PTRHIGH,
+ TSI108_EC_RXQ_PTRHIGH_VALID);
+
+ TSI_WRITE(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
+ | TSI108_EC_RXCTRL_QUEUE0);
+}
+
+static void tsi108_restart_tx(struct tsi108_prv_data * data)
+{
+ TSI_WRITE(TSI108_EC_TXQ_PTRHIGH,
+ TSI108_EC_TXQ_PTRHIGH_VALID);
+
+ TSI_WRITE(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
+ TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
+}
+
+/* txlock must be held by caller, with IRQs disabled, and
+ * with permission to re-enable them when the lock is dropped.
+ */
+static void tsi108_complete_tx(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ int tx;
+ struct sk_buff *skb;
+ int release = 0;
+
+ while (!data->txfree || data->txhead != data->txtail) {
+ tx = data->txtail;
+
+ if (data->txring[tx].misc & TSI108_TX_OWN)
+ break;
+
+ skb = data->txskbs[tx];
+
+ if (!(data->txring[tx].misc & TSI108_TX_OK))
+ printk("%s: bad tx packet, misc %x\n",
+ dev->name, data->txring[tx].misc);
+
+ data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+ data->txfree++;
+
+ if (data->txring[tx].misc & TSI108_TX_EOF) {
+ dev_kfree_skb_any(skb);
+ release++;
+ }
+ }
+
+ if (release) {
+ if (is_valid_ether_addr(dev->dev_addr) && data->link_up)
+ netif_wake_queue(dev);
+ }
+}
+
+static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ int frags = skb_shinfo(skb)->nr_frags + 1;
+ int i;
+
+ if (!data->phy_ok && net_ratelimit())
+ printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
+
+ if (!data->link_up) {
+ printk(KERN_ERR "%s: Transmit while link is down!\n",
+ dev->name);
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (data->txfree < MAX_SKB_FRAGS + 1) {
+ netif_stop_queue(dev);
+
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: Transmit with full tx ring!\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
+ netif_stop_queue(dev);
+ }
+
+ spin_lock_irq(&data->txlock);
+
+ for (i = 0; i < frags; i++) {
+ int misc = 0;
+ int tx = data->txhead;
+
+ /* This is done to mark every TSI108_TX_INT_FREQth tx descriptor
+ * with the interrupt bit. TX descriptor-complete interrupts are
+ * enabled when the queue fills up, and masked when there is
+ * still free space. This way, when saturating the outbound
+ * link, the tx interrupts are kept to a reasonable level.
+ * When the queue is not full, reclamation of skbs still occurs
+ * as new packets are transmitted, or on a queue-empty
+ * interrupt.
+ */
+
+ if ((tx % TSI108_TX_INT_FREQ == 0) &&
+ ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
+ misc = TSI108_TX_INT;
+
+ data->txskbs[tx] = skb;
+
+ if (i == 0) {
+ data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
+ skb->len - skb->data_len, DMA_TO_DEVICE);
+ data->txring[tx].len = skb->len - skb->data_len;
+ misc |= TSI108_TX_SOF;
+ } else {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ data->txring[tx].buf0 =
+ dma_map_page(NULL, frag->page, frag->page_offset,
+ frag->size, DMA_TO_DEVICE);
+ data->txring[tx].len = frag->size;
+ }
+
+ if (i == frags - 1)
+ misc |= TSI108_TX_EOF;
+
+ if (netif_msg_pktdata(data)) {
+ int i;
+ printk("%s: Tx Frame contents (%d)\n", dev->name,
+ skb->len);
+ for (i = 0; i < skb->len; i++)
+ printk(" %2.2x", skb->data[i]);
+ printk(".\n");
+ }
+ data->txring[tx].misc = misc | TSI108_TX_OWN;
+
+ data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
+ data->txfree--;
+ }
+
+ tsi108_complete_tx(dev);
+
+ /* This must be done after the check for completed tx descriptors,
+ * so that the tail pointer is correct.
+ */
+
+ if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
+ tsi108_restart_tx(data);
+
+ spin_unlock_irq(&data->txlock);
+ return NETDEV_TX_OK;
+}
+
+static int tsi108_complete_rx(struct net_device *dev, int budget)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ int done = 0;
+
+ while (data->rxfree && done != budget) {
+ int rx = data->rxtail;
+ struct sk_buff *skb;
+
+ if (data->rxring[rx].misc & TSI108_RX_OWN)
+ break;
+
+ skb = data->rxskbs[rx];
+ data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+ data->rxfree--;
+ done++;
+
+ if (data->rxring[rx].misc & TSI108_RX_BAD) {
+ spin_lock_irq(&data->misclock);
+
+ if (data->rxring[rx].misc & TSI108_RX_CRC)
+ data->stats.rx_crc_errors++;
+ if (data->rxring[rx].misc & TSI108_RX_OVER)
+ data->stats.rx_fifo_errors++;
+
+ spin_unlock_irq(&data->misclock);
+
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+ if (netif_msg_pktdata(data)) {
+ int i;
+ printk("%s: Rx Frame contents (%d)\n",
+ dev->name, data->rxring[rx].len);
+ for (i = 0; i < data->rxring[rx].len; i++)
+ printk(" %2.2x", skb->data[i]);
+ printk(".\n");
+ }
+
+ skb->dev = dev;
+ skb_put(skb, data->rxring[rx].len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ dev->last_rx = jiffies;
+ }
+
+ return done;
+}
+
+static int tsi108_refill_rx(struct net_device *dev, int budget)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ int done = 0;
+
+ while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
+ int rx = data->rxhead;
+ struct sk_buff *skb;
+
+ data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2);
+ if (!skb)
+ break;
+
+ skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
+
+ data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
+ TSI108_RX_SKB_SIZE,
+ DMA_FROM_DEVICE);
+
+ /* Sometimes the hardware sets blen to zero after packet
+ * reception, even though the manual says that it's only ever
+ * modified by the driver.
+ */
+
+ data->rxring[rx].blen = TSI108_RX_SKB_SIZE;
+ data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
+
+ data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
+ data->rxfree++;
+ done++;
+ }
+
+ if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) &
+ TSI108_EC_RXSTAT_QUEUE0))
+ tsi108_restart_rx(data, dev);
+
+ return done;
+}
+
+static int tsi108_poll(struct net_device *dev, int *budget)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 estat = TSI_READ(TSI108_EC_RXESTAT);
+ u32 intstat = TSI_READ(TSI108_EC_INTSTAT);
+ int total_budget = min(*budget, dev->quota);
+ int num_received = 0, num_filled = 0, budget_used;
+
+ intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+ TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
+
+ TSI_WRITE(TSI108_EC_RXESTAT, estat);
+ TSI_WRITE(TSI108_EC_INTSTAT, intstat);
+
+ if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
+ num_received = tsi108_complete_rx(dev, total_budget);
+
+ /* This should normally fill no more slots than the number of
+ * packets received in tsi108_complete_rx(). The exception
+ * is when we previously ran out of memory for RX SKBs. In that
+ * case, it's helpful to obey the budget, not only so that the
+ * CPU isn't hogged, but so that memory (which may still be low)
+ * is not hogged by one device.
+ *
+ * A work unit is considered to be two SKBs to allow us to catch
+ * up when the ring has shrunk due to out-of-memory but we're
+ * still removing the full budget's worth of packets each time.
+ */
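+ /* For example (numbers purely illustrative): with a budget of 64,
+ * tsi108_complete_rx() above may deliver up to 64 packets while the
+ * refill below may post up to 128 buffers; budget_used then becomes
+ * max(64, 128 / 2) = 64, i.e. two refilled buffers count as one unit
+ * of work against the budget.
+ */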
+
+ if (data->rxfree < TSI108_RXRING_LEN)
+ num_filled = tsi108_refill_rx(dev, total_budget * 2);
+
+ if (intstat & TSI108_INT_RXERROR) {
+ u32 err = TSI_READ(TSI108_EC_RXERR);
+ TSI_WRITE(TSI108_EC_RXERR, err);
+
+ if (err) {
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: RX error %x\n",
+ dev->name, err);
+
+ if (!(TSI_READ(TSI108_EC_RXSTAT) &
+ TSI108_EC_RXSTAT_QUEUE0))
+ tsi108_restart_rx(data, dev);
+ }
+ }
+
+ if (intstat & TSI108_INT_RXOVERRUN) {
+ spin_lock_irq(&data->misclock);
+ data->stats.rx_fifo_errors++;
+ spin_unlock_irq(&data->misclock);
+ }
+
+ budget_used = max(num_received, num_filled / 2);
+
+ *budget -= budget_used;
+ dev->quota -= budget_used;
+
+ if (budget_used != total_budget) {
+ data->rxpending = 0;
+ netif_rx_complete(dev);
+
+ TSI_WRITE(TSI108_EC_INTMASK,
+ TSI_READ(TSI108_EC_INTMASK)
+ & ~(TSI108_INT_RXQUEUE0
+ | TSI108_INT_RXTHRESH |
+ TSI108_INT_RXOVERRUN |
+ TSI108_INT_RXERROR |
+ TSI108_INT_RXWAIT));
+
+ /* IRQs are level-triggered, so no need to re-check */
+ return 0;
+ } else {
+ data->rxpending = 1;
+ }
+
+ return 1;
+}
+
+static void tsi108_rx_int(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+
+ /* A race could cause dev to already be scheduled, so it's not an
+ * error if that happens (and interrupts shouldn't be re-masked,
+ * because that can cause harmful races, if poll has already
+ * unmasked them but not cleared LINK_STATE_SCHED).
+ *
+ * This can happen if this code races with tsi108_poll(), which masks
+ * the interrupts after tsi108_irq_one() read the mask, but before
+ * netif_rx_schedule is called. It could also happen due to calls
+ * from tsi108_check_rxring().
+ */
+
+ if (netif_rx_schedule_prep(dev)) {
+ /* Mask, rather than ack, the receive interrupts. The ack
+ * will happen in tsi108_poll().
+ */
+
+ TSI_WRITE(TSI108_EC_INTMASK,
+ TSI_READ(TSI108_EC_INTMASK) |
+ TSI108_INT_RXQUEUE0
+ | TSI108_INT_RXTHRESH |
+ TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
+ TSI108_INT_RXWAIT);
+ __netif_rx_schedule(dev);
+ } else {
+ if (!netif_running(dev)) {
+ /* This can happen if an interrupt occurs while the
+ * interface is being brought down, as the START
+ * bit is cleared before the stop function is called.
+ *
+ * In this case, the interrupts must be masked, or
+ * they will continue indefinitely.
+ *
+ * There's a race here if the interface is brought down
+ * and then up in rapid succession, as the device could
+ * be made running after the above check and before
+ * the masking below. This will only happen if the IRQ
+ * thread has a lower priority than the task bringing
+ * up the interface. Fixing this race would likely
+ * require changes in generic code.
+ */
+
+ TSI_WRITE(TSI108_EC_INTMASK,
+ TSI_READ
+ (TSI108_EC_INTMASK) |
+ TSI108_INT_RXQUEUE0 |
+ TSI108_INT_RXTHRESH |
+ TSI108_INT_RXOVERRUN |
+ TSI108_INT_RXERROR |
+ TSI108_INT_RXWAIT);
+ }
+ }
+}
+
+/* If the RX ring has run out of memory, try periodically
+ * to allocate some more, as otherwise poll would never
+ * get called (apart from the initial end-of-queue condition).
+ *
+ * This is called once per second (by default) from the thread.
+ */
+
+static void tsi108_check_rxring(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+
+ /* A poll is scheduled, as opposed to calling tsi108_refill_rx
+ * directly, so as to keep the receive path single-threaded
+ * (and thus not needing a lock).
+ */
+
+ if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
+ tsi108_rx_int(dev);
+}
+
+static void tsi108_tx_int(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 estat = TSI_READ(TSI108_EC_TXESTAT);
+
+ TSI_WRITE(TSI108_EC_TXESTAT, estat);
+ TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
+ TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
+ if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
+ u32 err = TSI_READ(TSI108_EC_TXERR);
+ TSI_WRITE(TSI108_EC_TXERR, err);
+
+ if (err && net_ratelimit())
+ printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
+ }
+
+ if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
+ spin_lock(&data->txlock);
+ tsi108_complete_tx(dev);
+ spin_unlock(&data->txlock);
+ }
+}
+
+
+static irqreturn_t tsi108_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 stat = TSI_READ(TSI108_EC_INTSTAT);
+
+ if (!(stat & TSI108_INT_ANY))
+ return IRQ_NONE; /* Not our interrupt */
+
+ stat &= ~TSI_READ(TSI108_EC_INTMASK);
+
+ if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
+ TSI108_INT_TXERROR))
+ tsi108_tx_int(dev);
+ if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+ TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
+ TSI108_INT_RXERROR))
+ tsi108_rx_int(dev);
+
+ if (stat & TSI108_INT_SFN) {
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: SFN error\n", dev->name);
+ TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN);
+ }
+
+ if (stat & TSI108_INT_STATCARRY) {
+ tsi108_stat_carry(dev);
+ TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void tsi108_stop_ethernet(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ int i = 1000;
+ /* Disable all TX and RX queues ... */
+ TSI_WRITE(TSI108_EC_TXCTRL, 0);
+ TSI_WRITE(TSI108_EC_RXCTRL, 0);
+
+ /* ...and wait for them to become idle */
+ while(i--) {
+ if(!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE))
+ break;
+ udelay(10);
+ }
+ i = 1000;
+ while(i--){
+ if(!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE))
+ return;
+ udelay(10);
+ }
+ printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+}
+
+static void tsi108_reset_ether(struct tsi108_prv_data * data)
+{
+ TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
+ udelay(100);
+ TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+ TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
+ udelay(100);
+ TSI_WRITE(TSI108_EC_PORTCTRL,
+ TSI_READ(TSI108_EC_PORTCTRL) &
+ ~TSI108_EC_PORTCTRL_STATRST);
+
+ TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
+ udelay(100);
+ TSI_WRITE(TSI108_EC_TXCFG,
+ TSI_READ(TSI108_EC_TXCFG) &
+ ~TSI108_EC_TXCFG_RST);
+
+ TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
+ udelay(100);
+ TSI_WRITE(TSI108_EC_RXCFG,
+ TSI_READ(TSI108_EC_RXCFG) &
+ ~TSI108_EC_RXCFG_RST);
+
+ TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
+ TSI_READ(TSI108_MAC_MII_MGMT_CFG) |
+ TSI108_MAC_MII_MGMT_RST);
+ udelay(100);
+ TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
+ (TSI_READ(TSI108_MAC_MII_MGMT_CFG) &
+ ~(TSI108_MAC_MII_MGMT_RST |
+ TSI108_MAC_MII_MGMT_CLK)) | 0x07);
+}
+
+static int tsi108_get_mac(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
+ u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
+
+ /* Note that the octets are reversed from what the manual says,
+ * producing an even weirder ordering...
+ */
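+ /* For example, the default address 00:06:d2:00:00:01 assigned below
+ * packs into word2 = 0x06000000 and word1 = 0x010000d2.
+ */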
+ if (word2 == 0 && word1 == 0) {
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x06;
+ dev->dev_addr[2] = 0xd2;
+ dev->dev_addr[3] = 0x00;
+ dev->dev_addr[4] = 0x00;
+ if (0x8 == data->phy)
+ dev->dev_addr[5] = 0x01;
+ else
+ dev->dev_addr[5] = 0x02;
+
+ word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+ word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+ (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+ TSI_WRITE(TSI108_MAC_ADDR1, word1);
+ TSI_WRITE(TSI108_MAC_ADDR2, word2);
+ } else {
+ dev->dev_addr[0] = (word2 >> 16) & 0xff;
+ dev->dev_addr[1] = (word2 >> 24) & 0xff;
+ dev->dev_addr[2] = (word1 >> 0) & 0xff;
+ dev->dev_addr[3] = (word1 >> 8) & 0xff;
+ dev->dev_addr[4] = (word1 >> 16) & 0xff;
+ dev->dev_addr[5] = (word1 >> 24) & 0xff;
+ }
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk("KERN_ERR: word1: %08x, word2: %08x\n", word1, word2);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tsi108_set_mac(struct net_device *dev, void *addr)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 word1, word2;
+ int i;
+
+ if (!is_valid_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < 6; i++)
+ /* +2 is for the offset of the HW addr type */
+ dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+
+ word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+ word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+ (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+ spin_lock_irq(&data->misclock);
+ TSI_WRITE(TSI108_MAC_ADDR1, word1);
+ TSI_WRITE(TSI108_MAC_ADDR2, word2);
+ spin_lock(&data->txlock);
+
+ if (data->txfree && data->link_up)
+ netif_wake_queue(dev);
+
+ spin_unlock(&data->txlock);
+ spin_unlock_irq(&data->misclock);
+ return 0;
+}
+
+/* Protected by dev->xmit_lock. */
+static void tsi108_set_rx_mode(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 rxcfg = TSI_READ(TSI108_EC_RXCFG);
+
+ if (dev->flags & IFF_PROMISC) {
+ rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
+ rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
+ goto out;
+ }
+
+ rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
+
+ if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ int i;
+ struct dev_mc_list *mc = dev->mc_list;
+ rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
+
+ memset(data->mc_hash, 0, sizeof(data->mc_hash));
+
+ while (mc) {
+ u32 hash, crc;
+
+ if (mc->dmi_addrlen == 6) {
+ crc = ether_crc(6, mc->dmi_addr);
+ hash = crc >> 23;
+
+ __set_bit(hash, &data->mc_hash[0]);
+ } else {
+ printk(KERN_ERR
+ "%s: got multicast address of length %d "
+ "instead of 6.\n", dev->name,
+ mc->dmi_addrlen);
+ }
+
+ mc = mc->next;
+ }
+
+ TSI_WRITE(TSI108_EC_HASHADDR,
+ TSI108_EC_HASHADDR_AUTOINC |
+ TSI108_EC_HASHADDR_MCAST);
+
+ for (i = 0; i < 16; i++) {
+ /* The manual says that the hardware may drop
+ * back-to-back writes to the data register.
+ */
+ udelay(1);
+ TSI_WRITE(TSI108_EC_HASHDATA,
+ data->mc_hash[i]);
+ }
+ }
+
+ out:
+ TSI_WRITE(TSI108_EC_RXCFG, rxcfg);
+}
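+
+#if 0
+/* Illustrative sketch only -- tsi108_hash_example() is not part of the
+ * driver; it just spells out the hash mapping used above. ether_crc()
+ * returns a 32-bit CRC of the address, "crc >> 23" keeps its top nine
+ * bits (an index of 0..511), and __set_bit() treats data->mc_hash[]
+ * (16 words of 32 bits) as one flat 512-bit bitmap.
+ */
+static void tsi108_hash_example(struct tsi108_prv_data *data)
+{
+ static const u8 addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
+ u32 crc = ether_crc(6, addr);
+ u32 hash = crc >> 23;
+
+ __set_bit(hash, &data->mc_hash[0]);
+}
+#endif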
+
+static void tsi108_init_phy(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 i = 0;
+ u16 phyval = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&phy_lock, flags);
+
+ tsi108_write_mii(data, MII_BMCR, BMCR_RESET);
+ for (i = 0; i < 100; i++) {
+ if (!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET))
+ break;
+ udelay(10);
+ }
+ if (i == 100)
+ printk(KERN_ERR "%s: PHY reset timed out\n", __FUNCTION__);
+
+#if (TSI108_PHY_TYPE == PHY_BCM54XX) /* Broadcom BCM54xx PHY */
+ tsi108_write_mii(data, 0x09, 0x0300);
+ tsi108_write_mii(data, 0x10, 0x1020);
+ tsi108_write_mii(data, 0x1c, 0x8c00);
+#endif
+
+ tsi108_write_mii(data,
+ MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+ while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART)
+ cpu_relax();
+
+ /* Set G/MII mode and receive clock select in TBI control #2. The
+ * second port won't work if this isn't done, even though we don't
+ * use TBI mode.
+ */
+
+ tsi108_write_tbi(data, 0x11, 0x30);
+
+ /* FIXME: It seems to take more than 2 back-to-back reads to the
+ * PHY_STAT register before the link up status bit is set.
+ */
+
+ data->link_up = 1;
+
+ i = 0;
+ while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
+ BMSR_LSTATUS)) {
+ if (i++ > (MII_READ_DELAY / 10)) {
+ data->link_up = 0;
+ break;
+ }
+ spin_unlock_irqrestore(&phy_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&phy_lock, flags);
+ }
+
+ printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
+ data->phy_ok = 1;
+ data->init_media = 1;
+ spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+static void tsi108_kill_phy(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phy_lock, flags);
+ tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN);
+ data->phy_ok = 0;
+ spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+static int tsi108_open(struct net_device *dev)
+{
+ int i;
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
+ unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
+
+ i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
+ if (i != 0) {
+ printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
+ data->id, data->irq_num);
+ return i;
+ } else {
+ dev->irq = data->irq_num;
+ printk(KERN_NOTICE
+ "tsi108_open : Port %d Assigned IRQ %d to %s\n",
+ data->id, dev->irq, dev->name);
+ }
+
+ data->rxring = dma_alloc_coherent(NULL, rxring_size,
+ &data->rxdma, GFP_KERNEL);
+
+ if (!data->rxring) {
+ printk(KERN_DEBUG
+ "TSI108_ETH: failed to allocate memory for rxring!\n");
+ return -ENOMEM;
+ } else {
+ memset(data->rxring, 0, rxring_size);
+ }
+
+ data->txring = dma_alloc_coherent(NULL, txring_size,
+ &data->txdma, GFP_KERNEL);
+
+ if (!data->txring) {
+ printk(KERN_DEBUG
+ "TSI108_ETH: failed to allocate memory for txring!\n");
+ dma_free_coherent(NULL, rxring_size, data->rxring, data->rxdma);
+ return -ENOMEM;
+ } else {
+ memset(data->txring, 0, txring_size);
+ }
+
+ for (i = 0; i < TSI108_RXRING_LEN; i++) {
+ data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
+ data->rxring[i].blen = TSI108_RXBUF_SIZE;
+ data->rxring[i].vlan = 0;
+ }
+
+ data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
+
+ data->rxtail = 0;
+ data->rxhead = 0;
+
+ for (i = 0; i < TSI108_RXRING_LEN; i++) {
+ struct sk_buff *skb = dev_alloc_skb(TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+
+ if (!skb) {
+ /* Bah. No memory for now, but maybe we'll get
+ * some more later.
+ * For now, we'll live with the smaller ring.
+ */
+ printk(KERN_WARNING
+ "%s: Could only allocate %d receive skb(s).\n",
+ dev->name, i);
+ data->rxhead = i;
+ break;
+ }
+
+ /* Align the payload on a 4-byte boundary */
+ skb_reserve(skb, 2);
+ data->rxskbs[i] = skb;
+ data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
+ data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
+ }
+
+ data->rxfree = i;
+ TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma);
+
+ for (i = 0; i < TSI108_TXRING_LEN; i++) {
+ data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
+ data->txring[i].misc = 0;
+ }
+
+ data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
+ data->txtail = 0;
+ data->txhead = 0;
+ data->txfree = TSI108_TXRING_LEN;
+ TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma);
+ tsi108_init_phy(dev);
+
+ setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
+ mod_timer(&data->timer, jiffies + 1);
+
+ tsi108_restart_rx(data, dev);
+
+ TSI_WRITE(TSI108_EC_INTSTAT, ~0);
+
+ TSI_WRITE(TSI108_EC_INTMASK,
+ ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
+ TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
+ TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
+ TSI108_INT_SFN | TSI108_INT_STATCARRY));
+
+ TSI_WRITE(TSI108_MAC_CFG1,
+ TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int tsi108_close(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ del_timer_sync(&data->timer);
+
+ tsi108_stop_ethernet(dev);
+ tsi108_kill_phy(dev);
+ TSI_WRITE(TSI108_EC_INTMASK, ~0);
+ TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+ /* Check for any pending TX packets, and drop them. */
+
+ while (!data->txfree || data->txhead != data->txtail) {
+ int tx = data->txtail;
+ struct sk_buff *skb;
+ skb = data->txskbs[tx];
+ data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+ data->txfree++;
+ dev_kfree_skb(skb);
+ }
+
+ synchronize_irq(data->irq_num);
+ free_irq(data->irq_num, dev);
+
+ /* Discard the RX ring. */
+
+ while (data->rxfree) {
+ int rx = data->rxtail;
+ struct sk_buff *skb;
+
+ skb = data->rxskbs[rx];
+ data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+ data->rxfree--;
+ dev_kfree_skb(skb);
+ }
+
+ dma_free_coherent(NULL,
+ TSI108_RXRING_LEN * sizeof(rx_desc),
+ data->rxring, data->rxdma);
+ dma_free_coherent(NULL,
+ TSI108_TXRING_LEN * sizeof(tx_desc),
+ data->txring, data->txdma);
+
+ return 0;
+}
+
+static void tsi108_init_mac(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+
+ TSI_WRITE(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
+ TSI108_MAC_CFG2_PADCRC);
+
+ TSI_WRITE(TSI108_EC_TXTHRESH,
+ (192 << TSI108_EC_TXTHRESH_STARTFILL) |
+ (192 << TSI108_EC_TXTHRESH_STOPFILL));
+
+ TSI_WRITE(TSI108_STAT_CARRYMASK1,
+ ~(TSI108_STAT_CARRY1_RXBYTES |
+ TSI108_STAT_CARRY1_RXPKTS |
+ TSI108_STAT_CARRY1_RXFCS |
+ TSI108_STAT_CARRY1_RXMCAST |
+ TSI108_STAT_CARRY1_RXALIGN |
+ TSI108_STAT_CARRY1_RXLENGTH |
+ TSI108_STAT_CARRY1_RXRUNT |
+ TSI108_STAT_CARRY1_RXJUMBO |
+ TSI108_STAT_CARRY1_RXFRAG |
+ TSI108_STAT_CARRY1_RXJABBER |
+ TSI108_STAT_CARRY1_RXDROP));
+
+ TSI_WRITE(TSI108_STAT_CARRYMASK2,
+ ~(TSI108_STAT_CARRY2_TXBYTES |
+ TSI108_STAT_CARRY2_TXPKTS |
+ TSI108_STAT_CARRY2_TXEXDEF |
+ TSI108_STAT_CARRY2_TXEXCOL |
+ TSI108_STAT_CARRY2_TXTCOL |
+ TSI108_STAT_CARRY2_TXPAUSE));
+
+ TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
+ TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+ TSI_WRITE(TSI108_EC_RXCFG,
+ TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);
+
+ TSI_WRITE(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
+ TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
+ TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+ TSI108_EC_TXQ_CFG_SFNPORT));
+
+ TSI_WRITE(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
+ TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
+ TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+ TSI108_EC_RXQ_CFG_SFNPORT));
+
+ TSI_WRITE(TSI108_EC_TXQ_BUFCFG,
+ TSI108_EC_TXQ_BUFCFG_BURST256 |
+ TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+ TSI108_EC_TXQ_BUFCFG_SFNPORT));
+
+ TSI_WRITE(TSI108_EC_RXQ_BUFCFG,
+ TSI108_EC_RXQ_BUFCFG_BURST256 |
+ TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+ TSI108_EC_RXQ_BUFCFG_SFNPORT));
+
+ TSI_WRITE(TSI108_EC_INTMASK, ~0);
+}
+
+static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
+}
+
+static int
+tsi108_init_one(struct platform_device *pdev)
+{
+ struct net_device *dev = NULL;
+ struct tsi108_prv_data *data = NULL;
+ hw_info *einfo;
+ int err = 0;
+
+ einfo = pdev->dev.platform_data;
+
+ if (NULL == einfo) {
+ printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
+ pdev->id);
+ return -ENODEV;
+ }
+
+ /* Create an ethernet device instance */
+
+ dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
+ if (!dev) {
+ printk("tsi108_eth: Could not allocate a device structure\n");
+ return -ENOMEM;
+ }
+
+ printk("tsi108_eth%d: probe...\n", pdev->id);
+ data = netdev_priv(dev);
+
+ pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
+ pdev->id, einfo->regs, einfo->phyregs,
+ einfo->phy, einfo->irq_num);
+
+ data->regs = ioremap(einfo->regs, 0x400);
+ if (NULL == data->regs) {
+ err = -ENOMEM;
+ goto regs_fail;
+ }
+
+ data->phyregs = ioremap(einfo->phyregs, 0x400);
+ if (NULL == data->phyregs) {
+ err = -ENOMEM;
+ goto regs_fail;
+ }
+/* MII setup */
+ data->mii_if.dev = dev;
+ data->mii_if.mdio_read = tsi108_mdio_read;
+ data->mii_if.mdio_write = tsi108_mdio_write;
+ data->mii_if.phy_id = einfo->phy;
+ data->mii_if.phy_id_mask = 0x1f;
+ data->mii_if.reg_num_mask = 0x1f;
+ data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
+
+ data->phy = einfo->phy;
+ data->irq_num = einfo->irq_num;
+ data->id = pdev->id;
+ dev->open = tsi108_open;
+ dev->stop = tsi108_close;
+ dev->hard_start_xmit = tsi108_send_packet;
+ dev->set_mac_address = tsi108_set_mac;
+ dev->set_multicast_list = tsi108_set_rx_mode;
+ dev->get_stats = tsi108_get_stats;
+ dev->poll = tsi108_poll;
+ dev->do_ioctl = tsi108_do_ioctl;
+ dev->weight = 64; /* 64 is more suitable for GigE interface - klai */
+
+ /* Apparently, the Linux networking code won't use scatter-gather
+ * if the hardware doesn't do checksums. However, it's faster
+ * to checksum in place and use SG, as (among other reasons)
+ * the cache won't be dirtied (which then has to be flushed
+ * before DMA). The checksumming is done by the driver (via
+ * a new function skb_csum_dev() in net/core/skbuff.c).
+ */
+
+ dev->features = NETIF_F_HIGHDMA;
+ SET_MODULE_OWNER(dev);
+
+ spin_lock_init(&data->txlock);
+ spin_lock_init(&data->misclock);
+
+ tsi108_reset_ether(data);
+ tsi108_kill_phy(dev);
+
+ if ((err = tsi108_get_mac(dev)) != 0) {
+ printk(KERN_ERR "%s: Invalid MAC address. Please correct.\n",
+ dev->name);
+ goto register_fail;
+ }
+
+ tsi108_init_mac(dev);
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
+ dev->name);
+ goto register_fail;
+ }
+
+ printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+#ifdef DEBUG
+ data->msg_enable = DEBUG;
+ dump_eth_one(dev);
+#endif
+
+ return 0;
+
+register_fail:
+ iounmap(data->regs);
+ iounmap(data->phyregs);
+
+regs_fail:
+ free_netdev(dev);
+ return err;
+}
+
+/* There's no way to either get interrupts from the PHY when
+ * something changes, or to have the Tsi108 automatically communicate
+ * with the PHY to reconfigure itself.
+ *
+ * Thus, we have to do it using a timer.
+ */
+
+static void tsi108_timed_checker(unsigned long dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tsi108_prv_data *data = netdev_priv(dev);
+
+ tsi108_check_phy(dev);
+ tsi108_check_rxring(dev);
+ mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
+}
+
+static int tsi108_ether_init(void)
+{
+ int ret;
+ ret = platform_driver_register (&tsi_eth_driver);
+ if (ret < 0){
+ printk("tsi108_ether_init: error initializing ethernet "
+ "device\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int tsi108_ether_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct tsi108_prv_data *priv = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ tsi108_stop_ethernet(dev);
+ platform_set_drvdata(pdev, NULL);
+ iounmap(priv->regs);
+ iounmap(priv->phyregs);
+ free_netdev(dev);
+
+ return 0;
+}
+
+static void tsi108_ether_exit(void)
+{
+ platform_driver_unregister(&tsi_eth_driver);
+}
+
+module_init(tsi108_ether_init);
+module_exit(tsi108_ether_exit);
+
+MODULE_AUTHOR("Tundra Semiconductor Corporation");
+MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/tsi108_eth.h b/drivers/net/tsi108_eth.h
new file mode 100644
index 000000000000..77a769df228a
--- /dev/null
+++ b/drivers/net/tsi108_eth.h
@@ -0,0 +1,365 @@
+/*
+ * (C) Copyright 2005 Tundra Semiconductor Corp.
+ * Kong Lai, <kong.lai@tundra.com>.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * net/tsi108_eth.h - definitions for Tsi108 GIGE network controller.
+ */
+
+#ifndef __TSI108_ETH_H
+#define __TSI108_ETH_H
+
+#include <linux/types.h>
+
+#define TSI_WRITE(offset, val) \
+ out_be32((data->regs + (offset)), val)
+
+#define TSI_READ(offset) \
+ in_be32((data->regs + (offset)))
+
+#define TSI_WRITE_PHY(offset, val) \
+ out_be32((data->phyregs + (offset)), val)
+
+#define TSI_READ_PHY(offset) \
+ in_be32((data->phyregs + (offset)))
+
+/*
+ * PHY Configuration Options
+ *
+ * NOTE: Enable the set of definitions that corresponds to your board type
+ */
+#define PHY_MV88E 1 /* Marvell 88Exxxx PHY */
+#define PHY_BCM54XX 2 /* Broadcom BCM54xx PHY */
+#define TSI108_PHY_TYPE PHY_MV88E
+
+/*
+ * TSI108 GIGE port registers
+ */
+
+#define TSI108_ETH_PORT_NUM 2
+#define TSI108_PBM_PORT 2
+#define TSI108_SDRAM_PORT 4
+
+#define TSI108_MAC_CFG1 (0x000)
+#define TSI108_MAC_CFG1_SOFTRST (1 << 31)
+#define TSI108_MAC_CFG1_LOOPBACK (1 << 8)
+#define TSI108_MAC_CFG1_RXEN (1 << 2)
+#define TSI108_MAC_CFG1_TXEN (1 << 0)
+
+#define TSI108_MAC_CFG2 (0x004)
+#define TSI108_MAC_CFG2_DFLT_PREAMBLE (7 << 12)
+#define TSI108_MAC_CFG2_IFACE_MASK (3 << 8)
+#define TSI108_MAC_CFG2_NOGIG (1 << 8)
+#define TSI108_MAC_CFG2_GIG (2 << 8)
+#define TSI108_MAC_CFG2_PADCRC (1 << 2)
+#define TSI108_MAC_CFG2_FULLDUPLEX (1 << 0)
+
+#define TSI108_MAC_MII_MGMT_CFG (0x020)
+#define TSI108_MAC_MII_MGMT_CLK (7 << 0)
+#define TSI108_MAC_MII_MGMT_RST (1 << 31)
+
+#define TSI108_MAC_MII_CMD (0x024)
+#define TSI108_MAC_MII_CMD_READ (1 << 0)
+
+#define TSI108_MAC_MII_ADDR (0x028)
+#define TSI108_MAC_MII_ADDR_REG 0
+#define TSI108_MAC_MII_ADDR_PHY 8
+
+#define TSI108_MAC_MII_DATAOUT (0x02c)
+#define TSI108_MAC_MII_DATAIN (0x030)
+
+#define TSI108_MAC_MII_IND (0x034)
+#define TSI108_MAC_MII_IND_NOTVALID (1 << 2)
+#define TSI108_MAC_MII_IND_SCANNING (1 << 1)
+#define TSI108_MAC_MII_IND_BUSY (1 << 0)
+
+#define TSI108_MAC_IFCTRL (0x038)
+#define TSI108_MAC_IFCTRL_PHYMODE (1 << 24)
+
+#define TSI108_MAC_ADDR1 (0x040)
+#define TSI108_MAC_ADDR2 (0x044)
+
+#define TSI108_STAT_RXBYTES (0x06c)
+#define TSI108_STAT_RXBYTES_CARRY (1 << 24)
+
+#define TSI108_STAT_RXPKTS (0x070)
+#define TSI108_STAT_RXPKTS_CARRY (1 << 18)
+
+#define TSI108_STAT_RXFCS (0x074)
+#define TSI108_STAT_RXFCS_CARRY (1 << 12)
+
+#define TSI108_STAT_RXMCAST (0x078)
+#define TSI108_STAT_RXMCAST_CARRY (1 << 18)
+
+#define TSI108_STAT_RXALIGN (0x08c)
+#define TSI108_STAT_RXALIGN_CARRY (1 << 12)
+
+#define TSI108_STAT_RXLENGTH (0x090)
+#define TSI108_STAT_RXLENGTH_CARRY (1 << 12)
+
+#define TSI108_STAT_RXRUNT (0x09c)
+#define TSI108_STAT_RXRUNT_CARRY (1 << 12)
+
+#define TSI108_STAT_RXJUMBO (0x0a0)
+#define TSI108_STAT_RXJUMBO_CARRY (1 << 12)
+
+#define TSI108_STAT_RXFRAG (0x0a4)
+#define TSI108_STAT_RXFRAG_CARRY (1 << 12)
+
+#define TSI108_STAT_RXJABBER (0x0a8)
+#define TSI108_STAT_RXJABBER_CARRY (1 << 12)
+
+#define TSI108_STAT_RXDROP (0x0ac)
+#define TSI108_STAT_RXDROP_CARRY (1 << 12)
+
+#define TSI108_STAT_TXBYTES (0x0b0)
+#define TSI108_STAT_TXBYTES_CARRY (1 << 24)
+
+#define TSI108_STAT_TXPKTS (0x0b4)
+#define TSI108_STAT_TXPKTS_CARRY (1 << 18)
+
+#define TSI108_STAT_TXEXDEF (0x0c8)
+#define TSI108_STAT_TXEXDEF_CARRY (1 << 12)
+
+#define TSI108_STAT_TXEXCOL (0x0d8)
+#define TSI108_STAT_TXEXCOL_CARRY (1 << 12)
+
+#define TSI108_STAT_TXTCOL (0x0dc)
+#define TSI108_STAT_TXTCOL_CARRY (1 << 13)
+
+#define TSI108_STAT_TXPAUSEDROP (0x0e4)
+#define TSI108_STAT_TXPAUSEDROP_CARRY (1 << 12)
+
+#define TSI108_STAT_CARRY1 (0x100)
+#define TSI108_STAT_CARRY1_RXBYTES (1 << 16)
+#define TSI108_STAT_CARRY1_RXPKTS (1 << 15)
+#define TSI108_STAT_CARRY1_RXFCS (1 << 14)
+#define TSI108_STAT_CARRY1_RXMCAST (1 << 13)
+#define TSI108_STAT_CARRY1_RXALIGN (1 << 8)
+#define TSI108_STAT_CARRY1_RXLENGTH (1 << 7)
+#define TSI108_STAT_CARRY1_RXRUNT (1 << 4)
+#define TSI108_STAT_CARRY1_RXJUMBO (1 << 3)
+#define TSI108_STAT_CARRY1_RXFRAG (1 << 2)
+#define TSI108_STAT_CARRY1_RXJABBER (1 << 1)
+#define TSI108_STAT_CARRY1_RXDROP (1 << 0)
+
+#define TSI108_STAT_CARRY2 (0x104)
+#define TSI108_STAT_CARRY2_TXBYTES (1 << 13)
+#define TSI108_STAT_CARRY2_TXPKTS (1 << 12)
+#define TSI108_STAT_CARRY2_TXEXDEF (1 << 7)
+#define TSI108_STAT_CARRY2_TXEXCOL (1 << 3)
+#define TSI108_STAT_CARRY2_TXTCOL (1 << 2)
+#define TSI108_STAT_CARRY2_TXPAUSE (1 << 0)
+
+#define TSI108_STAT_CARRYMASK1 (0x108)
+#define TSI108_STAT_CARRYMASK2 (0x10c)
+
+#define TSI108_EC_PORTCTRL (0x200)
+#define TSI108_EC_PORTCTRL_STATRST (1 << 31)
+#define TSI108_EC_PORTCTRL_STATEN (1 << 28)
+#define TSI108_EC_PORTCTRL_NOGIG (1 << 18)
+#define TSI108_EC_PORTCTRL_HALFDUPLEX (1 << 16)
+
+#define TSI108_EC_INTSTAT (0x204)
+#define TSI108_EC_INTMASK (0x208)
+
+#define TSI108_INT_ANY (1 << 31)
+#define TSI108_INT_SFN (1 << 30)
+#define TSI108_INT_RXIDLE (1 << 29)
+#define TSI108_INT_RXABORT (1 << 28)
+#define TSI108_INT_RXERROR (1 << 27)
+#define TSI108_INT_RXOVERRUN (1 << 26)
+#define TSI108_INT_RXTHRESH (1 << 25)
+#define TSI108_INT_RXWAIT (1 << 24)
+#define TSI108_INT_RXQUEUE0 (1 << 16)
+#define TSI108_INT_STATCARRY (1 << 15)
+#define TSI108_INT_TXIDLE (1 << 13)
+#define TSI108_INT_TXABORT (1 << 12)
+#define TSI108_INT_TXERROR (1 << 11)
+#define TSI108_INT_TXUNDERRUN (1 << 10)
+#define TSI108_INT_TXTHRESH (1 << 9)
+#define TSI108_INT_TXWAIT (1 << 8)
+#define TSI108_INT_TXQUEUE0 (1 << 0)
+
+#define TSI108_EC_TXCFG (0x220)
+#define TSI108_EC_TXCFG_RST (1 << 31)
+
+#define TSI108_EC_TXCTRL (0x224)
+#define TSI108_EC_TXCTRL_IDLEINT (1 << 31)
+#define TSI108_EC_TXCTRL_ABORT (1 << 30)
+#define TSI108_EC_TXCTRL_GO (1 << 15)
+#define TSI108_EC_TXCTRL_QUEUE0 (1 << 0)
+
+#define TSI108_EC_TXSTAT (0x228)
+#define TSI108_EC_TXSTAT_ACTIVE (1 << 15)
+#define TSI108_EC_TXSTAT_QUEUE0 (1 << 0)
+
+#define TSI108_EC_TXESTAT (0x22c)
+#define TSI108_EC_TXESTAT_Q0_ERR (1 << 24)
+#define TSI108_EC_TXESTAT_Q0_DESCINT (1 << 16)
+#define TSI108_EC_TXESTAT_Q0_EOF (1 << 8)
+#define TSI108_EC_TXESTAT_Q0_EOQ (1 << 0)
+
+#define TSI108_EC_TXERR (0x278)
+
+#define TSI108_EC_TXQ_CFG (0x280)
+#define TSI108_EC_TXQ_CFG_DESC_INT (1 << 20)
+#define TSI108_EC_TXQ_CFG_EOQ_OWN_INT (1 << 19)
+#define TSI108_EC_TXQ_CFG_WSWP (1 << 11)
+#define TSI108_EC_TXQ_CFG_BSWP (1 << 10)
+#define TSI108_EC_TXQ_CFG_SFNPORT 0
+
+#define TSI108_EC_TXQ_BUFCFG (0x284)
+#define TSI108_EC_TXQ_BUFCFG_BURST8 (0 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST32 (1 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST128 (2 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST256 (3 << 8)
+#define TSI108_EC_TXQ_BUFCFG_WSWP (1 << 11)
+#define TSI108_EC_TXQ_BUFCFG_BSWP (1 << 10)
+#define TSI108_EC_TXQ_BUFCFG_SFNPORT 0
+
+#define TSI108_EC_TXQ_PTRLOW (0x288)
+
+#define TSI108_EC_TXQ_PTRHIGH (0x28c)
+#define TSI108_EC_TXQ_PTRHIGH_VALID (1 << 31)
+
+#define TSI108_EC_TXTHRESH (0x230)
+#define TSI108_EC_TXTHRESH_STARTFILL 0
+#define TSI108_EC_TXTHRESH_STOPFILL 16
+
+#define TSI108_EC_RXCFG (0x320)
+#define TSI108_EC_RXCFG_RST (1 << 31)
+
+#define TSI108_EC_RXSTAT (0x328)
+#define TSI108_EC_RXSTAT_ACTIVE (1 << 15)
+#define TSI108_EC_RXSTAT_QUEUE0 (1 << 0)
+
+#define TSI108_EC_RXESTAT (0x32c)
+#define TSI108_EC_RXESTAT_Q0_ERR (1 << 24)
+#define TSI108_EC_RXESTAT_Q0_DESCINT (1 << 16)
+#define TSI108_EC_RXESTAT_Q0_EOF (1 << 8)
+#define TSI108_EC_RXESTAT_Q0_EOQ (1 << 0)
+
+#define TSI108_EC_HASHADDR (0x360)
+#define TSI108_EC_HASHADDR_AUTOINC (1 << 31)
+#define TSI108_EC_HASHADDR_DO1STREAD (1 << 30)
+#define TSI108_EC_HASHADDR_UNICAST (0 << 4)
+#define TSI108_EC_HASHADDR_MCAST (1 << 4)
+
+#define TSI108_EC_HASHDATA (0x364)
+
+#define TSI108_EC_RXQ_PTRLOW (0x388)
+
+#define TSI108_EC_RXQ_PTRHIGH (0x38c)
+#define TSI108_EC_RXQ_PTRHIGH_VALID (1 << 31)
+
+/* Station Enable -- accept packets destined for us */
+#define TSI108_EC_RXCFG_SE (1 << 13)
+/* Unicast Frame Enable -- for packets not destined for us */
+#define TSI108_EC_RXCFG_UFE (1 << 12)
+/* Multicast Frame Enable */
+#define TSI108_EC_RXCFG_MFE (1 << 11)
+/* Broadcast Frame Enable */
+#define TSI108_EC_RXCFG_BFE (1 << 10)
+#define TSI108_EC_RXCFG_UC_HASH (1 << 9)
+#define TSI108_EC_RXCFG_MC_HASH (1 << 8)
+
+#define TSI108_EC_RXQ_CFG (0x380)
+#define TSI108_EC_RXQ_CFG_DESC_INT (1 << 20)
+#define TSI108_EC_RXQ_CFG_EOQ_OWN_INT (1 << 19)
+#define TSI108_EC_RXQ_CFG_WSWP (1 << 11)
+#define TSI108_EC_RXQ_CFG_BSWP (1 << 10)
+#define TSI108_EC_RXQ_CFG_SFNPORT 0
+
+#define TSI108_EC_RXQ_BUFCFG (0x384)
+#define TSI108_EC_RXQ_BUFCFG_BURST8 (0 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST32 (1 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST128 (2 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST256 (3 << 8)
+#define TSI108_EC_RXQ_BUFCFG_WSWP (1 << 11)
+#define TSI108_EC_RXQ_BUFCFG_BSWP (1 << 10)
+#define TSI108_EC_RXQ_BUFCFG_SFNPORT 0
+
+#define TSI108_EC_RXCTRL (0x324)
+#define TSI108_EC_RXCTRL_ABORT (1 << 30)
+#define TSI108_EC_RXCTRL_GO (1 << 15)
+#define TSI108_EC_RXCTRL_QUEUE0 (1 << 0)
+
+#define TSI108_EC_RXERR (0x378)
+
+#define TSI108_TX_EOF (1 << 0) /* End of frame; last fragment of packet */
+#define TSI108_TX_SOF (1 << 1) /* Start of frame; first frag. of packet */
+#define TSI108_TX_VLAN (1 << 2) /* Per-frame VLAN: enables VLAN override */
+#define TSI108_TX_HUGE (1 << 3) /* Huge frame enable */
+#define TSI108_TX_PAD (1 << 4) /* Pad the packet if too short */
+#define TSI108_TX_CRC (1 << 5) /* Generate CRC for this packet */
+#define TSI108_TX_INT (1 << 14) /* Generate an IRQ after frag. processed */
+#define TSI108_TX_RETRY (0xf << 16) /* 4 bit field indicating num. of retries */
+#define TSI108_TX_COL (1 << 20) /* Set if a collision occurred */
+#define TSI108_TX_LCOL (1 << 24) /* Set if a late collision occurred */
+#define TSI108_TX_UNDER (1 << 25) /* Set if a FIFO underrun occurred */
+#define TSI108_TX_RLIM (1 << 26) /* Set if the retry limit was reached */
+#define TSI108_TX_OK (1 << 30) /* Set if the frame TX was successful */
+#define TSI108_TX_OWN (1 << 31) /* Set if the device owns the descriptor */
+
+/* Note: the descriptor layouts assume big-endian byte order. */
+typedef struct {
+ u32 buf0;
+ u32 buf1; /* Base address of buffer */
+ u32 next0; /* Address of next descriptor, if any */
+ u32 next1;
+ u16 vlan; /* VLAN, if override enabled for this packet */
+ u16 len; /* Length of buffer in bytes */
+ u32 misc; /* See TSI108_TX_* above */
+ u32 reserved0; /* reserved0 and reserved1 are added to make the desc */
+ u32 reserved1; /* 32-byte aligned */
+} __attribute__ ((aligned(32))) tx_desc;
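+
+/* The ring setup in tsi108_open() advances next0 with
+ * "base + (i + 1) * sizeof(tx_desc)", which relies on this structure being
+ * exactly 32 bytes (four u32 buffer/next words, two u16 fields, misc, and
+ * the two reserved u32 pads). The typedef below is only a sketch of how
+ * that assumption could be checked at compile time (a negative array size
+ * breaks the build); the driver itself does not define or need it.
+ */
+typedef char tsi108_tx_desc_is_32_bytes[(sizeof(tx_desc) == 32) ? 1 : -1];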
+
+#define TSI108_RX_EOF (1 << 0) /* End of frame; last fragment of packet */
+#define TSI108_RX_SOF (1 << 1) /* Start of frame; first frag. of packet */
+#define TSI108_RX_VLAN (1 << 2) /* Set on SOF if packet has a VLAN */
+#define TSI108_RX_FTYPE (1 << 3) /* Length/Type field is type, not length */
+#define TSI108_RX_RUNT (1 << 4) /* Packet is less than minimum size */
+#define TSI108_RX_HASH (1 << 7) /* Hash table match */
+#define TSI108_RX_BAD (1 << 8) /* Bad frame */
+#define TSI108_RX_OVER (1 << 9) /* FIFO overrun occurred */
+#define TSI108_RX_TRUNC (1 << 11) /* Packet truncated due to excess length */
+#define TSI108_RX_CRC (1 << 12) /* Packet had a CRC error */
+#define TSI108_RX_INT (1 << 13) /* Generate an IRQ after frag. processed */
+#define TSI108_RX_OWN (1 << 15) /* Set if the device owns the descriptor */
+
+#define TSI108_RX_SKB_SIZE 1536 /* The RX skb length */
+
+typedef struct {
+ u32 buf0; /* Base address of buffer */
+ u32 buf1; /* Base address of buffer */
+ u32 next0; /* Address of next descriptor, if any */
+ u32 next1; /* Address of next descriptor, if any */
+ u16 vlan; /* VLAN of received packet, first frag only */
+ u16 len; /* Length of received fragment in bytes */
+ u16 blen; /* Length of buffer in bytes */
+ u16 misc; /* See TSI108_RX_* above */
+ u32 reserved0; /* reserved0 and reserved1 are added to make the desc */
+ u32 reserved1; /* 32-byte aligned */
+} __attribute__ ((aligned(32))) rx_desc;
+
+#endif /* __TSI108_ETH_H */
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index f6b3a94e97bf..9d67f11422ec 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1906,9 +1906,7 @@ fill_defaults:
de->media[i].csr15 = t21041_csr15[i];
}
- de->ee_data = kmalloc(DE_EEPROM_SIZE, GFP_KERNEL);
- if (de->ee_data)
- memcpy(de->ee_data, &ee_data[0], DE_EEPROM_SIZE);
+ de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
return;
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 4dd8a0bae860..7f59a3d4fda2 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -187,7 +187,7 @@ struct rx_desc {
struct dmfe_board_info {
u32 chip_id; /* Chip vendor/Device ID */
u32 chip_revision; /* Chip revision */
- struct DEVICE *next_dev; /* next device */
+ struct DEVICE *dev; /* net device */
struct pci_dev *pdev; /* PCI device */
spinlock_t lock;
@@ -399,6 +399,8 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
/* Init system & device */
db = netdev_priv(dev);
+ db->dev = dev;
+
/* Allocate Tx/Rx descriptor memory */
db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
@@ -426,6 +428,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
dev->poll_controller = &poll_dmfe;
#endif
dev->ethtool_ops = &netdev_ethtool_ops;
+ netif_carrier_off(db->dev);
spin_lock_init(&db->lock);
pci_read_config_dword(pdev, 0x50, &pci_pmr);
@@ -1050,6 +1053,7 @@ static void netdev_get_drvinfo(struct net_device *dev,
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
+ .get_link = ethtool_op_get_link,
};
/*
@@ -1144,6 +1148,7 @@ static void dmfe_timer(unsigned long data)
/* Link Failed */
DMFE_DBUG(0, "Link Failed", tmp_cr12);
db->link_failed = 1;
+ netif_carrier_off(db->dev);
/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
/* AUTO or force 1M Homerun/Longrun don't need */
@@ -1166,6 +1171,8 @@ static void dmfe_timer(unsigned long data)
if ( (db->media_mode & DMFE_AUTO) &&
dmfe_sense_speed(db) )
db->link_failed = 1;
+ else
+ netif_carrier_on(db->dev);
dmfe_process_mode(db);
/* SHOW_MEDIA_TYPE(db->op_mode); */
}
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index b37888011067..1f05511fa390 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -30,7 +30,7 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
-#include <asm/of_device.h>
+#include <asm/of_platform.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
@@ -4301,12 +4301,12 @@ static int __init ucc_geth_init(void)
memcpy(&(ugeth_info[i]), &ugeth_primary_info,
sizeof(ugeth_primary_info));
- return of_register_driver(&ucc_geth_driver);
+ return of_register_platform_driver(&ucc_geth_driver);
}
static void __exit ucc_geth_exit(void)
{
- of_unregister_driver(&ucc_geth_driver);
+ of_unregister_platform_driver(&ucc_geth_driver);
}
module_init(ucc_geth_init);
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index b5d0d7fb647a..d5ab9cf13257 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -57,44 +57,6 @@ config COSA
The driver will be compiled as a module: the
module will be called cosa.
-config DSCC4
- tristate "Etinc PCISYNC serial board support"
- depends on WAN && PCI && m
- help
- Driver for Etinc PCISYNC boards based on the Infineon (ex. Siemens)
- DSCC4 chipset.
-
- This is supposed to work with the four port card. Take a look at
- <http://www.cogenit.fr/dscc4/> for further information about the
- driver.
-
- To compile this driver as a module, choose M here: the
- module will be called dscc4.
-
-config DSCC4_PCISYNC
- bool "Etinc PCISYNC features"
- depends on DSCC4
- help
- Due to Etinc's design choice for its PCISYNC cards, some operations
- are only allowed on specific ports of the DSCC4. This option is the
- only way for the driver to know that it shouldn't return a success
- code for these operations.
-
- Please say Y if your card is an Etinc's PCISYNC.
-
-config DSCC4_PCI_RST
- bool "Hard reset support"
- depends on DSCC4
- help
- Various DSCC4 bugs forbid any reliable software reset of the ASIC.
- As a replacement, some vendors provide a way to assert the PCI #RST
- pin of DSCC4 through the GPIO port of the card. If you choose Y,
- the driver will make use of this feature before module removal
- (i.e. rmmod). The feature is known to be available on Commtech's
- cards. Contact your manufacturer for details.
-
- Say Y if your card supports this feature.
-
#
# Lan Media's board. Currently 1000, 1200, 5200, 5245
#
@@ -323,6 +285,44 @@ config FARSYNC
To compile this driver as a module, choose M here: the
module will be called farsync.
+config DSCC4
+ tristate "Etinc PCISYNC serial board support"
+ depends on HDLC && PCI && m
+ help
+ Driver for Etinc PCISYNC boards based on the Infineon (ex. Siemens)
+ DSCC4 chipset.
+
+ This is supposed to work with the four port card. Take a look at
+ <http://www.cogenit.fr/dscc4/> for further information about the
+ driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dscc4.
+
+config DSCC4_PCISYNC
+ bool "Etinc PCISYNC features"
+ depends on DSCC4
+ help
+ Due to Etinc's design choice for its PCISYNC cards, some operations
+ are only allowed on specific ports of the DSCC4. This option is the
+ only way for the driver to know that it shouldn't return a success
+ code for these operations.
+
+ Please say Y if your card is an Etinc's PCISYNC.
+
+config DSCC4_PCI_RST
+ bool "Hard reset support"
+ depends on DSCC4
+ help
+ Various DSCC4 bugs forbid any reliable software reset of the ASIC.
+ As a replacement, some vendors provide a way to assert the PCI #RST
+ pin of DSCC4 through the GPIO port of the card. If you choose Y,
+ the driver will make use of this feature before module removal
+ (i.e. rmmod). The feature is known to be available on Commtech's
+ cards. Contact your manufacturer for details.
+
+ Say Y if your card supports this feature.
+
config DLCI
tristate "Frame Relay DLCI support"
depends on WAN
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 0c07b8b7250d..10bcb48e80d0 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -595,7 +595,7 @@ static void atmel_join_bss(struct atmel_private *priv, int bss_index);
static void atmel_smooth_qual(struct atmel_private *priv);
static void atmel_writeAR(struct net_device *dev, u16 data);
static int probe_atmel_card(struct net_device *dev);
-static int reset_atmel_card(struct net_device *dev );
+static int reset_atmel_card(struct net_device *dev);
static void atmel_enter_state(struct atmel_private *priv, int new_state);
int atmel_open (struct net_device *dev);
@@ -784,11 +784,11 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast,
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
+ static const u8 SNAP_RFC1024[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
struct atmel_private *priv = netdev_priv(dev);
struct ieee80211_hdr_4addr header;
unsigned long flags;
u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
- u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
if (priv->card && priv->present_callback &&
!(*priv->present_callback)(priv->card)) {
@@ -1193,7 +1193,7 @@ static irqreturn_t service_interrupt(int irq, void *dev_id)
atmel_set_gcr(dev, GCR_ACKINT); /* acknowledge interrupt */
- for (i = 0; i < sizeof(irq_order)/sizeof(u8); i++)
+ for (i = 0; i < ARRAY_SIZE(irq_order); i++)
if (isr & irq_order[i])
break;
@@ -1345,10 +1345,10 @@ int atmel_open(struct net_device *dev)
atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS, priv->reg_domain);
} else {
priv->reg_domain = atmel_get_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS);
- for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++)
+ for (i = 0; i < ARRAY_SIZE(channel_table); i++)
if (priv->reg_domain == channel_table[i].reg_domain)
break;
- if (i == sizeof(channel_table)/sizeof(channel_table[0])) {
+ if (i == ARRAY_SIZE(channel_table)) {
priv->reg_domain = REG_DOMAIN_MKK1;
printk(KERN_ALERT "%s: failed to get regulatory domain: assuming MKK1.\n", dev->name);
}
@@ -1393,7 +1393,7 @@ static int atmel_validate_channel(struct atmel_private *priv, int channel)
else return suitable default channel */
int i;
- for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++)
+ for (i = 0; i < ARRAY_SIZE(channel_table); i++)
if (priv->reg_domain == channel_table[i].reg_domain) {
if (channel >= channel_table[i].min &&
channel <= channel_table[i].max)
@@ -1437,7 +1437,7 @@ static int atmel_proc_output (char *buf, struct atmel_private *priv)
}
r = "<unknown>";
- for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++)
+ for (i = 0; i < ARRAY_SIZE(channel_table); i++)
if (priv->reg_domain == channel_table[i].reg_domain)
r = channel_table[i].name;
@@ -1736,7 +1736,7 @@ static int atmel_set_encode(struct net_device *dev,
/* Disable the key */
priv->wep_key_len[index] = 0;
/* Check if the key is not marked as invalid */
- if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
+ if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
/* Cleanup */
memset(priv->wep_keys[index], 0, 13);
/* Copy the key in the driver */
@@ -1907,7 +1907,7 @@ static int atmel_get_encodeext(struct net_device *dev,
encoding->flags = idx + 1;
memset(ext, 0, sizeof(*ext));
-
+
if (!priv->wep_is_on) {
ext->alg = IW_ENCODE_ALG_NONE;
ext->key_len = 0;
@@ -2343,6 +2343,14 @@ static int atmel_get_scan(struct net_device *dev,
iwe.u.freq.e = 0;
current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_FREQ_LEN);
+ /* Add quality statistics */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.level = priv->BSSinfo[i].RSSI;
+ iwe.u.qual.qual = iwe.u.qual.level;
+ /* iwe.u.qual.noise = SOMETHING */
+ current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_QUAL_LEN);
+
+
iwe.cmd = SIOCGIWENCODE;
if (priv->BSSinfo[i].UsingWEP)
iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
@@ -2373,7 +2381,7 @@ static int atmel_get_range(struct net_device *dev,
range->min_nwid = 0x0000;
range->max_nwid = 0x0000;
range->num_channels = 0;
- for (j = 0; j < sizeof(channel_table)/sizeof(channel_table[0]); j++)
+ for (j = 0; j < ARRAY_SIZE(channel_table); j++)
if (priv->reg_domain == channel_table[j].reg_domain) {
range->num_channels = channel_table[j].max - channel_table[j].min + 1;
break;
@@ -2579,9 +2587,9 @@ static const struct iw_priv_args atmel_private_args[] = {
static const struct iw_handler_def atmel_handler_def =
{
- .num_standard = sizeof(atmel_handler)/sizeof(iw_handler),
- .num_private = sizeof(atmel_private_handler)/sizeof(iw_handler),
- .num_private_args = sizeof(atmel_private_args)/sizeof(struct iw_priv_args),
+ .num_standard = ARRAY_SIZE(atmel_handler),
+ .num_private = ARRAY_SIZE(atmel_private_handler),
+ .num_private_args = ARRAY_SIZE(atmel_private_args),
.standard = (iw_handler *) atmel_handler,
.private = (iw_handler *) atmel_private_handler,
.private_args = (struct iw_priv_args *) atmel_private_args,
@@ -2645,7 +2653,7 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
domain[REGDOMAINSZ] = 0;
rc = -EINVAL;
- for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(channel_table); i++) {
/* strcasecmp doesn't exist in the library */
char *a = channel_table[i].name;
char *b = domain;
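
The repeated conversions above swap the open-coded sizeof(x)/sizeof(x[0]) idiom for ARRAY_SIZE() from <linux/kernel.h>. A stand-alone, user-space illustration of why the macro reads better (channel_range and the table contents here are invented for the demo):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))	/* same shape as the kernel macro */

struct channel_range { int min, max; };

int main(void)
{
	static const struct channel_range table[] = {
		{ 1, 11 }, { 1, 13 }, { 1, 14 },
	};
	size_t i;

	/* The array is named once and the element type is never repeated, so
	 * the loop bound cannot drift out of sync with the table definition. */
	for (i = 0; i < ARRAY_SIZE(table); i++)
		printf("range %zu: %d..%d\n", i, table[i].min, table[i].max);
	return 0;
}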
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 785664090bb4..5c410989c4d7 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -5,12 +5,12 @@
Copyright 2000-2001 ATMEL Corporation.
Copyright 2003 Simon Kelley.
- This code was developed from version 2.1.1 of the Atmel drivers,
- released by Atmel corp. under the GPL in December 2002. It also
- includes code from the Linux aironet drivers (C) Benjamin Reed,
- and the Linux PCMCIA package, (C) David Hinds.
+ This code was developed from version 2.1.1 of the Atmel drivers,
+ released by Atmel corp. under the GPL in December 2002. It also
+ includes code from the Linux aironet drivers (C) Benjamin Reed,
+ and the Linux PCMCIA package, (C) David Hinds.
- For all queries about this code, please contact the current author,
+ For all queries about this code, please contact the current author,
Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
This program is free software; you can redistribute it and/or modify
@@ -87,7 +87,7 @@ MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards");
event is received. The config() and release() entry points are
used to configure or release a socket, in response to card
insertion and ejection events. They are invoked from the atmel_cs
- event handler.
+ event handler.
*/
static int atmel_config(struct pcmcia_device *link);
@@ -133,22 +133,22 @@ static void atmel_detach(struct pcmcia_device *p_dev);
device IO routines can use a flag like this to throttle IO to a
card that is not ready to accept it.
*/
-
+
typedef struct local_info_t {
dev_node_t node;
struct net_device *eth_dev;
} local_info_t;
/*======================================================================
-
+
atmel_attach() creates an "instance" of the driver, allocating
local data structures for one device. The device is registered
with Card Services.
-
+
The dev_link structure is initialized, but we don't actually
configure the card at this point -- we wait until we receive a
card insertion event.
-
+
======================================================================*/
static int atmel_probe(struct pcmcia_device *p_dev)
@@ -184,12 +184,12 @@ static int atmel_probe(struct pcmcia_device *p_dev)
} /* atmel_attach */
/*======================================================================
-
+
This deletes a driver "instance". The device is de-registered
with Card Services. If it has been released, all local data
structures are freed. Otherwise, the structures will be freed
when the device is released.
-
+
======================================================================*/
static void atmel_detach(struct pcmcia_device *link)
@@ -202,11 +202,11 @@ static void atmel_detach(struct pcmcia_device *link)
}
/*======================================================================
-
+
atmel_config() is scheduled to run after a CARD_INSERTION event
is received, to configure the PCMCIA socket, and to make the
device available to the system.
-
+
======================================================================*/
#define CS_CHECK(fn, ret) \
@@ -237,12 +237,12 @@ static int atmel_config(struct pcmcia_device *link)
did = handle_to_dev(link).driver_data;
DEBUG(0, "atmel_config(0x%p)\n", link);
-
+
tuple.Attributes = 0;
tuple.TupleData = buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
-
+
/*
This reads the card's CONFIG tuple to find its configuration
registers.
@@ -258,7 +258,7 @@ static int atmel_config(struct pcmcia_device *link)
In this loop, we scan the CIS for configuration table entries,
each of which describes a valid card configuration, including
voltage, IO window, memory window, and interrupt settings.
-
+
We make no assumptions about the card to be configured: we use
just the information available in the CIS. In an ideal world,
this would work for any PCMCIA card, but it requires a complete
@@ -274,17 +274,17 @@ static int atmel_config(struct pcmcia_device *link)
if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
pcmcia_parse_tuple(link, &tuple, &parse) != 0)
goto next_entry;
-
+
if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
if (cfg->index == 0) goto next_entry;
link->conf.ConfigIndex = cfg->index;
-
+
/* Does this card need audio output? */
if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
}
-
+
/* Use power settings for Vcc and Vpp if present */
/* Note that the CIS values need to be rescaled */
if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
@@ -293,11 +293,11 @@ static int atmel_config(struct pcmcia_device *link)
else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
link->conf.Vpp =
dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
-
+
/* Do we need to allocate an interrupt? */
if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
link->conf.Attributes |= CONF_ENABLE_IRQ;
-
+
/* IO window settings */
link->io.NumPorts1 = link->io.NumPorts2 = 0;
if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
@@ -315,18 +315,18 @@ static int atmel_config(struct pcmcia_device *link)
link->io.NumPorts2 = io->win[1].len;
}
}
-
+
/* This reserves IO space but doesn't actually enable it */
if (pcmcia_request_io(link, &link->io) != 0)
goto next_entry;
/* If we got this far, we're cool! */
break;
-
+
next_entry:
CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
}
-
+
/*
Allocate an interrupt line. Note that this does not assign a
handler to the interrupt, unless the 'Handler' member of the
@@ -334,31 +334,31 @@ static int atmel_config(struct pcmcia_device *link)
*/
if (link->conf.Attributes & CONF_ENABLE_IRQ)
CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
-
+
/*
This actually configures the PCMCIA socket -- setting up
the I/O windows and the interrupt mapping, and putting the
card and host interface into "Memory and IO" mode.
*/
CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
-
+
if (link->irq.AssignedIRQ == 0) {
- printk(KERN_ALERT
+ printk(KERN_ALERT
"atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
goto cs_failed;
}
-
- ((local_info_t*)link->priv)->eth_dev =
+
+ ((local_info_t*)link->priv)->eth_dev =
init_atmel_card(link->irq.AssignedIRQ,
link->io.BasePort1,
did ? did->driver_info : ATMEL_FW_TYPE_NONE,
&handle_to_dev(link),
- card_present,
+ card_present,
link);
- if (!((local_info_t*)link->priv)->eth_dev)
+ if (!((local_info_t*)link->priv)->eth_dev)
goto cs_failed;
-
-
+
+
/*
At this point, the dev_node_t structure(s) need to be
initialized and arranged in a linked list at link->dev_node.
@@ -376,11 +376,11 @@ static int atmel_config(struct pcmcia_device *link)
}
/*======================================================================
-
+
After a card is removed, atmel_release() will unregister the
device, and release the PCMCIA configuration. If the device is
still open, this will be postponed until it is closed.
-
+
======================================================================*/
static void atmel_release(struct pcmcia_device *link)
@@ -517,7 +517,7 @@ static void atmel_cs_cleanup(void)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
+ POSSIBILITY OF SUCH DAMAGE.
*/
module_init(atmel_cs_init);
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 3bfa791c323d..92f87fbe750f 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -53,18 +53,18 @@ static int __devinit atmel_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
struct net_device *dev;
-
+
if (pci_enable_device(pdev))
return -ENODEV;
-
+
pci_set_master(pdev);
-
- dev = init_atmel_card(pdev->irq, pdev->resource[1].start,
+
+ dev = init_atmel_card(pdev->irq, pdev->resource[1].start,
ATMEL_FW_TYPE_506,
&pdev->dev, NULL, NULL);
if (!dev)
return -ENODEV;
-
+
pci_set_drvdata(pdev, dev);
return 0;
}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index d6a8bf09878e..94dfb92fab5c 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -159,6 +159,7 @@
/* Chipcommon registers. */
#define BCM43xx_CHIPCOMMON_CAPABILITIES 0x04
+#define BCM43xx_CHIPCOMMON_CTL 0x28
#define BCM43xx_CHIPCOMMON_PLLONDELAY 0xB0
#define BCM43xx_CHIPCOMMON_FREFSELDELAY 0xB4
#define BCM43xx_CHIPCOMMON_SLOWCLKCTL 0xB8
@@ -172,6 +173,33 @@
/* SBTOPCI2 values. */
#define BCM43xx_SBTOPCI2_PREFETCH 0x4
#define BCM43xx_SBTOPCI2_BURST 0x8
+#define BCM43xx_SBTOPCI2_MEMREAD_MULTI 0x20
+
+/* PCI-E core registers. */
+#define BCM43xx_PCIECORE_REG_ADDR 0x0130
+#define BCM43xx_PCIECORE_REG_DATA 0x0134
+#define BCM43xx_PCIECORE_MDIO_CTL 0x0128
+#define BCM43xx_PCIECORE_MDIO_DATA 0x012C
+
+/* PCI-E registers. */
+#define BCM43xx_PCIE_TLP_WORKAROUND 0x0004
+#define BCM43xx_PCIE_DLLP_LINKCTL 0x0100
+
+/* PCI-E MDIO bits. */
+#define BCM43xx_PCIE_MDIO_ST 0x40000000
+#define BCM43xx_PCIE_MDIO_WT 0x10000000
+#define BCM43xx_PCIE_MDIO_DEV 22
+#define BCM43xx_PCIE_MDIO_REG 18
+#define BCM43xx_PCIE_MDIO_TA 0x00020000
+#define BCM43xx_PCIE_MDIO_TC 0x0100
+
+/* MDIO devices. */
+#define BCM43xx_MDIO_SERDES_RX 0x1F
+
+/* SERDES RX registers. */
+#define BCM43xx_SERDES_RXTIMER 0x2
+#define BCM43xx_SERDES_CDR 0x6
+#define BCM43xx_SERDES_CDR_BW 0x7
/* Chipcommon capabilities. */
#define BCM43xx_CAPABILITIES_PCTL 0x00040000
@@ -221,6 +249,7 @@
#define BCM43xx_COREID_USB20_HOST 0x819
#define BCM43xx_COREID_USB20_DEV 0x81a
#define BCM43xx_COREID_SDIO_HOST 0x81b
+#define BCM43xx_COREID_PCIE 0x820
/* Core Information Registers */
#define BCM43xx_CIR_BASE 0xf00
@@ -365,6 +394,9 @@
#define BCM43xx_DEFAULT_SHORT_RETRY_LIMIT 7
#define BCM43xx_DEFAULT_LONG_RETRY_LIMIT 4
+/* FIXME: the next line is a guess as to what the maximum RSSI value might be */
+#define RX_RSSI_MAX 60
+
/* Max size of a security key */
#define BCM43xx_SEC_KEYSIZE 16
/* Security algorithms. */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index a1b783813d8e..5b3c27359a18 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -130,6 +130,10 @@ MODULE_PARM_DESC(fwpostfix, "Postfix for .fw files. Useful for debugging.");
{ PCI_VENDOR_ID_BROADCOM, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
/* Broadcom 4307 802.11b */
{ PCI_VENDOR_ID_BROADCOM, 0x4307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ /* Broadcom 4311 802.11(a)/b/g */
+ { PCI_VENDOR_ID_BROADCOM, 0x4311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ /* Broadcom 4312 802.11a/b/g */
+ { PCI_VENDOR_ID_BROADCOM, 0x4312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
/* Broadcom 4318 802.11b/g */
{ PCI_VENDOR_ID_BROADCOM, 0x4318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
/* Broadcom 4319 802.11a/b/g */
@@ -2600,8 +2604,9 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
/* fetch sb_id_hi from core information registers */
sb_id_hi = bcm43xx_read32(bcm, BCM43xx_CIR_SB_ID_HI);
- core_id = (sb_id_hi & 0xFFF0) >> 4;
- core_rev = (sb_id_hi & 0xF);
+ core_id = (sb_id_hi & 0x8FF0) >> 4;
+ core_rev = (sb_id_hi & 0x7000) >> 8;
+ core_rev |= (sb_id_hi & 0xF);
core_vendor = (sb_id_hi & 0xFFFF0000) >> 16;
/* if present, chipcommon is always core 0; read the chipid from it */
@@ -2679,14 +2684,10 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
bcm->chip_id, bcm->chip_rev);
dprintk(KERN_INFO PFX "Number of cores: %d\n", core_count);
if (bcm->core_chipcommon.available) {
- dprintk(KERN_INFO PFX "Core 0: ID 0x%x, rev 0x%x, vendor 0x%x, %s\n",
- core_id, core_rev, core_vendor,
- bcm43xx_core_enabled(bcm) ? "enabled" : "disabled");
- }
-
- if (bcm->core_chipcommon.available)
+ dprintk(KERN_INFO PFX "Core 0: ID 0x%x, rev 0x%x, vendor 0x%x\n",
+ core_id, core_rev, core_vendor);
current_core = 1;
- else
+ } else
current_core = 0;
for ( ; current_core < core_count; current_core++) {
struct bcm43xx_coreinfo *core;
@@ -2704,13 +2705,13 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
core_rev = (sb_id_hi & 0xF);
core_vendor = (sb_id_hi & 0xFFFF0000) >> 16;
- dprintk(KERN_INFO PFX "Core %d: ID 0x%x, rev 0x%x, vendor 0x%x, %s\n",
- current_core, core_id, core_rev, core_vendor,
- bcm43xx_core_enabled(bcm) ? "enabled" : "disabled" );
+ dprintk(KERN_INFO PFX "Core %d: ID 0x%x, rev 0x%x, vendor 0x%x\n",
+ current_core, core_id, core_rev, core_vendor);
core = NULL;
switch (core_id) {
case BCM43xx_COREID_PCI:
+ case BCM43xx_COREID_PCIE:
core = &bcm->core_pci;
if (core->available) {
printk(KERN_WARNING PFX "Multiple PCI cores found.\n");
@@ -2749,12 +2750,12 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
case 6:
case 7:
case 9:
+ case 10:
break;
default:
- printk(KERN_ERR PFX "Error: Unsupported 80211 core revision %u\n",
+ printk(KERN_WARNING PFX
+ "Unsupported 80211 core revision %u\n",
core_rev);
- err = -ENODEV;
- goto out;
}
bcm->nr_80211_available++;
core->priv = ext_80211;
@@ -2868,16 +2869,11 @@ static int bcm43xx_wireless_core_init(struct bcm43xx_private *bcm,
u32 sbimconfiglow;
u8 limit;
- if (bcm->chip_rev < 5) {
+ if (bcm->core_pci.rev <= 5 && bcm->core_pci.id != BCM43xx_COREID_PCIE) {
sbimconfiglow = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
- if (bcm->bustype == BCM43xx_BUSTYPE_PCI)
- sbimconfiglow |= 0x32;
- else if (bcm->bustype == BCM43xx_BUSTYPE_SB)
- sbimconfiglow |= 0x53;
- else
- assert(0);
+ sbimconfiglow |= 0x32;
bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, sbimconfiglow);
}
@@ -3004,22 +3000,64 @@ static void bcm43xx_pcicore_broadcast_value(struct bcm43xx_private *bcm,
static int bcm43xx_pcicore_commit_settings(struct bcm43xx_private *bcm)
{
- int err;
- struct bcm43xx_coreinfo *old_core;
+ int err = 0;
- old_core = bcm->current_core;
- err = bcm43xx_switch_core(bcm, &bcm->core_pci);
- if (err)
- goto out;
+ bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
- bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
+ if (bcm->core_chipcommon.available) {
+ err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
+ if (err)
+ goto out;
+
+ bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
+
+ /* this function is always called when a PCI core is mapped */
+ err = bcm43xx_switch_core(bcm, &bcm->core_pci);
+ if (err)
+ goto out;
+ } else
+ bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
+
+ bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
- bcm43xx_switch_core(bcm, old_core);
- assert(err == 0);
out:
return err;
}
+static u32 bcm43xx_pcie_reg_read(struct bcm43xx_private *bcm, u32 address)
+{
+ bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_ADDR, address);
+ return bcm43xx_read32(bcm, BCM43xx_PCIECORE_REG_DATA);
+}
+
+static void bcm43xx_pcie_reg_write(struct bcm43xx_private *bcm, u32 address,
+ u32 data)
+{
+ bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_ADDR, address);
+ bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_DATA, data);
+}
+
+static void bcm43xx_pcie_mdio_write(struct bcm43xx_private *bcm, u8 dev, u8 reg,
+ u16 data)
+{
+ int i;
+
+ bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0x0082);
+ bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_DATA, BCM43xx_PCIE_MDIO_ST |
+ BCM43xx_PCIE_MDIO_WT | (dev << BCM43xx_PCIE_MDIO_DEV) |
+ (reg << BCM43xx_PCIE_MDIO_REG) | BCM43xx_PCIE_MDIO_TA |
+ data);
+ udelay(10);
+
+ for (i = 0; i < 10; i++) {
+ if (bcm43xx_read32(bcm, BCM43xx_PCIECORE_MDIO_CTL) &
+ BCM43xx_PCIE_MDIO_TC)
+ break;
+ msleep(1);
+ }
+ bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0);
+}
+
/* Make an I/O Core usable. "core_mask" is the bitmask of the cores to enable.
* To enable core 0, pass a core_mask of 1<<0
*/
@@ -3039,7 +3077,8 @@ static int bcm43xx_setup_backplane_pci_connection(struct bcm43xx_private *bcm,
if (err)
goto out;
- if (bcm->core_pci.rev < 6) {
+ if (bcm->current_core->rev < 6 ||
+ bcm->current_core->id == BCM43xx_COREID_PCI) {
value = bcm43xx_read32(bcm, BCM43xx_CIR_SBINTVEC);
value |= (1 << backplane_flag_nr);
bcm43xx_write32(bcm, BCM43xx_CIR_SBINTVEC, value);
@@ -3057,21 +3096,46 @@ static int bcm43xx_setup_backplane_pci_connection(struct bcm43xx_private *bcm,
}
}
- value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
- value |= BCM43xx_SBTOPCI2_PREFETCH | BCM43xx_SBTOPCI2_BURST;
- bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
-
- if (bcm->core_pci.rev < 5) {
- value = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
- value |= (2 << BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_SHIFT)
- & BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
- value |= (3 << BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_SHIFT)
- & BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
- bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, value);
- err = bcm43xx_pcicore_commit_settings(bcm);
- assert(err == 0);
+ if (bcm->current_core->id == BCM43xx_COREID_PCI) {
+ value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
+ value |= BCM43xx_SBTOPCI2_PREFETCH | BCM43xx_SBTOPCI2_BURST;
+ bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
+
+ if (bcm->current_core->rev < 5) {
+ value = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
+ value |= (2 << BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_SHIFT)
+ & BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
+ value |= (3 << BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_SHIFT)
+ & BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
+ bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, value);
+ err = bcm43xx_pcicore_commit_settings(bcm);
+ assert(err == 0);
+ } else if (bcm->current_core->rev >= 11) {
+ value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
+ value |= BCM43xx_SBTOPCI2_MEMREAD_MULTI;
+ bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
+ }
+ } else {
+ if (bcm->current_core->rev == 0 || bcm->current_core->rev == 1) {
+ value = bcm43xx_pcie_reg_read(bcm, BCM43xx_PCIE_TLP_WORKAROUND);
+ value |= 0x8;
+ bcm43xx_pcie_reg_write(bcm, BCM43xx_PCIE_TLP_WORKAROUND,
+ value);
+ }
+ if (bcm->current_core->rev == 0) {
+ bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
+ BCM43xx_SERDES_RXTIMER, 0x8128);
+ bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
+ BCM43xx_SERDES_CDR, 0x0100);
+ bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
+ BCM43xx_SERDES_CDR_BW, 0x1466);
+ } else if (bcm->current_core->rev == 1) {
+ value = bcm43xx_pcie_reg_read(bcm, BCM43xx_PCIE_DLLP_LINKCTL);
+ value |= 0x40;
+ bcm43xx_pcie_reg_write(bcm, BCM43xx_PCIE_DLLP_LINKCTL,
+ value);
+ }
}
-
out_switch_back:
err = bcm43xx_switch_core(bcm, old_core);
out:
@@ -3140,55 +3204,27 @@ static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm)
static void do_periodic_work(struct bcm43xx_private *bcm)
{
- unsigned int state;
-
- state = bcm->periodic_state;
- if (state % 8 == 0)
+ if (bcm->periodic_state % 8 == 0)
bcm43xx_periodic_every120sec(bcm);
- if (state % 4 == 0)
+ if (bcm->periodic_state % 4 == 0)
bcm43xx_periodic_every60sec(bcm);
- if (state % 2 == 0)
+ if (bcm->periodic_state % 2 == 0)
bcm43xx_periodic_every30sec(bcm);
- if (state % 1 == 0)
- bcm43xx_periodic_every15sec(bcm);
- bcm->periodic_state = state + 1;
+ bcm43xx_periodic_every15sec(bcm);
schedule_delayed_work(&bcm->periodic_work, HZ * 15);
}
-/* Estimate a "Badness" value based on the periodic work
- * state-machine state. "Badness" is worse (bigger), if the
- * periodic work will take longer.
- */
-static int estimate_periodic_work_badness(unsigned int state)
-{
- int badness = 0;
-
- if (state % 8 == 0) /* every 120 sec */
- badness += 10;
- if (state % 4 == 0) /* every 60 sec */
- badness += 5;
- if (state % 2 == 0) /* every 30 sec */
- badness += 1;
- if (state % 1 == 0) /* every 15 sec */
- badness += 1;
-
-#define BADNESS_LIMIT 4
- return badness;
-}
-
static void bcm43xx_periodic_work_handler(void *d)
{
struct bcm43xx_private *bcm = d;
struct net_device *net_dev = bcm->net_dev;
unsigned long flags;
u32 savedirqs = 0;
- int badness;
unsigned long orig_trans_start = 0;
mutex_lock(&bcm->mutex);
- badness = estimate_periodic_work_badness(bcm->periodic_state);
- if (badness > BADNESS_LIMIT) {
+ if (unlikely(bcm->periodic_state % 4 == 0)) {
/* Periodic work will take a long time, so we want it to
* be preemptible.
*/
@@ -3220,7 +3256,7 @@ static void bcm43xx_periodic_work_handler(void *d)
do_periodic_work(bcm);
- if (badness > BADNESS_LIMIT) {
+ if (unlikely(bcm->periodic_state % 4 == 0)) {
spin_lock_irqsave(&bcm->irq_lock, flags);
tasklet_enable(&bcm->isr_tasklet);
bcm43xx_interrupt_enable(bcm, savedirqs);
@@ -3231,6 +3267,7 @@ static void bcm43xx_periodic_work_handler(void *d)
net_dev->trans_start = orig_trans_start;
}
mmiowb();
+ bcm->periodic_state++;
spin_unlock_irqrestore(&bcm->irq_lock, flags);
mutex_unlock(&bcm->mutex);
}
@@ -3676,7 +3713,7 @@ static int bcm43xx_read_phyinfo(struct bcm43xx_private *bcm)
bcm->ieee->freq_band = IEEE80211_24GHZ_BAND;
break;
case BCM43xx_PHYTYPE_G:
- if (phy_rev > 7)
+ if (phy_rev > 8)
phy_rev_ok = 0;
bcm->ieee->modulation = IEEE80211_OFDM_MODULATION |
IEEE80211_CCK_MODULATION;
@@ -3688,6 +3725,8 @@ static int bcm43xx_read_phyinfo(struct bcm43xx_private *bcm)
phy_type);
return -ENODEV;
};
+ bcm->ieee->perfect_rssi = RX_RSSI_MAX;
+ bcm->ieee->worst_rssi = 0;
if (!phy_rev_ok) {
printk(KERN_WARNING PFX "Invalid PHY Revision %x\n",
phy_rev);
@@ -3974,11 +4013,6 @@ static int bcm43xx_ieee80211_hard_start_xmit(struct ieee80211_txb *txb,
return NETDEV_TX_OK;
}
-static struct net_device_stats * bcm43xx_net_get_stats(struct net_device *net_dev)
-{
- return &(bcm43xx_priv(net_dev)->ieee->stats);
-}
-
static void bcm43xx_net_tx_timeout(struct net_device *net_dev)
{
struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
@@ -4092,7 +4126,6 @@ static int __devinit bcm43xx_init_one(struct pci_dev *pdev,
net_dev->open = bcm43xx_net_open;
net_dev->stop = bcm43xx_net_stop;
- net_dev->get_stats = bcm43xx_net_get_stats;
net_dev->tx_timeout = bcm43xx_net_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
net_dev->poll_controller = bcm43xx_net_poll_controller;
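
The periodic-work hunks above delete the separate "badness" estimator: the only case that needed the preemptible slow path is exactly the one where state % 4 == 0, so the handler now tests that directly and bumps periodic_state once at the end. A stand-alone sketch of the resulting cadence (user-space, illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int state;

	/* One tick every 15 seconds; lower-frequency work keys off the counter. */
	for (state = 0; state < 8; state++) {
		printf("tick %u:", state);
		if (state % 8 == 0)
			printf(" 120s-work");
		if (state % 4 == 0)
			printf(" 60s-work (heavy, run preemptibly)");
		if (state % 2 == 0)
			printf(" 30s-work");
		printf(" 15s-work\n");
	}
	return 0;
}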
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_power.c b/drivers/net/wireless/bcm43xx/bcm43xx_power.c
index 6569da3a7a39..7e774f410953 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_power.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_power.c
@@ -153,8 +153,6 @@ int bcm43xx_pctl_init(struct bcm43xx_private *bcm)
int err, maxfreq;
struct bcm43xx_coreinfo *old_core;
- if (!(bcm->chipcommon_capabilities & BCM43xx_CAPABILITIES_PCTL))
- return 0;
old_core = bcm->current_core;
err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
if (err == -ENODEV)
@@ -162,11 +160,27 @@ int bcm43xx_pctl_init(struct bcm43xx_private *bcm)
if (err)
goto out;
- maxfreq = bcm43xx_pctl_clockfreqlimit(bcm, 1);
- bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_PLLONDELAY,
- (maxfreq * 150 + 999999) / 1000000);
- bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_FREFSELDELAY,
- (maxfreq * 15 + 999999) / 1000000);
+ if (bcm->chip_id == 0x4321) {
+ if (bcm->chip_rev == 0)
+ bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_CTL, 0x03A4);
+ if (bcm->chip_rev == 1)
+ bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_CTL, 0x00A4);
+ }
+
+ if (bcm->chipcommon_capabilities & BCM43xx_CAPABILITIES_PCTL) {
+ if (bcm->current_core->rev >= 10) {
+ /* Set Idle Power clock rate to 1Mhz */
+ bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_SYSCLKCTL,
+ (bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SYSCLKCTL)
+ & 0x0000FFFF) | 0x40000);
+ } else {
+ maxfreq = bcm43xx_pctl_clockfreqlimit(bcm, 1);
+ bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_PLLONDELAY,
+ (maxfreq * 150 + 999999) / 1000000);
+ bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_FREFSELDELAY,
+ (maxfreq * 15 + 999999) / 1000000);
+ }
+ }
err = bcm43xx_switch_core(bcm, old_core);
assert(err == 0);
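
The branch kept above still programs PLLONDELAY and FREFSELDELAY as (maxfreq * N + 999999) / 1000000, which is ordinary round-up integer division: adding 999999 before dividing by 10^6 gives ceil() instead of floor(). A stand-alone illustration, with 80 MHz chosen arbitrarily as the clock limit:

#include <stdio.h>

int main(void)
{
	unsigned long long maxfreq = 80000000ULL;	/* example limit in Hz, not a real chip value */

	printf("PLLONDELAY:   %llu\n", (maxfreq * 150 + 999999ULL) / 1000000ULL);
	printf("FREFSELDELAY: %llu\n", (maxfreq * 15 + 999999ULL) / 1000000ULL);
	return 0;
}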
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index d27016f8c736..a659442b9c15 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -47,9 +47,6 @@
#define BCM43xx_WX_VERSION 18
#define MAX_WX_STRING 80
-/* FIXME: the next line is a guess as to what the maximum RSSI value might be */
-#define RX_RSSI_MAX 60
-
static int bcm43xx_wx_get_name(struct net_device *net_dev,
struct iw_request_info *info,
@@ -693,6 +690,7 @@ static int bcm43xx_wx_set_swencryption(struct net_device *net_dev,
bcm->ieee->host_encrypt = !!on;
bcm->ieee->host_decrypt = !!on;
bcm->ieee->host_build_iv = !on;
+ bcm->ieee->host_strip_iv_icv = !on;
spin_unlock_irqrestore(&bcm->irq_lock, flags);
mutex_unlock(&bcm->mutex);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
index 0159e4e93201..3e2462671690 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
@@ -544,24 +544,6 @@ int bcm43xx_rx(struct bcm43xx_private *bcm,
}
frame_ctl = le16_to_cpu(wlhdr->frame_ctl);
- if ((frame_ctl & IEEE80211_FCTL_PROTECTED) && !bcm->ieee->host_decrypt) {
- frame_ctl &= ~IEEE80211_FCTL_PROTECTED;
- wlhdr->frame_ctl = cpu_to_le16(frame_ctl);
- /* trim IV and ICV */
- /* FIXME: this must be done only for WEP encrypted packets */
- if (skb->len < 32) {
- dprintkl(KERN_ERR PFX "RX packet dropped (PROTECTED flag "
- "set and length < 32)\n");
- return -EINVAL;
- } else {
- memmove(skb->data + 4, skb->data, 24);
- skb_pull(skb, 4);
- skb_trim(skb, skb->len - 4);
- stats.len -= 8;
- }
- wlhdr = (struct ieee80211_hdr_4addr *)(skb->data);
- }
-
switch (WLAN_FC_GET_TYPE(frame_ctl)) {
case IEEE80211_FTYPE_MGMT:
ieee80211_rx_mgt(bcm->ieee, wlhdr, &stats);
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index c2fa011be291..d1de9766c831 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -425,8 +425,14 @@ static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state)
static int prism2_pci_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
+ int err;
- pci_enable_device(pdev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+ dev->name);
+ return err;
+ }
pci_restore_state(pdev);
prism2_hw_config(dev, 0);
if (netif_running(dev)) {
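
This hostap hunk is the first of four in the series (hostap, ipw2100, ipw2200, orinoco below) that stop ignoring the return value of pci_enable_device() on resume; the function was being annotated __must_check upstream around this time, which is presumably what prompted the sweep. The shared shape of the fix, sketched as a hypothetical my_pci_resume() where the elided part is whatever re-initialization each driver already performs:

#include <linux/pci.h>
#include <linux/netdevice.h>

static int my_pci_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;		/* bail out rather than poke a device that never woke up */
	}
	pci_restore_state(pdev);
	/* ... driver-specific hardware re-init and netif_device_attach() ... */
	return 0;
}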
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 4e4eaa2a99ca..79607b8b877c 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -5827,19 +5827,6 @@ static void ipw2100_tx_timeout(struct net_device *dev)
schedule_reset(priv);
}
-/*
- * TODO: reimplement it so that it reads statistics
- * from the adapter using ordinal tables
- * instead of/in addition to collecting them
- * in the driver
- */
-static struct net_device_stats *ipw2100_stats(struct net_device *dev)
-{
- struct ipw2100_priv *priv = ieee80211_priv(dev);
-
- return &priv->ieee->stats;
-}
-
static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
{
/* This is called when wpa_supplicant loads and closes the driver
@@ -6022,7 +6009,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
dev->open = ipw2100_open;
dev->stop = ipw2100_close;
dev->init = ipw2100_net_init;
- dev->get_stats = ipw2100_stats;
dev->ethtool_ops = &ipw2100_ethtool_ops;
dev->tx_timeout = ipw2100_tx_timeout;
dev->wireless_handlers = &ipw2100_wx_handler_def;
@@ -6423,6 +6409,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
{
struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
struct net_device *dev = priv->net_dev;
+ int err;
u32 val;
if (IPW2100_PM_DISABLED)
@@ -6433,7 +6420,12 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name);
pci_set_power_state(pci_dev, PCI_D0);
- pci_enable_device(pci_dev);
+ err = pci_enable_device(pci_dev);
+ if (err) {
+ printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+ dev->name);
+ return err;
+ }
pci_restore_state(pci_dev);
/*
@@ -7568,11 +7560,10 @@ static int ipw2100_wx_set_genie(struct net_device *dev,
return -EINVAL;
if (wrqu->data.length) {
- buf = kmalloc(wrqu->data.length, GFP_KERNEL);
+ buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, extra, wrqu->data.length);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = wrqu->data.length;
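
The WPA-IE hunk just above is a textbook kmalloc()+memcpy() to kmemdup() conversion. Shown side by side with hypothetical helpers (copy_ie_old, copy_ie_new) standing in for the before and after shapes:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>

static u8 *copy_ie_old(const void *extra, size_t len)
{
	u8 *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return NULL;
	memcpy(buf, extra, len);	/* two steps, two chances to get 'len' wrong */
	return buf;
}

static u8 *copy_ie_new(const void *extra, size_t len)
{
	return kmemdup(extra, len, GFP_KERNEL);	/* allocate and copy in one call */
}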
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 72120d5c2f7b..c692d01a76ca 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -11727,12 +11727,18 @@ static int ipw_pci_resume(struct pci_dev *pdev)
{
struct ipw_priv *priv = pci_get_drvdata(pdev);
struct net_device *dev = priv->net_dev;
+ int err;
u32 val;
printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
pci_set_power_state(pdev, PCI_D0);
- pci_enable_device(pdev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+ dev->name);
+ return err;
+ }
pci_restore_state(pdev);
/*
diff --git a/drivers/net/wireless/orinoco_pci.h b/drivers/net/wireless/orinoco_pci.h
index be1abea4b64f..f4e5e06760c1 100644
--- a/drivers/net/wireless/orinoco_pci.h
+++ b/drivers/net/wireless/orinoco_pci.h
@@ -60,7 +60,12 @@ static int orinoco_pci_resume(struct pci_dev *pdev)
int err;
pci_set_power_state(pdev, 0);
- pci_enable_device(pdev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+ dev->name);
+ return err;
+ }
pci_restore_state(pdev);
err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED,
diff --git a/drivers/net/wireless/prism54/isl_38xx.c b/drivers/net/wireless/prism54/isl_38xx.c
index 23deee69974b..02fc67bccbd0 100644
--- a/drivers/net/wireless/prism54/isl_38xx.c
+++ b/drivers/net/wireless/prism54/isl_38xx.c
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
* Copyright (C) 2003-2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>_
*
@@ -38,7 +37,7 @@
* isl38xx_disable_interrupts - disable all interrupts
* @device: pci memory base address
*
- * Instructs the device to disable all interrupt reporting by asserting
+ * Instructs the device to disable all interrupt reporting by asserting
* the IRQ line. New events may still show up in the interrupt identification
* register located at offset %ISL38XX_INT_IDENT_REG.
*/
@@ -204,17 +203,19 @@ isl38xx_interface_reset(void __iomem *device_base, dma_addr_t host_address)
/* enable the interrupt for detecting initialization */
/* Note: Do not enable other interrupts here. We want the
- * device to have come up first 100% before allowing any other
+ * device to have come up first 100% before allowing any other
* interrupts. */
isl38xx_w32_flush(device_base, ISL38XX_INT_IDENT_INIT, ISL38XX_INT_EN_REG);
udelay(ISL38XX_WRITEIO_DELAY); /* allow complete full reset */
}
void
-isl38xx_enable_common_interrupts(void __iomem *device_base) {
+isl38xx_enable_common_interrupts(void __iomem *device_base)
+{
u32 reg;
- reg = ( ISL38XX_INT_IDENT_UPDATE |
- ISL38XX_INT_IDENT_SLEEP | ISL38XX_INT_IDENT_WAKEUP);
+
+ reg = ISL38XX_INT_IDENT_UPDATE | ISL38XX_INT_IDENT_SLEEP |
+ ISL38XX_INT_IDENT_WAKEUP;
isl38xx_w32_flush(device_base, reg, ISL38XX_INT_EN_REG);
udelay(ISL38XX_WRITEIO_DELAY);
}
@@ -234,23 +235,21 @@ isl38xx_in_queue(isl38xx_control_block *cb, int queue)
/* send queues */
case ISL38XX_CB_TX_MGMTQ:
BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
+
case ISL38XX_CB_TX_DATA_LQ:
case ISL38XX_CB_TX_DATA_HQ:
BUG_ON(delta > ISL38XX_CB_TX_QSIZE);
return delta;
- break;
/* receive queues */
case ISL38XX_CB_RX_MGMTQ:
BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
return ISL38XX_CB_MGMT_QSIZE - delta;
- break;
case ISL38XX_CB_RX_DATA_LQ:
case ISL38XX_CB_RX_DATA_HQ:
BUG_ON(delta > ISL38XX_CB_RX_QSIZE);
return ISL38XX_CB_RX_QSIZE - delta;
- break;
}
BUG();
return 0;
diff --git a/drivers/net/wireless/prism54/isl_38xx.h b/drivers/net/wireless/prism54/isl_38xx.h
index 8af20980af8d..3fadcb6f5297 100644
--- a/drivers/net/wireless/prism54/isl_38xx.h
+++ b/drivers/net/wireless/prism54/isl_38xx.h
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -67,10 +66,10 @@
* @base: (host) memory base address of the device
* @val: 32bit value (host order) to write
* @offset: byte offset into @base to write value to
- *
+ *
* This helper takes care of writing a 32bit datum to the
- * specified offset into the device's pci memory space, and making sure
- * the pci memory buffers get flushed by performing one harmless read
+ * specified offset into the device's pci memory space, and making sure
+ * the pci memory buffers get flushed by performing one harmless read
* from the %ISL38XX_PCI_POSTING_FLUSH offset.
*/
static inline void
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 286325ca3293..4a20e45de3ca 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
* (C) 2003,2004 Aurelien Alleaume <slts@free.fr>
* (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
@@ -55,12 +54,12 @@ static const unsigned char scan_rate_list[] = { 2, 4, 11, 22,
* prism54_mib_mode_helper - MIB change mode helper function
* @mib: the &struct islpci_mib object to modify
* @iw_mode: new mode (%IW_MODE_*)
- *
+ *
* This is a helper function, hence it does not lock. Make sure
- * caller deals with locking *if* necessary. This function sets the
- * mode-dependent mib values and does the mapping of the Linux
- * Wireless API modes to Device firmware modes. It also checks for
- * correct valid Linux wireless modes.
+ * caller deals with locking *if* necessary. This function sets the
+ * mode-dependent mib values and does the mapping of the Linux
+ * Wireless API modes to Device firmware modes. It also checks for
+ * correct valid Linux wireless modes.
*/
static int
prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode)
@@ -118,7 +117,7 @@ prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode)
*
* this function initializes the struct given as @mib with defaults,
* of which many are retrieved from the global module parameter
- * variables.
+ * variables.
*/
void
@@ -134,7 +133,7 @@ prism54_mib_init(islpci_private *priv)
authen = CARD_DEFAULT_AUTHEN;
wep = CARD_DEFAULT_WEP;
filter = CARD_DEFAULT_FILTER; /* (0) Do not filter un-encrypted data */
- dot1x = CARD_DEFAULT_DOT1X;
+ dot1x = CARD_DEFAULT_DOT1X;
mlme = CARD_DEFAULT_MLME_MODE;
conformance = CARD_DEFAULT_CONFORMANCE;
power = 127;
@@ -228,7 +227,7 @@ prism54_get_wireless_stats(struct net_device *ndev)
} else
priv->iwstatistics.qual.updated = 0;
- /* Update our wireless stats, but do not schedule to often
+ /* Update our wireless stats, but do not schedule to often
* (max 1 HZ) */
if ((priv->stats_timestamp == 0) ||
time_after(jiffies, priv->stats_timestamp + 1 * HZ)) {
@@ -705,7 +704,7 @@ prism54_get_scan(struct net_device *ndev, struct iw_request_info *info,
* Starting with WE-17, the buffer can be as big as needed.
* But the device won't report anything if you change the value
* of IWMAX_BSS=24. */
-
+
rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
bsslist = r.ptr;
@@ -785,7 +784,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
return rvalue;
}
-/* Provides no functionality, just completes the ioctl. In essence this is a
+/* Provides no functionality, just completes the ioctl. In essence this is a
* just a cosmetic ioctl.
*/
static int
@@ -1104,7 +1103,7 @@ prism54_set_encode(struct net_device *ndev, struct iw_request_info *info,
&key);
}
/*
- * If a valid key is set, encryption should be enabled
+ * If a valid key is set, encryption should be enabled
* (user may turn it off later).
* This is also how "iwconfig ethX key on" works
*/
@@ -1126,7 +1125,7 @@ prism54_set_encode(struct net_device *ndev, struct iw_request_info *info,
}
/* now read the flags */
if (dwrq->flags & IW_ENCODE_DISABLED) {
- /* Encoding disabled,
+ /* Encoding disabled,
* authen = DOT11_AUTH_OS;
* invoke = 0;
* exunencrypt = 0; */
@@ -1214,7 +1213,7 @@ prism54_get_txpower(struct net_device *ndev, struct iw_request_info *info,
vwrq->value = (s32) r.u / 4;
vwrq->fixed = 1;
/* radio is not turned off
- * btw: how is possible to turn off only the radio
+ * btw: how is possible to turn off only the radio
*/
vwrq->disabled = 0;
@@ -2354,17 +2353,17 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
handle_request(priv, mlme, oid);
send_formatted_event(priv, "Authenticate request (ex)", mlme, 1);
- if (priv->iw_mode != IW_MODE_MASTER
+ if (priv->iw_mode != IW_MODE_MASTER
&& mlmeex->state != DOT11_STATE_AUTHING)
break;
confirm = kmalloc(sizeof(struct obj_mlmeex) + 6, GFP_ATOMIC);
- if (!confirm)
+ if (!confirm)
break;
memcpy(&confirm->address, mlmeex->address, ETH_ALEN);
- printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
+ printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
mlmeex->address[0],
mlmeex->address[1],
mlmeex->address[2],
@@ -2398,10 +2397,10 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
handle_request(priv, mlme, oid);
send_formatted_event(priv, "Associate request (ex)", mlme, 1);
- if (priv->iw_mode != IW_MODE_MASTER
+ if (priv->iw_mode != IW_MODE_MASTER
&& mlmeex->state != DOT11_STATE_ASSOCING)
break;
-
+
confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC);
if (!confirm)
@@ -2417,7 +2416,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
if (!wpa_ie_len) {
printk(KERN_DEBUG "No WPA IE found from "
- "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
+ "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
mlmeex->address[0],
mlmeex->address[1],
mlmeex->address[2],
@@ -2435,14 +2434,14 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
mgt_set_varlen(priv, oid, confirm, wpa_ie_len);
kfree(confirm);
-
+
break;
case DOT11_OID_REASSOCIATEEX:
handle_request(priv, mlme, oid);
send_formatted_event(priv, "Reassociate request (ex)", mlme, 1);
- if (priv->iw_mode != IW_MODE_MASTER
+ if (priv->iw_mode != IW_MODE_MASTER
&& mlmeex->state != DOT11_STATE_ASSOCING)
break;
@@ -2461,7 +2460,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
if (!wpa_ie_len) {
printk(KERN_DEBUG "No WPA IE found from "
- "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
+ "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
mlmeex->address[0],
mlmeex->address[1],
mlmeex->address[2],
@@ -2473,13 +2472,13 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
break;
}
- confirm->size = wpa_ie_len;
+ confirm->size = wpa_ie_len;
memcpy(&confirm->data, wpa_ie, wpa_ie_len);
mgt_set_varlen(priv, oid, confirm, wpa_ie_len);
kfree(confirm);
-
+
break;
default:
@@ -2545,10 +2544,10 @@ enum {
#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
((int) (&((struct prism2_hostapd_param *) 0)->u.generic_elem.data))
-/* Maximum length for algorithm names (-1 for nul termination)
+/* Maximum length for algorithm names (-1 for nul termination)
* used in ioctl() */
#define HOSTAP_CRYPT_ALG_NAME_LEN 16
-
+
struct prism2_hostapd_param {
u32 cmd;
u8 sta_addr[ETH_ALEN];
@@ -2621,7 +2620,7 @@ prism2_ioctl_set_encryption(struct net_device *dev,
&key);
}
/*
- * If a valid key is set, encryption should be enabled
+ * If a valid key is set, encryption should be enabled
* (user may turn it off later).
* This is also how "iwconfig ethX key on" works
*/
@@ -2643,7 +2642,7 @@ prism2_ioctl_set_encryption(struct net_device *dev,
}
/* now read the flags */
if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
- /* Encoding disabled,
+ /* Encoding disabled,
* authen = DOT11_AUTH_OS;
* invoke = 0;
* exunencrypt = 0; */
@@ -2710,7 +2709,7 @@ prism2_ioctl_set_generic_element(struct net_device *ndev,
ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
- if (ret == 0)
+ if (ret == 0)
printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
ndev->name);
}
@@ -2870,7 +2869,7 @@ prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
mlme = DOT11_MLME_AUTO;
printk("%s: Disabling WPA\n", ndev->name);
break;
- case 2:
+ case 2:
case 1: /* WPA */
printk("%s: Enabling WPA\n", ndev->name);
break;
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index 65f33acd0a42..e8183d30c52e 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
* (C) 2003 Aurelien Alleaume <slts@free.fr>
* (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h
index 419edf7ccf1a..b7534c2869c8 100644
--- a/drivers/net/wireless/prism54/isl_oid.h
+++ b/drivers/net/wireless/prism54/isl_oid.h
@@ -1,6 +1,4 @@
/*
- *
- *
* Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
* Copyright (C) 2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
* Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
@@ -23,7 +21,7 @@
#if !defined(_ISL_OID_H)
#define _ISL_OID_H
-/*
+/*
* MIB related constant and structure definitions for communicating
* with the device firmware
*/
@@ -99,21 +97,21 @@ struct obj_attachment {
char data[0];
} __attribute__((packed));
-/*
+/*
* in case everything's ok, the inlined function below will be
* optimized away by the compiler...
*/
static inline void
__bug_on_wrong_struct_sizes(void)
{
- BUG_ON(sizeof (struct obj_ssid) != 34);
- BUG_ON(sizeof (struct obj_key) != 34);
- BUG_ON(sizeof (struct obj_mlme) != 12);
- BUG_ON(sizeof (struct obj_mlmeex) != 14);
- BUG_ON(sizeof (struct obj_buffer) != 8);
- BUG_ON(sizeof (struct obj_bss) != 60);
- BUG_ON(sizeof (struct obj_bsslist) != 4);
- BUG_ON(sizeof (struct obj_frequencies) != 2);
+ BUILD_BUG_ON(sizeof (struct obj_ssid) != 34);
+ BUILD_BUG_ON(sizeof (struct obj_key) != 34);
+ BUILD_BUG_ON(sizeof (struct obj_mlme) != 12);
+ BUILD_BUG_ON(sizeof (struct obj_mlmeex) != 14);
+ BUILD_BUG_ON(sizeof (struct obj_buffer) != 8);
+ BUILD_BUG_ON(sizeof (struct obj_bss) != 60);
+ BUILD_BUG_ON(sizeof (struct obj_bsslist) != 4);
+ BUILD_BUG_ON(sizeof (struct obj_frequencies) != 2);
}
enum dot11_state_t {
@@ -154,13 +152,13 @@ enum dot11_priv_t {
/* Prism "Nitro" / Frameburst / "Packet Frame Grouping"
* Value is in microseconds. Represents the # microseconds
- * the firmware will take to group frames before sending out then out
+ * the firmware will take to group frames before sending out then out
* together with a CSMA contention. Without this all frames are
- * sent with a CSMA contention.
- * Bibliography:
+ * sent with a CSMA contention.
+ * Bibliography:
* http://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html
*/
-enum dot11_maxframeburst_t {
+enum dot11_maxframeburst_t {
/* Values for DOT11_OID_MAXFRAMEBURST */
DOT11_MAXFRAMEBURST_OFF = 0, /* Card firmware default */
DOT11_MAXFRAMEBURST_MIXED_SAFE = 650, /* 802.11 a,b,g safe */
@@ -176,9 +174,9 @@ enum dot11_maxframeburst_t {
/* Support for 802.11 long and short frame preambles.
* Long preamble uses 128-bit sync field, 8-bit CRC
* Short preamble uses 56-bit sync field, 16-bit CRC
- *
+ *
* 802.11a -- not sure, both optionally ?
- * 802.11b supports long and optionally short
+ * 802.11b supports long and optionally short
* 802.11g supports both */
enum dot11_preamblesettings_t {
DOT11_PREAMBLESETTING_LONG = 0,
@@ -194,7 +192,7 @@ enum dot11_preamblesettings_t {
* Long uses 802.11a slot timing (9 usec ?)
* Short uses 802.11b slot timing (20 usec ?) */
enum dot11_slotsettings_t {
- DOT11_SLOTSETTINGS_LONG = 0,
+ DOT11_SLOTSETTINGS_LONG = 0,
/* Allows *only* long 802.11b slot timing */
DOT11_SLOTSETTINGS_SHORT = 1,
/* Allows *only* long 802.11a slot timing */
@@ -203,7 +201,7 @@ enum dot11_slotsettings_t {
};
/* All you need to know, ERP is "Extended Rate PHY".
- * An Extended Rate PHY (ERP) STA or AP shall support three different
+ * An Extended Rate PHY (ERP) STA or AP shall support three different
* preamble and header formats:
* Long preamble (refer to above)
* Short preamble (refer to above)
@@ -221,7 +219,7 @@ enum do11_nonerpstatus_t {
/* (ERP is "Extended Rate PHY") Way to read NONERP is NON-ERP-*
* The key here is DOT11 NON ERP NEVER protects against
* NON ERP STA's. You *don't* want this unless
- * you know what you are doing. It means you will only
+ * you know what you are doing. It means you will only
* get Extended Rate capabilities */
enum dot11_nonerpprotection_t {
DOT11_NONERP_NEVER = 0,
@@ -229,13 +227,13 @@ enum dot11_nonerpprotection_t {
DOT11_NONERP_DYNAMIC = 2
};
-/* Preset OID configuration for 802.11 modes
- * Note: DOT11_OID_CW[MIN|MAX] hold the values of the
+/* Preset OID configuration for 802.11 modes
+ * Note: DOT11_OID_CW[MIN|MAX] hold the values of the
* DCS MIN|MAX backoff used */
enum dot11_profile_t { /* And set/allowed values */
/* Allowed values for DOT11_OID_PROFILES */
DOT11_PROFILE_B_ONLY = 0,
- /* DOT11_OID_RATES: 1, 2, 5.5, 11Mbps
+ /* DOT11_OID_RATES: 1, 2, 5.5, 11Mbps
* DOT11_OID_PREAMBLESETTINGS: DOT11_PREAMBLESETTING_DYNAMIC
* DOT11_OID_CWMIN: 31
* DOT11_OID_NONEPROTECTION: DOT11_NOERP_DYNAMIC
@@ -275,7 +273,7 @@ enum oid_inl_conformance_t {
OID_INL_CONFORMANCE_NONE = 0, /* Perform active scanning */
OID_INL_CONFORMANCE_STRICT = 1, /* Strictly adhere to 802.11d */
OID_INL_CONFORMANCE_FLEXIBLE = 2, /* Use passed 802.11d info to
- * determine channel AND/OR just make assumption that active
+ * determine channel AND/OR just make assumption that active
* channels are valid channels */
};
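
The isl_oid.h hunk above promotes the structure-size checks from runtime BUG_ON() to compile-time BUILD_BUG_ON(): a wrong layout now breaks the build instead of oopsing a running machine. A stand-alone illustration of the trick behind the macro (the struct is a made-up 12-byte stand-in; in the kernel the macro comes from <linux/kernel.h>):

#include <stdio.h>

/* Negative-array-size trick: compiles only while 'cond' is false. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct obj_mlme_demo { unsigned char addr[6]; unsigned short id, state, code; };

int main(void)
{
	/* 6 + 3*2 bytes, no padding on the usual ABIs, so this compiles;
	 * change the layout and the build fails right here. */
	BUILD_BUG_ON(sizeof(struct obj_mlme_demo) != 12);
	printf("struct layout checked at compile time\n");
	return 0;
}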
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index ec1c00f19eb3..1e0603ca436c 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
* Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
* Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
@@ -413,7 +412,7 @@ prism54_bring_down(islpci_private *priv)
islpci_set_state(priv, PRV_STATE_PREBOOT);
/* disable all device interrupts in case they weren't */
- isl38xx_disable_interrupts(priv->device_base);
+ isl38xx_disable_interrupts(priv->device_base);
/* For safety reasons, we may want to ensure that no DMA transfer is
* currently in progress by emptying the TX and RX queues. */
@@ -480,7 +479,7 @@ islpci_reset_if(islpci_private *priv)
DEFINE_WAIT(wait);
prepare_to_wait(&priv->reset_done, &wait, TASK_UNINTERRUPTIBLE);
-
+
/* now the last step is to reset the interface */
isl38xx_interface_reset(priv->device_base, priv->device_host_address);
islpci_set_state(priv, PRV_STATE_PREINIT);
@@ -488,7 +487,7 @@ islpci_reset_if(islpci_private *priv)
for(count = 0; count < 2 && result; count++) {
/* The software reset acknowledge needs about 220 msec here.
* Be conservative and wait for up to one second. */
-
+
remaining = schedule_timeout_uninterruptible(HZ);
if(remaining > 0) {
@@ -496,7 +495,7 @@ islpci_reset_if(islpci_private *priv)
break;
}
- /* If we're here it's because our IRQ hasn't yet gone through.
+ /* If we're here it's because our IRQ hasn't yet gone through.
* Retry a bit more...
*/
printk(KERN_ERR "%s: no 'reset complete' IRQ seen - retrying\n",
@@ -514,7 +513,7 @@ islpci_reset_if(islpci_private *priv)
/* Now that the device is 100% up, let's allow
* for the other interrupts --
- * NOTE: this is not *yet* true since we've only allowed the
+ * NOTE: this is not *yet* true since we've only allowed the
* INIT interrupt on the IRQ line. We can perhaps poll
* the IRQ line until we know for sure the reset went through */
isl38xx_enable_common_interrupts(priv->device_base);
@@ -716,7 +715,7 @@ islpci_alloc_memory(islpci_private *priv)
prism54_acl_init(&priv->acl);
prism54_wpa_bss_ie_init(priv);
- if (mgt_init(priv))
+ if (mgt_init(priv))
goto out_free;
return 0;
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index 2f7e525d0cf6..a9aa1662eaa4 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -1,6 +1,5 @@
/*
- *
- * Copyright (C) 2002 Intersil Americas Inc.
+ * Copyright (C) 2002 Intersil Americas Inc.
* Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
* Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
* Copyright (C) 2003 Aurelien Alleaume <slts@free.fr>
@@ -72,12 +71,12 @@ struct islpci_bss_wpa_ie {
u8 bssid[ETH_ALEN];
u8 wpa_ie[MAX_WPA_IE_LEN];
size_t wpa_ie_len;
-
+
};
typedef struct {
spinlock_t slock; /* generic spinlock; */
-
+
u32 priv_oid;
/* our mib cache */
@@ -85,7 +84,7 @@ typedef struct {
struct rw_semaphore mib_sem;
void **mib;
char nickname[IW_ESSID_MAX_SIZE+1];
-
+
/* Take care of the wireless stats */
struct work_struct stats_work;
struct semaphore stats_sem;
@@ -120,7 +119,7 @@ typedef struct {
struct net_device *ndev;
/* device queue interface members */
- struct isl38xx_cb *control_block; /* device control block
+ struct isl38xx_cb *control_block; /* device control block
(== driver_mem_address!) */
/* Each queue has three indexes:
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index a8261d8454dd..676d83813dc8 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
* Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
* This program is free software; you can redistribute it and/or modify
@@ -48,7 +47,7 @@ islpci_eth_cleanup_transmit(islpci_private *priv,
/* read the index of the first fragment to be freed */
index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;
- /* check for holes in the arrays caused by multi fragment frames
+ /* check for holes in the arrays caused by multi fragment frames
* searching for the last fragment of a frame */
if (priv->pci_map_tx_address[index] != (dma_addr_t) NULL) {
/* entry is the last fragment of a frame
@@ -253,6 +252,7 @@ islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
* header and without the FCS. But there a is a bit that
* indicates if the packet is corrupted :-) */
struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;
+
if (hdr->flags & 0x01)
/* This one is bad. Drop it ! */
return -1;
@@ -284,7 +284,7 @@ islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
(struct avs_80211_1_header *) skb_push(*skb,
sizeof (struct
avs_80211_1_header));
-
+
avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
avs->mactime = cpu_to_be64(le64_to_cpu(clock));
@@ -390,7 +390,7 @@ islpci_eth_receive(islpci_private *priv)
struct rx_annex_header *annex =
(struct rx_annex_header *) skb->data;
wstats.level = annex->rfmon.rssi;
- /* The noise value can be a bit outdated if nobody's
+ /* The noise value can be a bit outdated if nobody's
* reading wireless stats... */
wstats.noise = priv->local_iwstatistics.qual.noise;
wstats.qual = wstats.level - wstats.noise;
@@ -464,10 +464,8 @@ islpci_eth_receive(islpci_private *priv)
break;
}
/* update the fragment address */
- control_block->rx_data_low[index].address = cpu_to_le32((u32)
- priv->
- pci_map_rx_address
- [index]);
+ control_block->rx_data_low[index].address =
+ cpu_to_le32((u32)priv->pci_map_rx_address[index]);
wmb();
/* increment the driver read pointer */
@@ -484,10 +482,12 @@ islpci_eth_receive(islpci_private *priv)
void
islpci_do_reset_and_wake(void *data)
{
- islpci_private *priv = (islpci_private *) data;
+ islpci_private *priv = data;
+
islpci_reset(priv, 1);
- netif_wake_queue(priv->ndev);
priv->reset_task_pending = 0;
+ smp_wmb();
+ netif_wake_queue(priv->ndev);
}
void
@@ -499,12 +499,14 @@ islpci_eth_tx_timeout(struct net_device *ndev)
/* increment the transmit error counter */
statistics->tx_errors++;
- printk(KERN_WARNING "%s: tx_timeout", ndev->name);
if (!priv->reset_task_pending) {
- priv->reset_task_pending = 1;
- printk(", scheduling a reset");
+ printk(KERN_WARNING
+ "%s: tx_timeout, scheduling reset", ndev->name);
netif_stop_queue(ndev);
+ priv->reset_task_pending = 1;
schedule_work(&priv->reset_task);
+ } else {
+ printk(KERN_WARNING
+ "%s: tx_timeout, waiting for reset", ndev->name);
}
- printk("\n");
}
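The islpci_do_reset_and_wake() / islpci_eth_tx_timeout() changes above are about ordering: the queue is stopped before reset_task_pending is set, and the flag is cleared (behind a write barrier) before the queue is woken, so the transmit path can never run while it might still see a stale flag. A minimal sketch of both sides, assuming the transmit handler checks the flag -- that check is not part of this hunk:

	/* writer side, shaped like islpci_do_reset_and_wake() above */
	static void sketch_reset_and_wake(islpci_private *priv)
	{
		islpci_reset(priv, 1);
		priv->reset_task_pending = 0;
		smp_wmb();			/* publish the cleared flag ...      */
		netif_wake_queue(priv->ndev);	/* ... before TX may run again       */
	}

	/* assumed check on the hot path; the real transmit handler is not in this diff */
	static int sketch_may_transmit(islpci_private *priv)
	{
		if (priv->reset_task_pending)	/* queue was stopped by tx_timeout() */
			return 0;
		return 1;
	}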
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index bc9d7a60b8d6..26789454067c 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index f692dccf0d07..58257b40c043 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
* Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
*
@@ -40,8 +39,8 @@ static int init_pcitm = 0;
module_param(init_pcitm, int, 0);
/* In this order: vendor, device, subvendor, subdevice, class, class_mask,
- * driver_data
- * If you have an update for this please contact prism54-devel@prism54.org
+ * driver_data
+ * If you have an update for this please contact prism54-devel@prism54.org
* The latest list can be found at http://prism54.org/supported_cards.php */
static const struct pci_device_id prism54_id_tbl[] = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
@@ -132,15 +131,15 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* 0x40 is the programmable timer to configure the response timeout (TRDY_TIMEOUT)
* 0x41 is the programmable timer to configure the retry timeout (RETRY_TIMEOUT)
- * The RETRY_TIMEOUT is used to set the number of retries that the core, as a
- * Master, will perform before abandoning a cycle. The default value for
- * RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new
- * devices. A write of zero to the RETRY_TIMEOUT register disables this
- * function to allow use with any non-compliant legacy devices that may
- * execute more retries.
+ * The RETRY_TIMEOUT is used to set the number of retries that the core, as a
+ * Master, will perform before abandoning a cycle. The default value for
+ * RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new
+ * devices. A write of zero to the RETRY_TIMEOUT register disables this
+ * function to allow use with any non-compliant legacy devices that may
+ * execute more retries.
*
- * Writing zero to both these two registers will disable both timeouts and
- * *can* solve problems caused by devices that are slow to respond.
+ * Writing zero to both these two registers will disable both timeouts and
+ * *can* solve problems caused by devices that are slow to respond.
* Make this configurable - MSW
*/
if ( init_pcitm >= 0 ) {
@@ -171,14 +170,15 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
/* enable MWI */
- pci_set_mwi(pdev);
+ if (!pci_set_mwi(pdev))
+ printk(KERN_INFO "%s: pci_set_mwi(pdev) succeeded\n", DRV_NAME);
/* setup the network device interface and its structure */
if (!(ndev = islpci_setup(pdev))) {
/* error configuring the driver as a network device */
printk(KERN_ERR "%s: could not configure network device\n",
DRV_NAME);
- goto do_pci_release_regions;
+ goto do_pci_clear_mwi;
}
priv = netdev_priv(ndev);
@@ -208,6 +208,8 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
priv = NULL;
+ do_pci_clear_mwi:
+ pci_clear_mwi(pdev);
do_pci_release_regions:
pci_release_regions(pdev);
do_pci_disable_device:
@@ -241,7 +243,7 @@ prism54_remove(struct pci_dev *pdev)
isl38xx_disable_interrupts(priv->device_base);
islpci_set_state(priv, PRV_STATE_OFF);
/* This bellow causes a lockup at rmmod time. It might be
- * because some interrupts still linger after rmmod time,
+ * because some interrupts still linger after rmmod time,
* see bug #17 */
/* pci_set_power_state(pdev, 3);*/ /* try to power-off */
}
@@ -255,6 +257,8 @@ prism54_remove(struct pci_dev *pdev)
free_netdev(ndev);
priv = NULL;
+ pci_clear_mwi(pdev);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -288,12 +292,19 @@ prism54_resume(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
- BUG_ON(!priv);
+ int err;
- pci_enable_device(pdev);
+ BUG_ON(!priv);
printk(KERN_NOTICE "%s: got resume request\n", ndev->name);
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+ ndev->name);
+ return err;
+ }
+
pci_restore_state(pdev);
/* alright let's go into the PREBOOT state */
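The probe-path changes above follow the usual unwind-in-reverse-order pattern: because pci_set_mwi() now happens before islpci_setup(), a new do_pci_clear_mwi label has to sit between the netdev failure path and do_pci_release_regions, and prism54_remove() mirrors it with pci_clear_mwi(). A compressed sketch of that shape, with setup_netdev() standing in as a placeholder for islpci_setup():

	static int sketch_probe(struct pci_dev *pdev)
	{
		int err;

		err = pci_enable_device(pdev);
		if (err)
			return err;
		err = pci_request_regions(pdev, DRV_NAME);
		if (err)
			goto out_disable;

		if (!pci_set_mwi(pdev))		/* 0 means MWI was enabled; failure is not fatal */
			printk(KERN_INFO DRV_NAME ": MWI enabled\n");

		if (setup_netdev(pdev))		/* placeholder for islpci_setup()                */
			goto out_clear_mwi;
		return 0;

	out_clear_mwi:
		pci_clear_mwi(pdev);		/* harmless even if pci_set_mwi() had failed     */
		pci_release_regions(pdev);
	out_disable:
		pci_disable_device(pdev);
		return -ENODEV;
	}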
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index 2e061a80b294..036a875054c9 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
* Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
*
@@ -502,7 +501,7 @@ islpci_mgt_transaction(struct net_device *ndev,
printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
ndev->name);
- /* TODO: we should reset the device here */
+ /* TODO: we should reset the device here */
out:
finish_wait(&priv->mgmt_wqueue, &wait);
up(&priv->mgmt_sem);
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h
index 2982be3363ef..fc53b587b722 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.h
+++ b/drivers/net/wireless/prism54/islpci_mgt.h
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
* Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
*
@@ -36,8 +35,8 @@ extern int pc_debug;
/* General driver definitions */
-#define PCIDEVICE_LATENCY_TIMER_MIN 0x40
-#define PCIDEVICE_LATENCY_TIMER_VAL 0x50
+#define PCIDEVICE_LATENCY_TIMER_MIN 0x40
+#define PCIDEVICE_LATENCY_TIMER_VAL 0x50
/* Debugging verbose definitions */
#define SHOW_NOTHING 0x00 /* overrules everything */
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index ebb238785839..fbc52b6a3024 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (C) 2003,2004 Aurelien Alleaume <slts@free.fr>
*
* This program is free software; you can redistribute it and/or modify
@@ -503,7 +503,7 @@ mgt_set_varlen(islpci_private *priv, enum oid_num_t n, void *data, int extra_len
}
if (ret || response_op == PIMFOR_OP_ERROR)
ret = -EIO;
- } else
+ } else
ret = -EIO;
/* re-set given data to what it was */
@@ -727,7 +727,7 @@ mgt_commit(islpci_private *priv)
* MEDIUMLIMIT,BEACONPERIOD,DTIMPERIOD,ATIMWINDOW,LISTENINTERVAL
* FREQUENCY,EXTENDEDRATES.
*
- * The way to do this is to set ESSID. Note though that they may get
+ * The way to do this is to set ESSID. Note though that they may get
* unlatch before though by setting another OID. */
#if 0
void
diff --git a/drivers/net/wireless/prism54/prismcompat.h b/drivers/net/wireless/prism54/prismcompat.h
index d71eca55a302..aa1d1747784f 100644
--- a/drivers/net/wireless/prism54/prismcompat.h
+++ b/drivers/net/wireless/prism54/prismcompat.h
@@ -1,4 +1,4 @@
-/*
+/*
* (C) 2004 Margit Schubert-While <margitsw@t-online.de>
*
* This program is free software; you can redistribute it and/or modify
@@ -16,7 +16,7 @@
*
*/
-/*
+/*
* Compatibility header file to aid support of different kernel versions
*/
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index aa661b2b76c7..8be99ebbe1cd 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -1076,6 +1076,31 @@ static int set_mandatory_rates(struct zd_chip *chip, enum ieee80211_std std)
return zd_iowrite32_locked(chip, rates, CR_MANDATORY_RATE_TBL);
}
+int zd_chip_set_rts_cts_rate_locked(struct zd_chip *chip,
+ u8 rts_rate, int preamble)
+{
+ int rts_mod = ZD_RX_CCK;
+ u32 value = 0;
+
+ /* Modulation bit */
+ if (ZD_CS_TYPE(rts_rate) == ZD_CS_OFDM)
+ rts_mod = ZD_RX_OFDM;
+
+ dev_dbg_f(zd_chip_dev(chip), "rts_rate=%x preamble=%x\n",
+ rts_rate, preamble);
+
+ value |= rts_rate << RTSCTS_SH_RTS_RATE;
+ value |= rts_mod << RTSCTS_SH_RTS_MOD_TYPE;
+ value |= preamble << RTSCTS_SH_RTS_PMB_TYPE;
+ value |= preamble << RTSCTS_SH_CTS_PMB_TYPE;
+
+ /* We always send 11M self-CTS messages, like the vendor driver. */
+ value |= ZD_CCK_RATE_11M << RTSCTS_SH_CTS_RATE;
+ value |= ZD_RX_CCK << RTSCTS_SH_CTS_MOD_TYPE;
+
+ return zd_iowrite32_locked(chip, value, CR_RTS_CTS_RATE);
+}
+
int zd_chip_enable_hwint(struct zd_chip *chip)
{
int r;
@@ -1355,17 +1380,12 @@ out:
return r;
}
-int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
+int zd_chip_set_basic_rates_locked(struct zd_chip *chip, u16 cr_rates)
{
- int r;
-
- if (cr_rates & ~(CR_RATES_80211B|CR_RATES_80211G))
- return -EINVAL;
+ ZD_ASSERT((cr_rates & ~(CR_RATES_80211B | CR_RATES_80211G)) == 0);
+ dev_dbg_f(zd_chip_dev(chip), "%x\n", cr_rates);
- mutex_lock(&chip->mutex);
- r = zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL);
- mutex_unlock(&chip->mutex);
- return r;
+ return zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL);
}
static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size)
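zd_chip_set_rts_cts_rate_locked() and the renamed zd_chip_set_basic_rates_locked() follow the driver's convention that *_locked functions expect chip->mutex to be held by the caller. A sketch of a caller, shaped like the work functions added to zd_mac.c further down:

	static int sketch_update_rates(struct zd_chip *chip, u16 cr_rates,
				       u8 rts_rate, int preamble)
	{
		int r;

		mutex_lock(&chip->mutex);
		r = zd_chip_set_basic_rates_locked(chip, cr_rates);
		if (!r)
			r = zd_chip_set_rts_cts_rate_locked(chip, rts_rate, preamble);
		mutex_unlock(&chip->mutex);
		return r;
	}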
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index ae59597ce4e1..ca892b9a6448 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -337,24 +337,24 @@
#define CR_MAC_PS_STATE CTL_REG(0x050C)
#define CR_INTERRUPT CTL_REG(0x0510)
-#define INT_TX_COMPLETE 0x00000001
-#define INT_RX_COMPLETE 0x00000002
-#define INT_RETRY_FAIL 0x00000004
-#define INT_WAKEUP 0x00000008
-#define INT_DTIM_NOTIFY 0x00000020
-#define INT_CFG_NEXT_BCN 0x00000040
-#define INT_BUS_ABORT 0x00000080
-#define INT_TX_FIFO_READY 0x00000100
-#define INT_UART 0x00000200
-#define INT_TX_COMPLETE_EN 0x00010000
-#define INT_RX_COMPLETE_EN 0x00020000
-#define INT_RETRY_FAIL_EN 0x00040000
-#define INT_WAKEUP_EN 0x00080000
-#define INT_DTIM_NOTIFY_EN 0x00200000
-#define INT_CFG_NEXT_BCN_EN 0x00400000
-#define INT_BUS_ABORT_EN 0x00800000
-#define INT_TX_FIFO_READY_EN 0x01000000
-#define INT_UART_EN 0x02000000
+#define INT_TX_COMPLETE (1 << 0)
+#define INT_RX_COMPLETE (1 << 1)
+#define INT_RETRY_FAIL (1 << 2)
+#define INT_WAKEUP (1 << 3)
+#define INT_DTIM_NOTIFY (1 << 5)
+#define INT_CFG_NEXT_BCN (1 << 6)
+#define INT_BUS_ABORT (1 << 7)
+#define INT_TX_FIFO_READY (1 << 8)
+#define INT_UART (1 << 9)
+#define INT_TX_COMPLETE_EN (1 << 16)
+#define INT_RX_COMPLETE_EN (1 << 17)
+#define INT_RETRY_FAIL_EN (1 << 18)
+#define INT_WAKEUP_EN (1 << 19)
+#define INT_DTIM_NOTIFY_EN (1 << 21)
+#define INT_CFG_NEXT_BCN_EN (1 << 22)
+#define INT_BUS_ABORT_EN (1 << 23)
+#define INT_TX_FIFO_READY_EN (1 << 24)
+#define INT_UART_EN (1 << 25)
#define CR_TSF_LOW_PART CTL_REG(0x0514)
#define CR_TSF_HIGH_PART CTL_REG(0x0518)
@@ -398,18 +398,18 @@
* device will use a rate in this table that is less than or equal to the rate
* of the incoming frame which prompted the response */
#define CR_BASIC_RATE_TBL CTL_REG(0x0630)
-#define CR_RATE_1M 0x0001 /* 802.11b */
-#define CR_RATE_2M 0x0002 /* 802.11b */
-#define CR_RATE_5_5M 0x0004 /* 802.11b */
-#define CR_RATE_11M 0x0008 /* 802.11b */
-#define CR_RATE_6M 0x0100 /* 802.11g */
-#define CR_RATE_9M 0x0200 /* 802.11g */
-#define CR_RATE_12M 0x0400 /* 802.11g */
-#define CR_RATE_18M 0x0800 /* 802.11g */
-#define CR_RATE_24M 0x1000 /* 802.11g */
-#define CR_RATE_36M 0x2000 /* 802.11g */
-#define CR_RATE_48M 0x4000 /* 802.11g */
-#define CR_RATE_54M 0x8000 /* 802.11g */
+#define CR_RATE_1M (1 << 0) /* 802.11b */
+#define CR_RATE_2M (1 << 1) /* 802.11b */
+#define CR_RATE_5_5M (1 << 2) /* 802.11b */
+#define CR_RATE_11M (1 << 3) /* 802.11b */
+#define CR_RATE_6M (1 << 8) /* 802.11g */
+#define CR_RATE_9M (1 << 9) /* 802.11g */
+#define CR_RATE_12M (1 << 10) /* 802.11g */
+#define CR_RATE_18M (1 << 11) /* 802.11g */
+#define CR_RATE_24M (1 << 12) /* 802.11g */
+#define CR_RATE_36M (1 << 13) /* 802.11g */
+#define CR_RATE_48M (1 << 14) /* 802.11g */
+#define CR_RATE_54M (1 << 15) /* 802.11g */
#define CR_RATES_80211G 0xff00
#define CR_RATES_80211B 0x000f
@@ -420,15 +420,24 @@
#define CR_MANDATORY_RATE_TBL CTL_REG(0x0634)
#define CR_RTS_CTS_RATE CTL_REG(0x0638)
+/* These are all bit indexes in CR_RTS_CTS_RATE, so remember to shift. */
+#define RTSCTS_SH_RTS_RATE 0
+#define RTSCTS_SH_EXP_CTS_RATE 4
+#define RTSCTS_SH_RTS_MOD_TYPE 8
+#define RTSCTS_SH_RTS_PMB_TYPE 9
+#define RTSCTS_SH_CTS_RATE 16
+#define RTSCTS_SH_CTS_MOD_TYPE 24
+#define RTSCTS_SH_CTS_PMB_TYPE 25
+
#define CR_WEP_PROTECT CTL_REG(0x063C)
#define CR_RX_THRESHOLD CTL_REG(0x0640)
/* register for controlling the LEDS */
#define CR_LED CTL_REG(0x0644)
/* masks for controlling LEDs */
-#define LED1 0x0100
-#define LED2 0x0200
-#define LED_SW 0x0400
+#define LED1 (1 << 8)
+#define LED2 (1 << 9)
+#define LED_SW (1 << 10)
/* Seems to indicate that the configuration is over.
*/
@@ -455,18 +464,18 @@
* registers, so one could argue it is a LOCK bit. But calling it
* LOCK_PHY_REGS makes it confusing.
*/
-#define UNLOCK_PHY_REGS 0x0080
+#define UNLOCK_PHY_REGS (1 << 7)
#define CR_DEVICE_STATE CTL_REG(0x0684)
#define CR_UNDERRUN_CNT CTL_REG(0x0688)
#define CR_RX_FILTER CTL_REG(0x068c)
-#define RX_FILTER_ASSOC_RESPONSE 0x0002
-#define RX_FILTER_REASSOC_RESPONSE 0x0008
-#define RX_FILTER_PROBE_RESPONSE 0x0020
-#define RX_FILTER_BEACON 0x0100
-#define RX_FILTER_DISASSOC 0x0400
-#define RX_FILTER_AUTH 0x0800
+#define RX_FILTER_ASSOC_RESPONSE (1 << 1)
+#define RX_FILTER_REASSOC_RESPONSE (1 << 3)
+#define RX_FILTER_PROBE_RESPONSE (1 << 5)
+#define RX_FILTER_BEACON (1 << 8)
+#define RX_FILTER_DISASSOC (1 << 10)
+#define RX_FILTER_AUTH (1 << 11)
#define AP_RX_FILTER 0x0400feff
#define STA_RX_FILTER 0x0000ffff
@@ -794,6 +803,9 @@ void zd_chip_disable_rx(struct zd_chip *chip);
int zd_chip_enable_hwint(struct zd_chip *chip);
int zd_chip_disable_hwint(struct zd_chip *chip);
+int zd_chip_set_rts_cts_rate_locked(struct zd_chip *chip,
+ u8 rts_rate, int preamble);
+
static inline int zd_get_encryption_type(struct zd_chip *chip, u32 *type)
{
return zd_ioread32(chip, CR_ENCRYPTION_TYPE, type);
@@ -809,7 +821,17 @@ static inline int zd_chip_get_basic_rates(struct zd_chip *chip, u16 *cr_rates)
return zd_ioread16(chip, CR_BASIC_RATE_TBL, cr_rates);
}
-int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates);
+int zd_chip_set_basic_rates_locked(struct zd_chip *chip, u16 cr_rates);
+
+static inline int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
+{
+ int r;
+
+ mutex_lock(&chip->mutex);
+ r = zd_chip_set_basic_rates_locked(chip, cr_rates);
+ mutex_unlock(&chip->mutex);
+ return r;
+}
static inline int zd_chip_set_rx_filter(struct zd_chip *chip, u32 filter)
{
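Rewriting the CR_INTERRUPT masks as (1 << n) makes the register layout visible: each *_EN enable bit sits exactly 16 positions above its status bit. A build-time check could pin that down; INT_EN_SHIFT is a made-up name for this sketch, not something the driver defines:

	#define INT_EN_SHIFT 16		/* hypothetical, sketch only */

	static inline void sketch_check_int_layout(void)
	{
		BUILD_BUG_ON(INT_TX_COMPLETE_EN != (INT_TX_COMPLETE << INT_EN_SHIFT));
		BUILD_BUG_ON(INT_RX_COMPLETE_EN != (INT_RX_COMPLETE << INT_EN_SHIFT));
		BUILD_BUG_ON(INT_UART_EN        != (INT_UART        << INT_EN_SHIFT));
	}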
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index a13ec72eb304..fb22f62cf1f3 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -39,6 +39,7 @@ do { \
if (!(x)) { \
pr_debug("%s:%d ASSERT %s VIOLATED!\n", \
__FILE__, __LINE__, __stringify(x)); \
+ dump_stack(); \
} \
} while (0)
#else
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.c b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
index 66905f7b61ff..189160efd2ae 100644
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.c
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
@@ -37,7 +37,12 @@ static const struct channel_range channel_ranges[] = {
[ZD_REGDOMAIN_JAPAN] = { 1, 14},
[ZD_REGDOMAIN_SPAIN] = { 1, 14},
[ZD_REGDOMAIN_FRANCE] = { 1, 14},
- [ZD_REGDOMAIN_JAPAN_ADD] = {14, 15},
+
+ /* Japan originally only had channel 14 available (see CHNL_ID 0x40 in
+ * 802.11). However, in 2001 the range was extended to include channels
+ * 1-13. The ZyDAS devices still use the old region code but are
+ * designed to allow the extra channel access in Japan. */
+ [ZD_REGDOMAIN_JAPAN_ADD] = { 1, 15},
};
const struct channel_range *zd_channel_range(u8 regdomain)
@@ -133,9 +138,6 @@ int zd_find_channel(u8 *channel, const struct iw_freq *freq)
int i, r;
u32 mhz;
- if (!(freq->flags & IW_FREQ_FIXED))
- return 0;
-
if (freq->m < 1000) {
if (freq->m > NUM_CHANNELS || freq->m == 0)
return -EINVAL;
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
index 36329890dfec..26b8298dff8c 100644
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.h
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
@@ -50,6 +50,7 @@ static inline u8 zd_ofdm_plcp_header_rate(
return header->prefix[0] & 0xf;
}
+/* These are referred to as zd_rates */
#define ZD_OFDM_RATE_6M 0xb
#define ZD_OFDM_RATE_9M 0xf
#define ZD_OFDM_RATE_12M 0xa
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index e5fedf968c19..2696f95b9278 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -32,6 +32,8 @@
static void ieee_init(struct ieee80211_device *ieee);
static void softmac_init(struct ieee80211softmac_device *sm);
+static void set_rts_cts_work(void *d);
+static void set_basic_rates_work(void *d);
static void housekeeping_init(struct zd_mac *mac);
static void housekeeping_enable(struct zd_mac *mac);
@@ -46,6 +48,8 @@ int zd_mac_init(struct zd_mac *mac,
memset(mac, 0, sizeof(*mac));
spin_lock_init(&mac->lock);
mac->netdev = netdev;
+ INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work, mac);
+ INIT_WORK(&mac->set_basic_rates_work, set_basic_rates_work, mac);
ieee_init(ieee);
softmac_init(ieee80211_priv(netdev));
@@ -213,6 +217,13 @@ int zd_mac_stop(struct net_device *netdev)
housekeeping_disable(mac);
ieee80211softmac_stop(netdev);
+ /* Ensure no work items are running or queued from this point */
+ cancel_delayed_work(&mac->set_rts_cts_work);
+ cancel_delayed_work(&mac->set_basic_rates_work);
+ flush_workqueue(zd_workqueue);
+ mac->updating_rts_rate = 0;
+ mac->updating_basic_rates = 0;
+
zd_chip_disable_hwint(chip);
zd_chip_switch_radio_off(chip);
zd_chip_disable_int(chip);
@@ -286,6 +297,186 @@ u8 zd_mac_get_regdomain(struct zd_mac *mac)
return regdomain;
}
+/* Fallback to lowest rate, if rate is unknown. */
+static u8 rate_to_zd_rate(u8 rate)
+{
+ switch (rate) {
+ case IEEE80211_CCK_RATE_2MB:
+ return ZD_CCK_RATE_2M;
+ case IEEE80211_CCK_RATE_5MB:
+ return ZD_CCK_RATE_5_5M;
+ case IEEE80211_CCK_RATE_11MB:
+ return ZD_CCK_RATE_11M;
+ case IEEE80211_OFDM_RATE_6MB:
+ return ZD_OFDM_RATE_6M;
+ case IEEE80211_OFDM_RATE_9MB:
+ return ZD_OFDM_RATE_9M;
+ case IEEE80211_OFDM_RATE_12MB:
+ return ZD_OFDM_RATE_12M;
+ case IEEE80211_OFDM_RATE_18MB:
+ return ZD_OFDM_RATE_18M;
+ case IEEE80211_OFDM_RATE_24MB:
+ return ZD_OFDM_RATE_24M;
+ case IEEE80211_OFDM_RATE_36MB:
+ return ZD_OFDM_RATE_36M;
+ case IEEE80211_OFDM_RATE_48MB:
+ return ZD_OFDM_RATE_48M;
+ case IEEE80211_OFDM_RATE_54MB:
+ return ZD_OFDM_RATE_54M;
+ }
+ return ZD_CCK_RATE_1M;
+}
+
+static u16 rate_to_cr_rate(u8 rate)
+{
+ switch (rate) {
+ case IEEE80211_CCK_RATE_2MB:
+ return CR_RATE_1M;
+ case IEEE80211_CCK_RATE_5MB:
+ return CR_RATE_5_5M;
+ case IEEE80211_CCK_RATE_11MB:
+ return CR_RATE_11M;
+ case IEEE80211_OFDM_RATE_6MB:
+ return CR_RATE_6M;
+ case IEEE80211_OFDM_RATE_9MB:
+ return CR_RATE_9M;
+ case IEEE80211_OFDM_RATE_12MB:
+ return CR_RATE_12M;
+ case IEEE80211_OFDM_RATE_18MB:
+ return CR_RATE_18M;
+ case IEEE80211_OFDM_RATE_24MB:
+ return CR_RATE_24M;
+ case IEEE80211_OFDM_RATE_36MB:
+ return CR_RATE_36M;
+ case IEEE80211_OFDM_RATE_48MB:
+ return CR_RATE_48M;
+ case IEEE80211_OFDM_RATE_54MB:
+ return CR_RATE_54M;
+ }
+ return CR_RATE_1M;
+}
+
+static void try_enable_tx(struct zd_mac *mac)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mac->lock, flags);
+ if (mac->updating_rts_rate == 0 && mac->updating_basic_rates == 0)
+ netif_wake_queue(mac->netdev);
+ spin_unlock_irqrestore(&mac->lock, flags);
+}
+
+static void set_rts_cts_work(void *d)
+{
+ struct zd_mac *mac = d;
+ unsigned long flags;
+ u8 rts_rate;
+ unsigned int short_preamble;
+
+ mutex_lock(&mac->chip.mutex);
+
+ spin_lock_irqsave(&mac->lock, flags);
+ mac->updating_rts_rate = 0;
+ rts_rate = mac->rts_rate;
+ short_preamble = mac->short_preamble;
+ spin_unlock_irqrestore(&mac->lock, flags);
+
+ zd_chip_set_rts_cts_rate_locked(&mac->chip, rts_rate, short_preamble);
+ mutex_unlock(&mac->chip.mutex);
+
+ try_enable_tx(mac);
+}
+
+static void set_basic_rates_work(void *d)
+{
+ struct zd_mac *mac = d;
+ unsigned long flags;
+ u16 basic_rates;
+
+ mutex_lock(&mac->chip.mutex);
+
+ spin_lock_irqsave(&mac->lock, flags);
+ mac->updating_basic_rates = 0;
+ basic_rates = mac->basic_rates;
+ spin_unlock_irqrestore(&mac->lock, flags);
+
+ zd_chip_set_basic_rates_locked(&mac->chip, basic_rates);
+ mutex_unlock(&mac->chip.mutex);
+
+ try_enable_tx(mac);
+}
+
+static void bssinfo_change(struct net_device *netdev, u32 changes)
+{
+ struct zd_mac *mac = zd_netdev_mac(netdev);
+ struct ieee80211softmac_device *softmac = ieee80211_priv(netdev);
+ struct ieee80211softmac_bss_info *bssinfo = &softmac->bssinfo;
+ int need_set_rts_cts = 0;
+ int need_set_rates = 0;
+ u16 basic_rates;
+ unsigned long flags;
+
+ dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
+
+ if (changes & IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE) {
+ spin_lock_irqsave(&mac->lock, flags);
+ mac->short_preamble = bssinfo->short_preamble;
+ spin_unlock_irqrestore(&mac->lock, flags);
+ need_set_rts_cts = 1;
+ }
+
+ if (changes & IEEE80211SOFTMAC_BSSINFOCHG_RATES) {
+ /* Set RTS rate to highest available basic rate */
+ u8 rate = ieee80211softmac_highest_supported_rate(softmac,
+ &bssinfo->supported_rates, 1);
+ rate = rate_to_zd_rate(rate);
+
+ spin_lock_irqsave(&mac->lock, flags);
+ if (rate != mac->rts_rate) {
+ mac->rts_rate = rate;
+ need_set_rts_cts = 1;
+ }
+ spin_unlock_irqrestore(&mac->lock, flags);
+
+ /* Set basic rates */
+ need_set_rates = 1;
+ if (bssinfo->supported_rates.count == 0) {
+ /* Allow the device to be flexible */
+ basic_rates = CR_RATES_80211B | CR_RATES_80211G;
+ } else {
+ int i = 0;
+ basic_rates = 0;
+
+ for (i = 0; i < bssinfo->supported_rates.count; i++) {
+ u16 rate = bssinfo->supported_rates.rates[i];
+ if ((rate & IEEE80211_BASIC_RATE_MASK) == 0)
+ continue;
+
+ rate &= ~IEEE80211_BASIC_RATE_MASK;
+ basic_rates |= rate_to_cr_rate(rate);
+ }
+ }
+ spin_lock_irqsave(&mac->lock, flags);
+ mac->basic_rates = basic_rates;
+ spin_unlock_irqrestore(&mac->lock, flags);
+ }
+
+ /* Schedule any changes we made above */
+
+ spin_lock_irqsave(&mac->lock, flags);
+ if (need_set_rts_cts && !mac->updating_rts_rate) {
+ mac->updating_rts_rate = 1;
+ netif_stop_queue(mac->netdev);
+ queue_work(zd_workqueue, &mac->set_rts_cts_work);
+ }
+ if (need_set_rates && !mac->updating_basic_rates) {
+ mac->updating_basic_rates = 1;
+ netif_stop_queue(mac->netdev);
+ queue_work(zd_workqueue, &mac->set_basic_rates_work);
+ }
+ spin_unlock_irqrestore(&mac->lock, flags);
+}
+
static void set_channel(struct net_device *netdev, u8 channel)
{
struct zd_mac *mac = zd_netdev_mac(netdev);
@@ -295,7 +486,6 @@ static void set_channel(struct net_device *netdev, u8 channel)
zd_chip_set_channel(&mac->chip, channel);
}
-/* TODO: Should not work in Managed mode. */
int zd_mac_request_channel(struct zd_mac *mac, u8 channel)
{
unsigned long lock_flags;
@@ -317,31 +507,22 @@ int zd_mac_request_channel(struct zd_mac *mac, u8 channel)
return 0;
}
-int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags)
+u8 zd_mac_get_channel(struct zd_mac *mac)
{
- struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
+ u8 channel = zd_chip_get_channel(&mac->chip);
- *channel = zd_chip_get_channel(&mac->chip);
- if (ieee->iw_mode != IW_MODE_INFRA) {
- spin_lock_irq(&mac->lock);
- *flags = *channel == mac->requested_channel ?
- MAC_FIXED_CHANNEL : 0;
- spin_unlock(&mac->lock);
- } else {
- *flags = 0;
- }
- dev_dbg_f(zd_mac_dev(mac), "channel %u flags %u\n", *channel, *flags);
- return 0;
+ dev_dbg_f(zd_mac_dev(mac), "channel %u\n", channel);
+ return channel;
}
/* If wrong rate is given, we are falling back to the slowest rate: 1MBit/s */
-static u8 cs_typed_rate(u8 cs_rate)
+static u8 zd_rate_typed(u8 zd_rate)
{
static const u8 typed_rates[16] = {
- [ZD_CS_CCK_RATE_1M] = ZD_CS_CCK|ZD_CS_CCK_RATE_1M,
- [ZD_CS_CCK_RATE_2M] = ZD_CS_CCK|ZD_CS_CCK_RATE_2M,
- [ZD_CS_CCK_RATE_5_5M] = ZD_CS_CCK|ZD_CS_CCK_RATE_5_5M,
- [ZD_CS_CCK_RATE_11M] = ZD_CS_CCK|ZD_CS_CCK_RATE_11M,
+ [ZD_CCK_RATE_1M] = ZD_CS_CCK|ZD_CCK_RATE_1M,
+ [ZD_CCK_RATE_2M] = ZD_CS_CCK|ZD_CCK_RATE_2M,
+ [ZD_CCK_RATE_5_5M] = ZD_CS_CCK|ZD_CCK_RATE_5_5M,
+ [ZD_CCK_RATE_11M] = ZD_CS_CCK|ZD_CCK_RATE_11M,
[ZD_OFDM_RATE_6M] = ZD_CS_OFDM|ZD_OFDM_RATE_6M,
[ZD_OFDM_RATE_9M] = ZD_CS_OFDM|ZD_OFDM_RATE_9M,
[ZD_OFDM_RATE_12M] = ZD_CS_OFDM|ZD_OFDM_RATE_12M,
@@ -353,37 +534,7 @@ static u8 cs_typed_rate(u8 cs_rate)
};
ZD_ASSERT(ZD_CS_RATE_MASK == 0x0f);
- return typed_rates[cs_rate & ZD_CS_RATE_MASK];
-}
-
-/* Fallback to lowest rate, if rate is unknown. */
-static u8 rate_to_cs_rate(u8 rate)
-{
- switch (rate) {
- case IEEE80211_CCK_RATE_2MB:
- return ZD_CS_CCK_RATE_2M;
- case IEEE80211_CCK_RATE_5MB:
- return ZD_CS_CCK_RATE_5_5M;
- case IEEE80211_CCK_RATE_11MB:
- return ZD_CS_CCK_RATE_11M;
- case IEEE80211_OFDM_RATE_6MB:
- return ZD_OFDM_RATE_6M;
- case IEEE80211_OFDM_RATE_9MB:
- return ZD_OFDM_RATE_9M;
- case IEEE80211_OFDM_RATE_12MB:
- return ZD_OFDM_RATE_12M;
- case IEEE80211_OFDM_RATE_18MB:
- return ZD_OFDM_RATE_18M;
- case IEEE80211_OFDM_RATE_24MB:
- return ZD_OFDM_RATE_24M;
- case IEEE80211_OFDM_RATE_36MB:
- return ZD_OFDM_RATE_36M;
- case IEEE80211_OFDM_RATE_48MB:
- return ZD_OFDM_RATE_48M;
- case IEEE80211_OFDM_RATE_54MB:
- return ZD_OFDM_RATE_54M;
- }
- return ZD_CS_CCK_RATE_1M;
+ return typed_rates[zd_rate & ZD_CS_RATE_MASK];
}
int zd_mac_set_mode(struct zd_mac *mac, u32 mode)
@@ -484,13 +635,13 @@ int zd_mac_get_range(struct zd_mac *mac, struct iw_range *range)
return 0;
}
-static int zd_calc_tx_length_us(u8 *service, u8 cs_rate, u16 tx_length)
+static int zd_calc_tx_length_us(u8 *service, u8 zd_rate, u16 tx_length)
{
static const u8 rate_divisor[] = {
- [ZD_CS_CCK_RATE_1M] = 1,
- [ZD_CS_CCK_RATE_2M] = 2,
- [ZD_CS_CCK_RATE_5_5M] = 11, /* bits must be doubled */
- [ZD_CS_CCK_RATE_11M] = 11,
+ [ZD_CCK_RATE_1M] = 1,
+ [ZD_CCK_RATE_2M] = 2,
+ [ZD_CCK_RATE_5_5M] = 11, /* bits must be doubled */
+ [ZD_CCK_RATE_11M] = 11,
[ZD_OFDM_RATE_6M] = 6,
[ZD_OFDM_RATE_9M] = 9,
[ZD_OFDM_RATE_12M] = 12,
@@ -504,15 +655,15 @@ static int zd_calc_tx_length_us(u8 *service, u8 cs_rate, u16 tx_length)
u32 bits = (u32)tx_length * 8;
u32 divisor;
- divisor = rate_divisor[cs_rate];
+ divisor = rate_divisor[zd_rate];
if (divisor == 0)
return -EINVAL;
- switch (cs_rate) {
- case ZD_CS_CCK_RATE_5_5M:
+ switch (zd_rate) {
+ case ZD_CCK_RATE_5_5M:
bits = (2*bits) + 10; /* round up to the next integer */
break;
- case ZD_CS_CCK_RATE_11M:
+ case ZD_CCK_RATE_11M:
if (service) {
u32 t = bits % 11;
*service &= ~ZD_PLCP_SERVICE_LENGTH_EXTENSION;
@@ -532,16 +683,16 @@ enum {
R2M_11A = 0x02,
};
-static u8 cs_rate_to_modulation(u8 cs_rate, int flags)
+static u8 zd_rate_to_modulation(u8 zd_rate, int flags)
{
u8 modulation;
- modulation = cs_typed_rate(cs_rate);
+ modulation = zd_rate_typed(zd_rate);
if (flags & R2M_SHORT_PREAMBLE) {
switch (ZD_CS_RATE(modulation)) {
- case ZD_CS_CCK_RATE_2M:
- case ZD_CS_CCK_RATE_5_5M:
- case ZD_CS_CCK_RATE_11M:
+ case ZD_CCK_RATE_2M:
+ case ZD_CCK_RATE_5_5M:
+ case ZD_CCK_RATE_11M:
modulation |= ZD_CS_CCK_PREA_SHORT;
return modulation;
}
@@ -558,39 +709,36 @@ static void cs_set_modulation(struct zd_mac *mac, struct zd_ctrlset *cs,
{
struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev);
u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(hdr->frame_ctl));
- u8 rate, cs_rate;
+ u8 rate, zd_rate;
int is_mgt = (ftype == IEEE80211_FTYPE_MGMT) != 0;
+ int is_multicast = is_multicast_ether_addr(hdr->addr1);
+ int short_preamble = ieee80211softmac_short_preamble_ok(softmac,
+ is_multicast, is_mgt);
+ int flags = 0;
+
+ /* FIXME: 802.11a? */
+ rate = ieee80211softmac_suggest_txrate(softmac, is_multicast, is_mgt);
- /* FIXME: 802.11a? short preamble? */
- rate = ieee80211softmac_suggest_txrate(softmac,
- is_multicast_ether_addr(hdr->addr1), is_mgt);
+ if (short_preamble)
+ flags |= R2M_SHORT_PREAMBLE;
- cs_rate = rate_to_cs_rate(rate);
- cs->modulation = cs_rate_to_modulation(cs_rate, 0);
+ zd_rate = rate_to_zd_rate(rate);
+ cs->modulation = zd_rate_to_modulation(zd_rate, flags);
}
static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
struct ieee80211_hdr_4addr *header)
{
+ struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev);
unsigned int tx_length = le16_to_cpu(cs->tx_length);
u16 fctl = le16_to_cpu(header->frame_ctl);
u16 ftype = WLAN_FC_GET_TYPE(fctl);
u16 stype = WLAN_FC_GET_STYPE(fctl);
/*
- * CONTROL:
- * - start at 0x00
- * - if fragment 0, enable bit 0
+ * CONTROL TODO:
* - if backoff needed, enable bit 0
* - if burst (backoff not needed) disable bit 0
- * - if multicast, enable bit 1
- * - if PS-POLL frame, enable bit 2
- * - if in INDEPENDENT_BSS mode and zd1205_DestPowerSave, then enable
- * bit 4 (FIXME: wtf)
- * - if frag_len > RTS threshold, set bit 5 as long if it isnt
- * multicast or mgt
- * - if bit 5 is set, and we are in OFDM mode, unset bit 5 and set bit
- * 7
*/
cs->control = 0;
@@ -607,17 +755,18 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
if (stype == IEEE80211_STYPE_PSPOLL)
cs->control |= ZD_CS_PS_POLL_FRAME;
+ /* Unicast data frames over the threshold should have RTS */
if (!is_multicast_ether_addr(header->addr1) &&
- ftype != IEEE80211_FTYPE_MGMT &&
- tx_length > zd_netdev_ieee80211(mac->netdev)->rts)
- {
- /* FIXME: check the logic */
- if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM) {
- /* 802.11g */
- cs->control |= ZD_CS_SELF_CTS;
- } else { /* 802.11b */
- cs->control |= ZD_CS_RTS;
- }
+ ftype != IEEE80211_FTYPE_MGMT &&
+ tx_length > zd_netdev_ieee80211(mac->netdev)->rts)
+ cs->control |= ZD_CS_RTS;
+
+ /* Use CTS-to-self protection if required */
+ if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM &&
+ ieee80211softmac_protection_needed(softmac)) {
+ /* FIXME: avoid sending RTS *and* self-CTS, is that correct? */
+ cs->control &= ~ZD_CS_RTS;
+ cs->control |= ZD_CS_SELF_CTS;
}
/* FIXME: Management frame? */
@@ -782,9 +931,11 @@ static int is_data_packet_for_us(struct ieee80211_device *ieee,
(netdev->flags & IFF_PROMISC);
}
-/* Filters receiving packets. If it returns 1 send it to ieee80211_rx, if 0
- * return. If an error is detected -EINVAL is returned. ieee80211_rx_mgt() is
- * called here.
+/* Filters received packets. The function returns 1 if the packet should be
+ * forwarded to ieee80211_rx(). If the packet should be ignored the function
+ * returns 0. If an invalid packet is found the function returns -EINVAL.
+ *
+ * The function calls ieee80211_rx_mgt() directly.
*
* It has been based on ieee80211_rx_any.
*/
@@ -810,9 +961,9 @@ static int filter_rx(struct ieee80211_device *ieee,
ieee80211_rx_mgt(ieee, hdr, stats);
return 0;
case IEEE80211_FTYPE_CTL:
- /* Ignore invalid short buffers */
return 0;
case IEEE80211_FTYPE_DATA:
+ /* Ignore invalid short buffers */
if (length < sizeof(struct ieee80211_hdr_3addr))
return -EINVAL;
return is_data_packet_for_us(ieee, hdr);
@@ -993,6 +1144,7 @@ static void ieee_init(struct ieee80211_device *ieee)
static void softmac_init(struct ieee80211softmac_device *sm)
{
sm->set_channel = set_channel;
+ sm->bssinfo_change = bssinfo_change;
}
struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev)
@@ -1028,66 +1180,6 @@ struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev)
return iw_stats;
}
-#ifdef DEBUG
-static const char* decryption_types[] = {
- [ZD_RX_NO_WEP] = "none",
- [ZD_RX_WEP64] = "WEP64",
- [ZD_RX_TKIP] = "TKIP",
- [ZD_RX_AES] = "AES",
- [ZD_RX_WEP128] = "WEP128",
- [ZD_RX_WEP256] = "WEP256",
-};
-
-static const char *decryption_type_string(u8 type)
-{
- const char *s;
-
- if (type < ARRAY_SIZE(decryption_types)) {
- s = decryption_types[type];
- } else {
- s = NULL;
- }
- return s ? s : "unknown";
-}
-
-static int is_ofdm(u8 frame_status)
-{
- return (frame_status & ZD_RX_OFDM);
-}
-
-void zd_dump_rx_status(const struct rx_status *status)
-{
- const char* modulation;
- u8 quality;
-
- if (is_ofdm(status->frame_status)) {
- modulation = "ofdm";
- quality = status->signal_quality_ofdm;
- } else {
- modulation = "cck";
- quality = status->signal_quality_cck;
- }
- pr_debug("rx status %s strength %#04x qual %#04x decryption %s\n",
- modulation, status->signal_strength, quality,
- decryption_type_string(status->decryption_type));
- if (status->frame_status & ZD_RX_ERROR) {
- pr_debug("rx error %s%s%s%s%s%s\n",
- (status->frame_status & ZD_RX_TIMEOUT_ERROR) ?
- "timeout " : "",
- (status->frame_status & ZD_RX_FIFO_OVERRUN_ERROR) ?
- "fifo " : "",
- (status->frame_status & ZD_RX_DECRYPTION_ERROR) ?
- "decryption " : "",
- (status->frame_status & ZD_RX_CRC32_ERROR) ?
- "crc32 " : "",
- (status->frame_status & ZD_RX_NO_ADDR1_MATCH_ERROR) ?
- "addr1 " : "",
- (status->frame_status & ZD_RX_CRC16_ERROR) ?
- "crc16" : "");
- }
-}
-#endif /* DEBUG */
-
#define LINK_LED_WORK_DELAY HZ
static void link_led_handler(void *p)
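The new bssinfo_change() handler folds the BSS's basic-rate set into a CR_BASIC_RATE_TBL bitmap and defers the register write to set_basic_rates_work(), keeping the queue stopped while an update is in flight. As a worked example using only the constants from the zd_chip.h hunk above, the four 802.11b rates correspond to:

	u16 b_only = CR_RATE_1M | CR_RATE_2M | CR_RATE_5_5M | CR_RATE_11M;
		/* 0x0001 | 0x0002 | 0x0004 | 0x0008 == 0x000f == CR_RATES_80211B */

When the supported-rates list is empty, the code instead falls back to CR_RATES_80211B | CR_RATES_80211G (0xff0f), leaving the device free to pick any rate.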
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index e4dd40a6fec3..5dcfb251f02e 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -20,6 +20,7 @@
#include <linux/wireless.h>
#include <linux/kernel.h>
+#include <linux/workqueue.h>
#include <net/ieee80211.h>
#include <net/ieee80211softmac.h>
@@ -48,10 +49,11 @@ struct zd_ctrlset {
#define ZD_CS_CCK 0x00
#define ZD_CS_OFDM 0x10
-#define ZD_CS_CCK_RATE_1M 0x00
-#define ZD_CS_CCK_RATE_2M 0x01
-#define ZD_CS_CCK_RATE_5_5M 0x02
-#define ZD_CS_CCK_RATE_11M 0x03
+/* These are referred to as zd_rates */
+#define ZD_CCK_RATE_1M 0x00
+#define ZD_CCK_RATE_2M 0x01
+#define ZD_CCK_RATE_5_5M 0x02
+#define ZD_CCK_RATE_11M 0x03
/* The rates for OFDM are encoded as in the PLCP header. Use ZD_OFDM_RATE_*.
*/
@@ -116,10 +118,6 @@ struct rx_status {
#define ZD_RX_CRC16_ERROR 0x40
#define ZD_RX_ERROR 0x80
-enum mac_flags {
- MAC_FIXED_CHANNEL = 0x01,
-};
-
struct housekeeping {
struct work_struct link_led_work;
};
@@ -130,15 +128,33 @@ struct zd_mac {
struct zd_chip chip;
spinlock_t lock;
struct net_device *netdev;
+
/* Unlocked reading possible */
struct iw_statistics iw_stats;
+
struct housekeeping housekeeping;
+ struct work_struct set_rts_cts_work;
+ struct work_struct set_basic_rates_work;
+
unsigned int stats_count;
u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE];
u8 regdomain;
u8 default_regdomain;
u8 requested_channel;
+
+ /* A bitpattern of cr_rates */
+ u16 basic_rates;
+
+ /* A zd_rate */
+ u8 rts_rate;
+
+ /* Short preamble (used for RTS/CTS) */
+ unsigned int short_preamble:1;
+
+ /* flags to indicate update in progress */
+ unsigned int updating_rts_rate:1;
+ unsigned int updating_basic_rates:1;
};
static inline struct ieee80211_device *zd_mac_to_ieee80211(struct zd_mac *mac)
@@ -180,7 +196,7 @@ int zd_mac_set_regdomain(struct zd_mac *zd_mac, u8 regdomain);
u8 zd_mac_get_regdomain(struct zd_mac *zd_mac);
int zd_mac_request_channel(struct zd_mac *mac, u8 channel);
-int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags);
+u8 zd_mac_get_channel(struct zd_mac *mac);
int zd_mac_set_mode(struct zd_mac *mac, u32 mode);
int zd_mac_get_mode(struct zd_mac *mac, u32 *mode);
diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.c b/drivers/net/wireless/zd1211rw/zd_netdev.c
index af3a7b36d078..60f1b0f6d45b 100644
--- a/drivers/net/wireless/zd1211rw/zd_netdev.c
+++ b/drivers/net/wireless/zd1211rw/zd_netdev.c
@@ -107,21 +107,10 @@ static int iw_get_freq(struct net_device *netdev,
struct iw_request_info *info,
union iwreq_data *req, char *extra)
{
- int r;
struct zd_mac *mac = zd_netdev_mac(netdev);
struct iw_freq *freq = &req->freq;
- u8 channel;
- u8 flags;
-
- r = zd_mac_get_channel(mac, &channel, &flags);
- if (r)
- return r;
- freq->flags = (flags & MAC_FIXED_CHANNEL) ?
- IW_FREQ_FIXED : IW_FREQ_AUTO;
- dev_dbg_f(zd_mac_dev(mac), "channel %s\n",
- (flags & MAC_FIXED_CHANNEL) ? "fixed" : "auto");
- return zd_channel_to_freq(freq, channel);
+ return zd_channel_to_freq(freq, zd_mac_get_channel(mac));
}
static int iw_set_mode(struct net_device *netdev,
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index a15b09549245..aa782e88754b 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -47,11 +47,17 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0586, 0x3402), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0b3b, 0x5630), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0b05, 0x170c), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x1435, 0x0711), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x0586, 0x3409), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x0b3b, 0x1630), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x0586, 0x3401), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
{ USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
/* "Driverless" devices that need ejecting */
{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
{}
@@ -587,6 +593,8 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
unsigned int l, k, n;
for (i = 0, l = 0;; i++) {
k = le16_to_cpu(get_unaligned(&length_info->length[i]));
+ if (k == 0)
+ return;
n = l+k;
if (n > length)
return;
@@ -1110,27 +1118,28 @@ static int __init usb_init(void)
{
int r;
- pr_debug("usb_init()\n");
+ pr_debug("%s usb_init()\n", driver.name);
zd_workqueue = create_singlethread_workqueue(driver.name);
if (zd_workqueue == NULL) {
- printk(KERN_ERR "%s: couldn't create workqueue\n", driver.name);
+ printk(KERN_ERR "%s couldn't create workqueue\n", driver.name);
return -ENOMEM;
}
r = usb_register(&driver);
if (r) {
- printk(KERN_ERR "usb_register() failed. Error number %d\n", r);
+ printk(KERN_ERR "%s usb_register() failed. Error number %d\n",
+ driver.name, r);
return r;
}
- pr_debug("zd1211rw initialized\n");
+ pr_debug("%s initialized\n", driver.name);
return 0;
}
static void __exit usb_exit(void)
{
- pr_debug("usb_exit()\n");
+ pr_debug("%s usb_exit()\n", driver.name);
usb_deregister(&driver);
destroy_workqueue(zd_workqueue);
}
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index df04e050c647..d85e2ea0b6af 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -34,8 +34,16 @@
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include "8390.h"
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+#define ei_inb(port) in_8(port)
+#define ei_outb(val,port) out_8(port,val)
+#define ei_inb_p(port) in_8(port)
+#define ei_outb_p(val,port) out_8(port,val)
+static const char version[] =
+ "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include "lib8390.c"
#define DRV_NAME "zorro8390"
@@ -114,7 +122,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
break;
board = z->resource.start;
ioaddr = board+cards[i].offset;
- dev = alloc_ei_netdev();
+ dev = ____alloc_ei_netdev(0);
if (!dev)
return -ENOMEM;
SET_MODULE_OWNER(dev);
@@ -201,7 +209,7 @@ static int __devinit zorro8390_init(struct net_device *dev,
dev->irq = IRQ_AMIGA_PORTS;
/* Install the Interrupt handler */
- i = request_irq(IRQ_AMIGA_PORTS, ei_interrupt, IRQF_SHARED, DRV_NAME, dev);
+ i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, DRV_NAME, dev);
if (i) return i;
for(i = 0; i < ETHER_ADDR_LEN; i++) {
@@ -226,10 +234,10 @@ static int __devinit zorro8390_init(struct net_device *dev,
dev->open = &zorro8390_open;
dev->stop = &zorro8390_close;
#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = ei_poll;
+ dev->poll_controller = __ei_poll;
#endif
- NS8390_init(dev, 0);
+ __NS8390_init(dev, 0);
err = register_netdev(dev);
if (err) {
free_irq(IRQ_AMIGA_PORTS, dev);
@@ -246,7 +254,7 @@ static int __devinit zorro8390_init(struct net_device *dev,
static int zorro8390_open(struct net_device *dev)
{
- ei_open(dev);
+ __ei_open(dev);
return 0;
}
@@ -254,7 +262,7 @@ static int zorro8390_close(struct net_device *dev)
{
if (ei_debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
- ei_close(dev);
+ __ei_close(dev);
return 0;
}
@@ -405,7 +413,7 @@ static void zorro8390_block_output(struct net_device *dev, int count,
printk(KERN_ERR "%s: timeout waiting for Tx RDC.\n",
dev->name);
zorro8390_reset_8390(dev);
- NS8390_init(dev,1);
+ __NS8390_init(dev,1);
break;
}
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
index edfa012fad3a..aff25c000abf 100644
--- a/include/linux/mv643xx.h
+++ b/include/linux/mv643xx.h
@@ -724,7 +724,7 @@
#define MV643XX_ETH_RX_FIFO_URGENT_THRESHOLD_REG(port) (0x2470 + (port<<10))
#define MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(port) (0x2474 + (port<<10))
#define MV643XX_ETH_RX_MINIMAL_FRAME_SIZE_REG(port) (0x247c + (port<<10))
-#define MV643XX_ETH_RX_DISCARDED_FRAMES_COUNTER(port) (0x2484 + (port<<10)
+#define MV643XX_ETH_RX_DISCARDED_FRAMES_COUNTER(port) (0x2484 + (port<<10))
#define MV643XX_ETH_PORT_DEBUG_0_REG(port) (0x248c + (port<<10))
#define MV643XX_ETH_PORT_DEBUG_1_REG(port) (0x2490 + (port<<10))
#define MV643XX_ETH_PORT_INTERNAL_ADDR_ERROR_REG(port) (0x2494 + (port<<10))
@@ -1135,7 +1135,7 @@ struct mv64xxx_i2c_pdata {
#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_1 (1<<19)
#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_2 (1<<20)
#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_3 ((1<<20) | (1<<19))
-#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_4 ((1<<21)
+#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_4 (1<<21)
#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_5 ((1<<21) | (1<<19))
#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_6 ((1<<21) | (1<<20))
#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_7 ((1<<21) | (1<<20) | (1<<19))
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index e060a7637947..fd5033b8a927 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1213,6 +1213,10 @@
#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453
+#define PCI_DEVICE_ID_NVIDIA_NVENET_24 0x054C
+#define PCI_DEVICE_ID_NVIDIA_NVENET_25 0x054D
+#define PCI_DEVICE_ID_NVIDIA_NVENET_26 0x054E
+#define PCI_DEVICE_ID_NVIDIA_NVENET_27 0x054F
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560
#define PCI_VENDOR_ID_IMS 0x10e0
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 9447a57ee8a9..edd4c88ca7d8 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -20,6 +20,10 @@
#include <linux/spinlock.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
#define PHY_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
SUPPORTED_10baseT_Full | \
@@ -43,15 +47,26 @@
#define PHY_HAS_INTERRUPT 0x00000001
#define PHY_HAS_MAGICANEG 0x00000002
+/* Interface Mode definitions */
+typedef enum {
+ PHY_INTERFACE_MODE_MII,
+ PHY_INTERFACE_MODE_GMII,
+ PHY_INTERFACE_MODE_SGMII,
+ PHY_INTERFACE_MODE_TBI,
+ PHY_INTERFACE_MODE_RMII,
+ PHY_INTERFACE_MODE_RGMII,
+ PHY_INTERFACE_MODE_RTBI
+} phy_interface_t;
+
#define MII_BUS_MAX 4
-#define PHY_INIT_TIMEOUT 100000
+#define PHY_INIT_TIMEOUT 100000
#define PHY_STATE_TIME 1
#define PHY_FORCE_TIMEOUT 10
#define PHY_AN_TIMEOUT 10
-#define PHY_MAX_ADDR 32
+#define PHY_MAX_ADDR 32
/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
#define PHY_ID_FMT "%x:%02x"
@@ -83,8 +98,8 @@ struct mii_bus {
int *irq;
};
-#define PHY_INTERRUPT_DISABLED 0x0
-#define PHY_INTERRUPT_ENABLED 0x80000000
+#define PHY_INTERRUPT_DISABLED 0x0
+#define PHY_INTERRUPT_ENABLED 0x80000000
/* PHY state machine states:
*
@@ -226,6 +241,8 @@ struct phy_device {
u32 dev_flags;
+ phy_interface_t interface;
+
/* Bus address of the PHY (0-32) */
int addr;
@@ -341,9 +358,10 @@ struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
int phy_clear_interrupt(struct phy_device *phydev);
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
struct phy_device * phy_attach(struct net_device *dev,
- const char *phy_id, u32 flags);
+ const char *phy_id, u32 flags, phy_interface_t interface);
struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
- void (*handler)(struct net_device *), u32 flags);
+ void (*handler)(struct net_device *), u32 flags,
+ phy_interface_t interface);
void phy_disconnect(struct phy_device *phydev);
void phy_detach(struct phy_device *phydev);
void phy_start(struct phy_device *phydev);
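With this phy.h change, phy_connect() and phy_attach() take the interface mode explicitly. A sketch of a MAC driver call under the new prototype, assuming the usual IS_ERR() return convention; the "0:01" bus id, the adjust_link() callback and the RGMII mode are placeholders for whatever the driver already uses:

	static void adjust_link(struct net_device *dev)
	{
		/* react to phydev->link / speed / duplex changes, as before */
	}

	static int sketch_connect_phy(struct net_device *dev)
	{
		struct phy_device *phydev;

		phydev = phy_connect(dev, "0:01", &adjust_link, 0,
				     PHY_INTERFACE_MODE_RGMII);
		if (IS_ERR(phydev))
			return PTR_ERR(phydev);
		return 0;
	}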
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index a50a0130fd9e..7c269f4992eb 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -546,6 +546,8 @@
/* MLME requests (SIOCSIWMLME / struct iw_mlme) */
#define IW_MLME_DEAUTH 0
#define IW_MLME_DISASSOC 1
+#define IW_MLME_AUTH 2
+#define IW_MLME_ASSOC 3
/* SIOCSIWAUTH/SIOCGIWAUTH struct iw_param flags */
#define IW_AUTH_INDEX 0x0FFF
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index b174ebb277a9..e6af381e206d 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -1037,6 +1037,10 @@ struct ieee80211_device {
/* host performs multicast decryption */
int host_mc_decrypt;
+ /* host should strip IV and ICV from protected frames */
+ /* meaningful only when hardware decryption is being used */
+ int host_strip_iv_icv;
+
int host_open_frag;
int host_build_iv;
int ieee802_1x; /* is IEEE 802.1X used */
@@ -1076,6 +1080,8 @@ struct ieee80211_device {
int perfect_rssi;
int worst_rssi;
+ u16 prev_seq_ctl; /* used to drop duplicate frames */
+
/* Callback functions */
void (*set_security) (struct net_device * dev,
struct ieee80211_security * sec);
diff --git a/net/core/dev.c b/net/core/dev.c
index 81c426adcd1e..411c2428d268 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3035,15 +3035,6 @@ int register_netdev(struct net_device *dev)
goto out;
}
- /*
- * Back compatibility hook. Kill this one in 2.5
- */
- if (dev->name[0] == 0 || dev->name[0] == ' ') {
- err = dev_alloc_name(dev, "eth%d");
- if (err < 0)
- goto out;
- }
-
err = register_netdevice(dev);
out:
rtnl_unlock();
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
index 13b1e5fff7e4..b1c6d1f717d9 100644
--- a/net/ieee80211/ieee80211_module.c
+++ b/net/ieee80211/ieee80211_module.c
@@ -67,7 +67,7 @@ static int ieee80211_networks_allocate(struct ieee80211_device *ieee)
return 0;
ieee->networks =
- kmalloc(MAX_NETWORK_COUNT * sizeof(struct ieee80211_network),
+ kzalloc(MAX_NETWORK_COUNT * sizeof(struct ieee80211_network),
GFP_KERNEL);
if (!ieee->networks) {
printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
@@ -75,9 +75,6 @@ static int ieee80211_networks_allocate(struct ieee80211_device *ieee)
return -ENOMEM;
}
- memset(ieee->networks, 0,
- MAX_NETWORK_COUNT * sizeof(struct ieee80211_network));
-
return 0;
}
@@ -118,6 +115,21 @@ static void ieee80211_networks_initialize(struct ieee80211_device *ieee)
&ieee->network_free_list);
}
+static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > IEEE80211_DATA_LEN))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static struct net_device_stats *ieee80211_generic_get_stats(
+ struct net_device *dev)
+{
+ struct ieee80211_device *ieee = netdev_priv(dev);
+ return &ieee->stats;
+}
+
struct net_device *alloc_ieee80211(int sizeof_priv)
{
struct ieee80211_device *ieee;
@@ -133,6 +145,11 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
}
ieee = netdev_priv(dev);
dev->hard_start_xmit = ieee80211_xmit;
+ dev->change_mtu = ieee80211_change_mtu;
+
+ /* Drivers are free to override this if the generic implementation
+ * does not meet their needs. */
+ dev->get_stats = ieee80211_generic_get_stats;
ieee->dev = dev;
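alloc_ieee80211() now installs ieee80211_change_mtu() and ieee80211_generic_get_stats() by default; as the added comment says, a driver that keeps its own counters can still override them after allocation. A sketch, with my_priv and my_get_stats as placeholders:

	static struct net_device *sketch_alloc(void)
	{
		struct net_device *dev = alloc_ieee80211(sizeof(struct my_priv));

		if (dev)
			dev->get_stats = my_get_stats;	/* replaces ieee80211_generic_get_stats */
		return dev;
	}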
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 2759312a4204..d97e5412e31b 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -415,17 +415,16 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
ieee->host_mc_decrypt : ieee->host_decrypt;
if (can_be_decrypted) {
- int idx = 0;
if (skb->len >= hdrlen + 3) {
/* Top two-bits of byte 3 are the key index */
- idx = skb->data[hdrlen + 3] >> 6;
+ keyidx = skb->data[hdrlen + 3] >> 6;
}
- /* ieee->crypt[] is WEP_KEY (4) in length. Given that idx
- * is only allowed 2-bits of storage, no value of idx can
- * be provided via above code that would result in idx
+ /* ieee->crypt[] is WEP_KEY (4) in length. Given that keyidx
+ * is only allowed 2-bits of storage, no value of keyidx can
+ * be provided via above code that would result in keyidx
* being out of range */
- crypt = ieee->crypt[idx];
+ crypt = ieee->crypt[keyidx];
#ifdef NOT_YET
sta = NULL;
@@ -479,6 +478,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
goto rx_exit;
}
#endif
+ /* drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.29) */
+ if (sc == ieee->prev_seq_ctl)
+ goto rx_dropped;
+ else
+ ieee->prev_seq_ctl = sc;
/* Data frame - extract src/dst addresses */
if (skb->len < IEEE80211_3ADDR_LEN)
@@ -655,6 +659,51 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
goto rx_dropped;
}
+ /* If the frame was decrypted in hardware, we may need to strip off
+ * any security data (IV, ICV, etc) that was left behind */
+ if (!can_be_decrypted && (fc & IEEE80211_FCTL_PROTECTED) &&
+ ieee->host_strip_iv_icv) {
+ int trimlen = 0;
+
+ /* Top two-bits of byte 3 are the key index */
+ if (skb->len >= hdrlen + 3)
+ keyidx = skb->data[hdrlen + 3] >> 6;
+
+ /* To strip off any security data which appears before the
+ * payload, we simply increase hdrlen (as the header gets
+ * chopped off immediately below). For the security data which
+ * appears after the payload, we use skb_trim. */
+
+ switch (ieee->sec.encode_alg[keyidx]) {
+ case SEC_ALG_WEP:
+ /* 4 byte IV */
+ hdrlen += 4;
+ /* 4 byte ICV */
+ trimlen = 4;
+ break;
+ case SEC_ALG_TKIP:
+ /* 4 byte IV, 4 byte ExtIV */
+ hdrlen += 8;
+ /* 8 byte MIC, 4 byte ICV */
+ trimlen = 12;
+ break;
+ case SEC_ALG_CCMP:
+ /* 8 byte CCMP header */
+ hdrlen += 8;
+ /* 8 byte MIC */
+ trimlen = 8;
+ break;
+ }
+
+ if (skb->len < trimlen)
+ goto rx_dropped;
+
+ __skb_trim(skb, skb->len - trimlen);
+
+ if (skb->len < hdrlen)
+ goto rx_dropped;
+ }
+
/* skb: hdr + (possible reassembled) full plaintext payload */
payload = skb->data + hdrlen;
@@ -1255,12 +1304,11 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element
case MFIE_TYPE_IBSS_DFS:
if (network->ibss_dfs)
break;
- network->ibss_dfs =
- kmalloc(info_element->len, GFP_ATOMIC);
+ network->ibss_dfs = kmemdup(info_element->data,
+ info_element->len,
+ GFP_ATOMIC);
if (!network->ibss_dfs)
return 1;
- memcpy(network->ibss_dfs, info_element->data,
- info_element->len);
network->flags |= NETWORK_HAS_IBSS_DFS;
break;
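The ibss_dfs hunk above (and the challenge-buffer hunk in the softmac auth code below) converts an open-coded kmalloc()+memcpy() pair to kmemdup(), which is equivalent but keeps the allocation and the copy in one step:

	/* before */
	dst = kmalloc(len, GFP_ATOMIC);
	if (!dst)
		return 1;
	memcpy(dst, src, len);

	/* after */
	dst = kmemdup(src, len, GFP_ATOMIC);
	if (!dst)
		return 1;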
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 4cef39e171d0..0612015f1c78 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -158,7 +158,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
/* Make sure that we've got an auth queue item for this request */
if(aq == NULL)
{
- printkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2));
+ dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2));
/* Error #? */
return -1;
}
@@ -166,7 +166,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
/* Check for out of order authentication */
if(!net->authenticating)
{
- printkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2));
+ dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2));
return -1;
}
@@ -216,10 +216,16 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
net->challenge_len = *data++;
if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
net->challenge_len = WLAN_AUTH_CHALLENGE_LEN;
- if (net->challenge != NULL)
- kfree(net->challenge);
- net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC);
- memcpy(net->challenge, data, net->challenge_len);
+ kfree(net->challenge);
+ net->challenge = kmemdup(data, net->challenge_len,
+ GFP_ATOMIC);
+ if (net->challenge == NULL) {
+ printkl(KERN_NOTICE PFX "Shared Key "
+ "Authentication failed due to "
+ "memory shortage.\n");
+ spin_unlock_irqrestore(&mac->lock, flags);
+ break;
+ }
aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE;
/* We reuse the work struct from the auth request here.
@@ -342,7 +348,7 @@ ieee80211softmac_deauth_req(struct ieee80211softmac_device *mac,
/* Make sure the network is authenticated */
if (!net->authenticated)
{
- printkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n");
+ dprintkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n");
/* Error okay? */
return -EPERM;
}
@@ -376,7 +382,7 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2);
if (net == NULL) {
- printkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n",
+ dprintkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n",
MAC_ARG(deauth->header.addr2));
return 0;
}
@@ -384,7 +390,7 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
/* Make sure the network is authenticated */
if(!net->authenticated)
{
- printkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n");
+ dprintkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n");
/* Error okay? */
return -EPERM;
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index ad67368b58ed..5507feab32de 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -134,7 +134,8 @@ void ieee80211softmac_scan(void *d)
si->started = 0;
spin_unlock_irqrestore(&sm->lock, flags);
- dprintk(PFX "Scanning finished\n");
+ dprintk(PFX "Scanning finished: scanned %d channels starting with channel %d\n",
+ sm->scaninfo->number_channels, sm->scaninfo->channels[0].channel);
ieee80211softmac_scan_finished(sm);
complete_all(&sm->scaninfo->finished);
}
@@ -182,8 +183,6 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev)
sm->scaninfo->channels = sm->ieee->geo.bg;
sm->scaninfo->number_channels = sm->ieee->geo.bg_channels;
}
- dprintk(PFX "Start scanning with channel: %d\n", sm->scaninfo->channels[0].channel);
- dprintk(PFX "Scanning %d channels\n", sm->scaninfo->number_channels);
sm->scaninfo->current_channel_idx = 0;
sm->scaninfo->started = 1;
sm->scaninfo->stop = 0;