broadcom: Move the Broadcom drivers

Move the drivers for Broadcom devices into
drivers/net/ethernet/broadcom/ and make the necessary Kconfig and
Makefile changes.

CC: Eilon Greenstein <eilong@broadcom.com>
CC: Michael Chan <mchan@broadcom.com>
CC: Matt Carlson <mcarlson@broadcom.com>
CC: Gary Zambrano <zambrano@broadcom.com>
CC: "Maciej W. Rozycki" <macro@linux-mips.org>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Author: Jeff Kirsher
Date:   2011-04-07 06:03:04 -07:00
Parent: 644570b830
Commit: adfc5217e9
44 changed files with 141 additions and 115 deletions

drivers/net/ethernet/broadcom/Kconfig

@@ -0,0 +1,119 @@
#
# Broadcom device configuration
#
config NET_VENDOR_BROADCOM
bool "Broadcom devices"
depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
SIBYTE_SB1xxx_SOC
---help---
If you have a network (Ethernet) chipset belonging to this class,
say Y.
Note that the answer to this question does not directly affect
the kernel: saying N will just cause the configurator to skip all
the questions regarding Broadcom chipsets. If you say Y, you will be asked
for your specific chipset/driver in the following questions.
if NET_VENDOR_BROADCOM
config B44
tristate "Broadcom 440x/47xx ethernet support"
depends on SSB_POSSIBLE && HAS_DMA
select SSB
select MII
---help---
If you have a network (Ethernet) controller of this type, say Y
or M and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
To compile this driver as a module, choose M here. The module
will be called b44.
# Auto-select SSB PCI-HOST support, if possible
config B44_PCI_AUTOSELECT
bool
depends on B44 && SSB_PCIHOST_POSSIBLE
select SSB_PCIHOST
default y
# Auto-select SSB PCICORE driver, if possible
config B44_PCICORE_AUTOSELECT
bool
depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE
select SSB_DRIVER_PCICORE
default y
config B44_PCI
bool
depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
default y
config BCM63XX_ENET
tristate "Broadcom 63xx internal mac support"
depends on BCM63XX
select MII
select PHYLIB
help
This driver supports the ethernet MACs in the Broadcom 63xx
MIPS chipset family (BCM63XX).
config BNX2
tristate "Broadcom NetXtremeII support"
depends on PCI
select CRC32
select FW_LOADER
---help---
This driver supports Broadcom NetXtremeII gigabit Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called bnx2. This is recommended.
config CNIC
tristate "Broadcom CNIC support"
depends on PCI
select BNX2
select UIO
---help---
This driver supports offload features of Broadcom NetXtremeII
gigabit Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called cnic. This is recommended.
config SB1250_MAC
tristate "SB1250 Gigabit Ethernet support"
depends on SIBYTE_SB1xxx_SOC
select PHYLIB
---help---
This driver supports Gigabit Ethernet interfaces based on the
Broadcom SiByte family of System-On-a-Chip parts. They include
the BCM1120, BCM1125, BCM1125H, BCM1250, BCM1255, BCM1280, BCM1455
and BCM1480 chips.
To compile this driver as a module, choose M here: the module
will be called sb1250-mac.
config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
select PHYLIB
---help---
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called tg3. This is recommended.
config BNX2X
tristate "Broadcom NetXtremeII 10Gb support"
depends on PCI
select FW_LOADER
select ZLIB_INFLATE
select LIBCRC32C
select MDIO
---help---
This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called bnx2x. This is recommended.
endif # NET_VENDOR_BROADCOM

drivers/net/ethernet/broadcom/Makefile

@@ -0,0 +1,11 @@
#
# Makefile for the Broadcom network device drivers.
#
obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
obj-$(CONFIG_BNX2) += bnx2.o
obj-$(CONFIG_CNIC) += cnic.o
obj-$(CONFIG_BNX2X) += bnx2x/
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o

File diff suppressed because it is too large.

drivers/net/ethernet/broadcom/b44.h

@@ -0,0 +1,401 @@
#ifndef _B44_H
#define _B44_H
/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */
#define B44_DEVCTRL 0x0000UL /* Device Control */
#define DEVCTRL_MPM 0x00000040 /* Magic Packet PME Enable (B0 only) */
#define DEVCTRL_PFE 0x00000080 /* Pattern Filtering Enable */
#define DEVCTRL_IPP 0x00000400 /* Internal EPHY Present */
#define DEVCTRL_EPR 0x00008000 /* EPHY Reset */
#define DEVCTRL_PME 0x00001000 /* PHY Mode Enable */
#define DEVCTRL_PMCE 0x00002000 /* PHY Mode Clocks Enable */
#define DEVCTRL_PADDR 0x0007c000 /* PHY Address */
#define DEVCTRL_PADDR_SHIFT 18
#define B44_BIST_STAT 0x000CUL /* Built-In Self-Test Status */
#define B44_WKUP_LEN 0x0010UL /* Wakeup Length */
#define WKUP_LEN_P0_MASK 0x0000007f /* Pattern 0 */
#define WKUP_LEN_D0 0x00000080
#define WKUP_LEN_P1_MASK 0x00007f00 /* Pattern 1 */
#define WKUP_LEN_P1_SHIFT 8
#define WKUP_LEN_D1 0x00008000
#define WKUP_LEN_P2_MASK 0x007f0000 /* Pattern 2 */
#define WKUP_LEN_P2_SHIFT 16
#define WKUP_LEN_D2 0x00000000
#define WKUP_LEN_P3_MASK 0x7f000000 /* Pattern 3 */
#define WKUP_LEN_P3_SHIFT 24
#define WKUP_LEN_D3 0x80000000
#define WKUP_LEN_DISABLE 0x80808080
#define WKUP_LEN_ENABLE_TWO 0x80800000
#define WKUP_LEN_ENABLE_THREE 0x80000000
#define B44_ISTAT 0x0020UL /* Interrupt Status */
#define ISTAT_LS 0x00000020 /* Link Change (B0 only) */
#define ISTAT_PME 0x00000040 /* Power Management Event */
#define ISTAT_TO 0x00000080 /* General Purpose Timeout */
#define ISTAT_DSCE 0x00000400 /* Descriptor Error */
#define ISTAT_DATAE 0x00000800 /* Data Error */
#define ISTAT_DPE 0x00001000 /* Descr. Protocol Error */
#define ISTAT_RDU 0x00002000 /* Receive Descr. Underflow */
#define ISTAT_RFO 0x00004000 /* Receive FIFO Overflow */
#define ISTAT_TFU 0x00008000 /* Transmit FIFO Underflow */
#define ISTAT_RX 0x00010000 /* RX Interrupt */
#define ISTAT_TX 0x01000000 /* TX Interrupt */
#define ISTAT_EMAC 0x04000000 /* EMAC Interrupt */
#define ISTAT_MII_WRITE 0x08000000 /* MII Write Interrupt */
#define ISTAT_MII_READ 0x10000000 /* MII Read Interrupt */
#define ISTAT_ERRORS (ISTAT_DSCE|ISTAT_DATAE|ISTAT_DPE|ISTAT_RDU|ISTAT_RFO|ISTAT_TFU)
#define B44_IMASK 0x0024UL /* Interrupt Mask */
#define IMASK_DEF (ISTAT_ERRORS | ISTAT_TO | ISTAT_RX | ISTAT_TX)
#define B44_GPTIMER 0x0028UL /* General Purpose Timer */
#define B44_ADDR_LO 0x0088UL /* ENET Address Lo (B0 only) */
#define B44_ADDR_HI 0x008CUL /* ENET Address Hi (B0 only) */
#define B44_FILT_ADDR 0x0090UL /* ENET Filter Address */
#define B44_FILT_DATA 0x0094UL /* ENET Filter Data */
#define B44_TXBURST 0x00A0UL /* TX Max Burst Length */
#define B44_RXBURST 0x00A4UL /* RX Max Burst Length */
#define B44_MAC_CTRL 0x00A8UL /* MAC Control */
#define MAC_CTRL_CRC32_ENAB 0x00000001 /* CRC32 Generation Enable */
#define MAC_CTRL_PHY_PDOWN 0x00000004 /* Onchip EPHY Powerdown */
#define MAC_CTRL_PHY_EDET 0x00000008 /* Onchip EPHY Energy Detected */
#define MAC_CTRL_PHY_LEDCTRL 0x000000e0 /* Onchip EPHY LED Control */
#define MAC_CTRL_PHY_LEDCTRL_SHIFT 5
#define B44_MAC_FLOW 0x00ACUL /* MAC Flow Control */
#define MAC_FLOW_RX_HI_WATER 0x000000ff /* Receive FIFO HI Water Mark */
#define MAC_FLOW_PAUSE_ENAB 0x00008000 /* Enable Pause Frame Generation */
#define B44_RCV_LAZY 0x0100UL /* Lazy Interrupt Control */
#define RCV_LAZY_TO_MASK 0x00ffffff /* Timeout */
#define RCV_LAZY_FC_MASK 0xff000000 /* Frame Count */
#define RCV_LAZY_FC_SHIFT 24
#define B44_DMATX_CTRL 0x0200UL /* DMA TX Control */
#define DMATX_CTRL_ENABLE 0x00000001 /* Enable */
#define DMATX_CTRL_SUSPEND 0x00000002 /* Suspend Request */
#define DMATX_CTRL_LPBACK 0x00000004 /* Loopback Enable */
#define DMATX_CTRL_FAIRPRIOR 0x00000008 /* Fair Priority */
#define DMATX_CTRL_FLUSH 0x00000010 /* Flush Request */
#define B44_DMATX_ADDR 0x0204UL /* DMA TX Descriptor Ring Address */
#define B44_DMATX_PTR 0x0208UL /* DMA TX Last Posted Descriptor */
#define B44_DMATX_STAT 0x020CUL /* DMA TX Current Active Desc. + Status */
#define DMATX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */
#define DMATX_STAT_SMASK 0x0000f000 /* State Mask */
#define DMATX_STAT_SDISABLED 0x00000000 /* State Disabled */
#define DMATX_STAT_SACTIVE 0x00001000 /* State Active */
#define DMATX_STAT_SIDLE 0x00002000 /* State Idle Wait */
#define DMATX_STAT_SSTOPPED 0x00003000 /* State Stopped */
#define DMATX_STAT_SSUSP 0x00004000 /* State Suspend Pending */
#define DMATX_STAT_EMASK 0x000f0000 /* Error Mask */
#define DMATX_STAT_ENONE 0x00000000 /* Error None */
#define DMATX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */
#define DMATX_STAT_EDFU 0x00020000 /* Error Data FIFO Underrun */
#define DMATX_STAT_EBEBR 0x00030000 /* Error Bus Error on Buffer Read */
#define DMATX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */
#define DMATX_STAT_FLUSHED 0x00100000 /* Flushed */
#define B44_DMARX_CTRL 0x0210UL /* DMA RX Control */
#define DMARX_CTRL_ENABLE 0x00000001 /* Enable */
#define DMARX_CTRL_ROMASK 0x000000fe /* Receive Offset Mask */
#define DMARX_CTRL_ROSHIFT 1 /* Receive Offset Shift */
#define B44_DMARX_ADDR 0x0214UL /* DMA RX Descriptor Ring Address */
#define B44_DMARX_PTR 0x0218UL /* DMA RX Last Posted Descriptor */
#define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. + Status */
#define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */
#define DMARX_STAT_SMASK 0x0000f000 /* State Mask */
#define DMARX_STAT_SDISABLED 0x00000000 /* State Disabled */
#define DMARX_STAT_SACTIVE 0x00001000 /* State Active */
#define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */
#define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */
#define DMARX_STAT_EMASK 0x000f0000 /* Error Mask */
#define DMARX_STAT_ENONE 0x00000000 /* Error None */
#define DMARX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */
#define DMARX_STAT_EDFO 0x00020000 /* Error Data FIFO Overflow */
#define DMARX_STAT_EBEBW 0x00030000 /* Error Bus Error on Buffer Write */
#define DMARX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */
#define B44_DMAFIFO_AD 0x0220UL /* DMA FIFO Diag Address */
#define DMAFIFO_AD_OMASK 0x0000ffff /* Offset Mask */
#define DMAFIFO_AD_SMASK 0x000f0000 /* Select Mask */
#define DMAFIFO_AD_SXDD 0x00000000 /* Select Transmit DMA Data */
#define DMAFIFO_AD_SXDP 0x00010000 /* Select Transmit DMA Pointers */
#define DMAFIFO_AD_SRDD 0x00040000 /* Select Receive DMA Data */
#define DMAFIFO_AD_SRDP 0x00050000 /* Select Receive DMA Pointers */
#define DMAFIFO_AD_SXFD 0x00080000 /* Select Transmit FIFO Data */
#define DMAFIFO_AD_SXFP 0x00090000 /* Select Transmit FIFO Pointers */
#define DMAFIFO_AD_SRFD 0x000c0000 /* Select Receive FIFO Data */
#define DMAFIFO_AD_SRFP 0x000c0000 /* Select Receive FIFO Pointers */
#define B44_DMAFIFO_LO 0x0224UL /* DMA FIFO Diag Low Data */
#define B44_DMAFIFO_HI 0x0228UL /* DMA FIFO Diag High Data */
#define B44_RXCONFIG 0x0400UL /* EMAC RX Config */
#define RXCONFIG_DBCAST 0x00000001 /* Disable Broadcast */
#define RXCONFIG_ALLMULTI 0x00000002 /* Accept All Multicast */
#define RXCONFIG_NORX_WHILE_TX 0x00000004 /* Receive Disable While Transmitting */
#define RXCONFIG_PROMISC 0x00000008 /* Promiscuous Enable */
#define RXCONFIG_LPBACK 0x00000010 /* Loopback Enable */
#define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */
#define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */
#define RXCONFIG_RFILT 0x00000080 /* Reject Filter */
#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */
#define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */
#define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */
#define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */
#define MDIO_CTRL_MAXF_MASK 0x0000007f /* MDC Frequency */
#define MDIO_CTRL_PREAMBLE 0x00000080 /* MII Preamble Enable */
#define B44_MDIO_DATA 0x0414UL /* EMAC MDIO Data */
#define MDIO_DATA_DATA 0x0000ffff /* R/W Data */
#define MDIO_DATA_TA_MASK 0x00030000 /* Turnaround Value */
#define MDIO_DATA_TA_SHIFT 16
#define MDIO_TA_VALID 2
#define MDIO_DATA_RA_MASK 0x007c0000 /* Register Address */
#define MDIO_DATA_RA_SHIFT 18
#define MDIO_DATA_PMD_MASK 0x0f800000 /* Physical Media Device */
#define MDIO_DATA_PMD_SHIFT 23
#define MDIO_DATA_OP_MASK 0x30000000 /* Opcode */
#define MDIO_DATA_OP_SHIFT 28
#define MDIO_OP_WRITE 1
#define MDIO_OP_READ 2
#define MDIO_DATA_SB_MASK 0xc0000000 /* Start Bits */
#define MDIO_DATA_SB_SHIFT 30
#define MDIO_DATA_SB_START 0x40000000 /* Start Of Frame */
#define B44_EMAC_IMASK 0x0418UL /* EMAC Interrupt Mask */
#define B44_EMAC_ISTAT 0x041CUL /* EMAC Interrupt Status */
#define EMAC_INT_MII 0x00000001 /* MII MDIO Interrupt */
#define EMAC_INT_MIB 0x00000002 /* MIB Interrupt */
#define EMAC_INT_FLOW 0x00000003 /* Flow Control Interrupt */
#define B44_CAM_DATA_LO 0x0420UL /* EMAC CAM Data Low */
#define B44_CAM_DATA_HI 0x0424UL /* EMAC CAM Data High */
#define CAM_DATA_HI_VALID 0x00010000 /* Valid Bit */
#define B44_CAM_CTRL 0x0428UL /* EMAC CAM Control */
#define CAM_CTRL_ENABLE 0x00000001 /* CAM Enable */
#define CAM_CTRL_MSEL 0x00000002 /* Mask Select */
#define CAM_CTRL_READ 0x00000004 /* Read */
#define CAM_CTRL_WRITE 0x00000008 /* Write */
#define CAM_CTRL_INDEX_MASK 0x003f0000 /* Index Mask */
#define CAM_CTRL_INDEX_SHIFT 16
#define CAM_CTRL_BUSY 0x80000000 /* CAM Busy */
#define B44_ENET_CTRL 0x042CUL /* EMAC ENET Control */
#define ENET_CTRL_ENABLE 0x00000001 /* EMAC Enable */
#define ENET_CTRL_DISABLE 0x00000002 /* EMAC Disable */
#define ENET_CTRL_SRST 0x00000004 /* EMAC Soft Reset */
#define ENET_CTRL_EPSEL 0x00000008 /* External PHY Select */
#define B44_TX_CTRL 0x0430UL /* EMAC TX Control */
#define TX_CTRL_DUPLEX 0x00000001 /* Full Duplex */
#define TX_CTRL_FMODE 0x00000002 /* Flow Mode */
#define TX_CTRL_SBENAB 0x00000004 /* Single Backoff Enable */
#define TX_CTRL_SMALL_SLOT 0x00000008 /* Small Slottime */
#define B44_TX_WMARK 0x0434UL /* EMAC TX Watermark */
#define B44_MIB_CTRL 0x0438UL /* EMAC MIB Control */
#define MIB_CTRL_CLR_ON_READ 0x00000001 /* Autoclear on Read */
#define B44_TX_GOOD_O 0x0500UL /* MIB TX Good Octets */
#define B44_TX_GOOD_P 0x0504UL /* MIB TX Good Packets */
#define B44_TX_O 0x0508UL /* MIB TX Octets */
#define B44_TX_P 0x050CUL /* MIB TX Packets */
#define B44_TX_BCAST 0x0510UL /* MIB TX Broadcast Packets */
#define B44_TX_MCAST 0x0514UL /* MIB TX Multicast Packets */
#define B44_TX_64 0x0518UL /* MIB TX <= 64 byte Packets */
#define B44_TX_65_127 0x051CUL /* MIB TX 65 to 127 byte Packets */
#define B44_TX_128_255 0x0520UL /* MIB TX 128 to 255 byte Packets */
#define B44_TX_256_511 0x0524UL /* MIB TX 256 to 511 byte Packets */
#define B44_TX_512_1023 0x0528UL /* MIB TX 512 to 1023 byte Packets */
#define B44_TX_1024_MAX 0x052CUL /* MIB TX 1024 to max byte Packets */
#define B44_TX_JABBER 0x0530UL /* MIB TX Jabber Packets */
#define B44_TX_OSIZE 0x0534UL /* MIB TX Oversize Packets */
#define B44_TX_FRAG 0x0538UL /* MIB TX Fragment Packets */
#define B44_TX_URUNS 0x053CUL /* MIB TX Underruns */
#define B44_TX_TCOLS 0x0540UL /* MIB TX Total Collisions */
#define B44_TX_SCOLS 0x0544UL /* MIB TX Single Collisions */
#define B44_TX_MCOLS 0x0548UL /* MIB TX Multiple Collisions */
#define B44_TX_ECOLS 0x054CUL /* MIB TX Excessive Collisions */
#define B44_TX_LCOLS 0x0550UL /* MIB TX Late Collisions */
#define B44_TX_DEFERED 0x0554UL /* MIB TX Deferred Packets */
#define B44_TX_CLOST 0x0558UL /* MIB TX Carrier Lost */
#define B44_TX_PAUSE 0x055CUL /* MIB TX Pause Packets */
#define B44_RX_GOOD_O 0x0580UL /* MIB RX Good Octets */
#define B44_RX_GOOD_P 0x0584UL /* MIB RX Good Packets */
#define B44_RX_O 0x0588UL /* MIB RX Octets */
#define B44_RX_P 0x058CUL /* MIB RX Packets */
#define B44_RX_BCAST 0x0590UL /* MIB RX Broadcast Packets */
#define B44_RX_MCAST 0x0594UL /* MIB RX Multicast Packets */
#define B44_RX_64 0x0598UL /* MIB RX <= 64 byte Packets */
#define B44_RX_65_127 0x059CUL /* MIB RX 65 to 127 byte Packets */
#define B44_RX_128_255 0x05A0UL /* MIB RX 128 to 255 byte Packets */
#define B44_RX_256_511 0x05A4UL /* MIB RX 256 to 511 byte Packets */
#define B44_RX_512_1023 0x05A8UL /* MIB RX 512 to 1023 byte Packets */
#define B44_RX_1024_MAX 0x05ACUL /* MIB RX 1024 to max byte Packets */
#define B44_RX_JABBER 0x05B0UL /* MIB RX Jabber Packets */
#define B44_RX_OSIZE 0x05B4UL /* MIB RX Oversize Packets */
#define B44_RX_FRAG 0x05B8UL /* MIB RX Fragment Packets */
#define B44_RX_MISS 0x05BCUL /* MIB RX Missed Packets */
#define B44_RX_CRCA 0x05C0UL /* MIB RX CRC Align Errors */
#define B44_RX_USIZE 0x05C4UL /* MIB RX Undersize Packets */
#define B44_RX_CRC 0x05C8UL /* MIB RX CRC Errors */
#define B44_RX_ALIGN 0x05CCUL /* MIB RX Align Errors */
#define B44_RX_SYM 0x05D0UL /* MIB RX Symbol Errors */
#define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */
#define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */
/* 4400 PHY registers */
#define B44_MII_AUXCTRL 24 /* Auxiliary Control */
#define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */
#define MII_AUXCTRL_SPEED 0x0002 /* 1=100Mbps, 0=10Mbps */
#define MII_AUXCTRL_FORCED 0x0004 /* Forced 10/100 */
#define B44_MII_ALEDCTRL 26 /* Activity LED */
#define MII_ALEDCTRL_ALLMSK 0x7fff
#define B44_MII_TLEDCTRL 27 /* Traffic Meter LED */
#define MII_TLEDCTRL_ENABLE 0x0040
struct dma_desc {
__le32 ctrl;
__le32 addr;
};
/* There are only 12 bits in the DMA engine for descriptor offsetting
* so the table must be aligned on a boundary of this.
*/
#define DMA_TABLE_BYTES 4096
#define DESC_CTRL_LEN 0x00001fff
#define DESC_CTRL_CMASK 0x0ff00000 /* Core specific bits */
#define DESC_CTRL_EOT 0x10000000 /* End of Table */
#define DESC_CTRL_IOC 0x20000000 /* Interrupt On Completion */
#define DESC_CTRL_EOF 0x40000000 /* End of Frame */
#define DESC_CTRL_SOF 0x80000000 /* Start of Frame */
#define RX_COPY_THRESHOLD 256
struct rx_header {
__le16 len;
__le16 flags;
__le16 pad[12];
};
#define RX_HEADER_LEN 28
#define RX_FLAG_OFIFO 0x00000001 /* FIFO Overflow */
#define RX_FLAG_CRCERR 0x00000002 /* CRC Error */
#define RX_FLAG_SERR 0x00000004 /* Receive Symbol Error */
#define RX_FLAG_ODD 0x00000008 /* Frame has odd number of nibbles */
#define RX_FLAG_LARGE 0x00000010 /* Frame is > RX MAX Length */
#define RX_FLAG_MCAST 0x00000020 /* Dest is Multicast Address */
#define RX_FLAG_BCAST 0x00000040 /* Dest is Broadcast Address */
#define RX_FLAG_MISS 0x00000080 /* Received due to promisc mode */
#define RX_FLAG_LAST 0x00000800 /* Last buffer in frame */
#define RX_FLAG_ERRORS (RX_FLAG_ODD | RX_FLAG_SERR | RX_FLAG_CRCERR | RX_FLAG_OFIFO)
struct ring_info {
struct sk_buff *skb;
dma_addr_t mapping;
};
#define B44_MCAST_TABLE_SIZE 32
#define B44_PHY_ADDR_NO_PHY 30
#define B44_MDC_RATIO 5000000
#define B44_STAT_REG_DECLARE \
_B44(tx_good_octets) \
_B44(tx_good_pkts) \
_B44(tx_octets) \
_B44(tx_pkts) \
_B44(tx_broadcast_pkts) \
_B44(tx_multicast_pkts) \
_B44(tx_len_64) \
_B44(tx_len_65_to_127) \
_B44(tx_len_128_to_255) \
_B44(tx_len_256_to_511) \
_B44(tx_len_512_to_1023) \
_B44(tx_len_1024_to_max) \
_B44(tx_jabber_pkts) \
_B44(tx_oversize_pkts) \
_B44(tx_fragment_pkts) \
_B44(tx_underruns) \
_B44(tx_total_cols) \
_B44(tx_single_cols) \
_B44(tx_multiple_cols) \
_B44(tx_excessive_cols) \
_B44(tx_late_cols) \
_B44(tx_defered) \
_B44(tx_carrier_lost) \
_B44(tx_pause_pkts) \
_B44(rx_good_octets) \
_B44(rx_good_pkts) \
_B44(rx_octets) \
_B44(rx_pkts) \
_B44(rx_broadcast_pkts) \
_B44(rx_multicast_pkts) \
_B44(rx_len_64) \
_B44(rx_len_65_to_127) \
_B44(rx_len_128_to_255) \
_B44(rx_len_256_to_511) \
_B44(rx_len_512_to_1023) \
_B44(rx_len_1024_to_max) \
_B44(rx_jabber_pkts) \
_B44(rx_oversize_pkts) \
_B44(rx_fragment_pkts) \
_B44(rx_missed_pkts) \
_B44(rx_crc_align_errs) \
_B44(rx_undersize) \
_B44(rx_crc_errs) \
_B44(rx_align_errs) \
_B44(rx_symbol_errs) \
_B44(rx_pause_pkts) \
_B44(rx_nonpause_pkts)
/* SW copy of device statistics, kept up to date by periodic timer
* which probes HW values. Check b44_stats_update if you mess with
* the layout
*/
struct b44_hw_stats {
#define _B44(x) u32 x;
B44_STAT_REG_DECLARE
#undef _B44
};
struct ssb_device;
struct b44 {
spinlock_t lock;
u32 imask, istat;
struct dma_desc *rx_ring, *tx_ring;
u32 tx_prod, tx_cons;
u32 rx_prod, rx_cons;
struct ring_info *rx_buffers;
struct ring_info *tx_buffers;
struct napi_struct napi;
u32 dma_offset;
u32 flags;
#define B44_FLAG_B0_ANDLATER 0x00000001
#define B44_FLAG_BUGGY_TXPTR 0x00000002
#define B44_FLAG_REORDER_BUG 0x00000004
#define B44_FLAG_PAUSE_AUTO 0x00008000
#define B44_FLAG_FULL_DUPLEX 0x00010000
#define B44_FLAG_100_BASE_T 0x00020000
#define B44_FLAG_TX_PAUSE 0x00040000
#define B44_FLAG_RX_PAUSE 0x00080000
#define B44_FLAG_FORCE_LINK 0x00100000
#define B44_FLAG_ADV_10HALF 0x01000000
#define B44_FLAG_ADV_10FULL 0x02000000
#define B44_FLAG_ADV_100HALF 0x04000000
#define B44_FLAG_ADV_100FULL 0x08000000
#define B44_FLAG_INTERNAL_PHY 0x10000000
#define B44_FLAG_RX_RING_HACK 0x20000000
#define B44_FLAG_TX_RING_HACK 0x40000000
#define B44_FLAG_WOL_ENABLE 0x80000000
u32 msg_enable;
struct timer_list timer;
struct b44_hw_stats hw_stats;
struct ssb_device *sdev;
struct net_device *dev;
dma_addr_t rx_ring_dma, tx_ring_dma;
u32 rx_pending;
u32 tx_pending;
u8 phy_addr;
u8 force_copybreak;
struct mii_if_info mii_if;
};
#endif /* _B44_H */
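
For illustration, not part of the diff above: a minimal sketch of how the MDIO_DATA_* fields defined in this header compose a single MDIO read frame for the B44_MDIO_DATA register. The helper name is hypothetical and only assumes the definitions above are in scope; the driver's real accessors live in b44.c.

#include <linux/types.h>

/* Hypothetical sketch: build the 32-bit value written to B44_MDIO_DATA to
 * issue an MDIO read of PHY register 'reg' on PHY address 'phy_addr'.
 */
static inline u32 b44_mdio_read_frame(u8 phy_addr, u8 reg)
{
	return MDIO_DATA_SB_START |			/* start of frame */
	       (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |	/* read opcode */
	       ((u32)phy_addr << MDIO_DATA_PMD_SHIFT) |	/* PHY (PMD) address */
	       ((u32)reg << MDIO_DATA_RA_SHIFT) |	/* register address */
	       (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);	/* turnaround bits */
}

Once ISTAT_MII_READ is signalled, the result would be taken from the MDIO_DATA_DATA field of the same register.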

File diff suppressed because it is too large.

drivers/net/ethernet/broadcom/bcm63xx_enet.h

@@ -0,0 +1,302 @@
#ifndef BCM63XX_ENET_H_
#define BCM63XX_ENET_H_
#include <linux/types.h>
#include <linux/mii.h>
#include <linux/mutex.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_irq.h>
#include <bcm63xx_io.h>
/* default number of descriptors */
#define BCMENET_DEF_RX_DESC 64
#define BCMENET_DEF_TX_DESC 32
/* maximum burst len for dma (4 bytes unit) */
#define BCMENET_DMA_MAXBURST 16
/* tx transmit threshold (in 4-byte units); the fifo is 256 bytes, so the
value must be low enough that a DMA transfer of the above burst length
cannot overflow the fifo */
#define BCMENET_TX_FIFO_TRESH 32
/*
* hardware maximum rx/tx packet size including FCS, max mtu is
* actually 2047, but if we set max rx size register to 2047 we won't
* get overflow information if packet size is 2048 or above
*/
#define BCMENET_MAX_MTU 2046
/*
* rx/tx dma descriptor
*/
struct bcm_enet_desc {
u32 len_stat;
u32 address;
};
#define DMADESC_LENGTH_SHIFT 16
#define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT)
#define DMADESC_OWNER_MASK (1 << 15)
#define DMADESC_EOP_MASK (1 << 14)
#define DMADESC_SOP_MASK (1 << 13)
#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
#define DMADESC_WRAP_MASK (1 << 12)
#define DMADESC_UNDER_MASK (1 << 9)
#define DMADESC_APPEND_CRC (1 << 8)
#define DMADESC_OVSIZE_MASK (1 << 4)
#define DMADESC_RXER_MASK (1 << 2)
#define DMADESC_CRC_MASK (1 << 1)
#define DMADESC_OV_MASK (1 << 0)
#define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \
DMADESC_OVSIZE_MASK | \
DMADESC_RXER_MASK | \
DMADESC_CRC_MASK | \
DMADESC_OV_MASK)
/*
* MIB Counters register definitions
*/
#define ETH_MIB_TX_GD_OCTETS 0
#define ETH_MIB_TX_GD_PKTS 1
#define ETH_MIB_TX_ALL_OCTETS 2
#define ETH_MIB_TX_ALL_PKTS 3
#define ETH_MIB_TX_BRDCAST 4
#define ETH_MIB_TX_MULT 5
#define ETH_MIB_TX_64 6
#define ETH_MIB_TX_65_127 7
#define ETH_MIB_TX_128_255 8
#define ETH_MIB_TX_256_511 9
#define ETH_MIB_TX_512_1023 10
#define ETH_MIB_TX_1024_MAX 11
#define ETH_MIB_TX_JAB 12
#define ETH_MIB_TX_OVR 13
#define ETH_MIB_TX_FRAG 14
#define ETH_MIB_TX_UNDERRUN 15
#define ETH_MIB_TX_COL 16
#define ETH_MIB_TX_1_COL 17
#define ETH_MIB_TX_M_COL 18
#define ETH_MIB_TX_EX_COL 19
#define ETH_MIB_TX_LATE 20
#define ETH_MIB_TX_DEF 21
#define ETH_MIB_TX_CRS 22
#define ETH_MIB_TX_PAUSE 23
#define ETH_MIB_RX_GD_OCTETS 32
#define ETH_MIB_RX_GD_PKTS 33
#define ETH_MIB_RX_ALL_OCTETS 34
#define ETH_MIB_RX_ALL_PKTS 35
#define ETH_MIB_RX_BRDCAST 36
#define ETH_MIB_RX_MULT 37
#define ETH_MIB_RX_64 38
#define ETH_MIB_RX_65_127 39
#define ETH_MIB_RX_128_255 40
#define ETH_MIB_RX_256_511 41
#define ETH_MIB_RX_512_1023 42
#define ETH_MIB_RX_1024_MAX 43
#define ETH_MIB_RX_JAB 44
#define ETH_MIB_RX_OVR 45
#define ETH_MIB_RX_FRAG 46
#define ETH_MIB_RX_DROP 47
#define ETH_MIB_RX_CRC_ALIGN 48
#define ETH_MIB_RX_UND 49
#define ETH_MIB_RX_CRC 50
#define ETH_MIB_RX_ALIGN 51
#define ETH_MIB_RX_SYM 52
#define ETH_MIB_RX_PAUSE 53
#define ETH_MIB_RX_CNTRL 54
struct bcm_enet_mib_counters {
u64 tx_gd_octets;
u32 tx_gd_pkts;
u32 tx_all_octets;
u32 tx_all_pkts;
u32 tx_brdcast;
u32 tx_mult;
u32 tx_64;
u32 tx_65_127;
u32 tx_128_255;
u32 tx_256_511;
u32 tx_512_1023;
u32 tx_1024_max;
u32 tx_jab;
u32 tx_ovr;
u32 tx_frag;
u32 tx_underrun;
u32 tx_col;
u32 tx_1_col;
u32 tx_m_col;
u32 tx_ex_col;
u32 tx_late;
u32 tx_def;
u32 tx_crs;
u32 tx_pause;
u64 rx_gd_octets;
u32 rx_gd_pkts;
u32 rx_all_octets;
u32 rx_all_pkts;
u32 rx_brdcast;
u32 rx_mult;
u32 rx_64;
u32 rx_65_127;
u32 rx_128_255;
u32 rx_256_511;
u32 rx_512_1023;
u32 rx_1024_max;
u32 rx_jab;
u32 rx_ovr;
u32 rx_frag;
u32 rx_drop;
u32 rx_crc_align;
u32 rx_und;
u32 rx_crc;
u32 rx_align;
u32 rx_sym;
u32 rx_pause;
u32 rx_cntrl;
};
struct bcm_enet_priv {
/* mac id (from platform device id) */
int mac_id;
/* base remapped address of device */
void __iomem *base;
/* mac irq, rx_dma irq, tx_dma irq */
int irq;
int irq_rx;
int irq_tx;
/* hw view of rx & tx dma ring */
dma_addr_t rx_desc_dma;
dma_addr_t tx_desc_dma;
/* allocated size (in bytes) for rx & tx dma ring */
unsigned int rx_desc_alloc_size;
unsigned int tx_desc_alloc_size;
struct napi_struct napi;
/* dma channel id for rx */
int rx_chan;
/* number of dma desc in rx ring */
int rx_ring_size;
/* cpu view of rx dma ring */
struct bcm_enet_desc *rx_desc_cpu;
/* current number of armed descriptor given to hardware for rx */
int rx_desc_count;
/* next rx descriptor to fetch from hardware */
int rx_curr_desc;
/* next dirty rx descriptor to refill */
int rx_dirty_desc;
/* size of allocated rx skbs */
unsigned int rx_skb_size;
/* list of skb given to hw for rx */
struct sk_buff **rx_skb;
/* used when rx skb allocation failed, so we defer rx queue
* refill */
struct timer_list rx_timeout;
/* lock rx_timeout against rx normal operation */
spinlock_t rx_lock;
/* dma channel id for tx */
int tx_chan;
/* number of dma desc in tx ring */
int tx_ring_size;
/* cpu view of tx dma ring */
struct bcm_enet_desc *tx_desc_cpu;
/* number of available descriptor for tx */
int tx_desc_count;
/* next tx descriptor available */
int tx_curr_desc;
/* next dirty tx descriptor to reclaim */
int tx_dirty_desc;
/* list of skb given to hw for tx */
struct sk_buff **tx_skb;
/* lock used by tx reclaim and xmit */
spinlock_t tx_lock;
/* set if internal phy is ignored and external mii interface
* is selected */
int use_external_mii;
/* set if a phy is connected, phy address must be known,
* probing is not possible */
int has_phy;
int phy_id;
/* set if connected phy has an associated irq */
int has_phy_interrupt;
int phy_interrupt;
/* used when a phy is connected (phylib used) */
struct mii_bus *mii_bus;
struct phy_device *phydev;
int old_link;
int old_duplex;
int old_pause;
/* used when no phy is connected */
int force_speed_100;
int force_duplex_full;
/* pause parameters */
int pause_auto;
int pause_rx;
int pause_tx;
/* stats */
struct bcm_enet_mib_counters mib;
/* after mib interrupt, mib registers update is done in this
* work queue */
struct work_struct mib_update_task;
/* lock mib update between userspace request and workqueue */
struct mutex mib_update_lock;
/* mac clock */
struct clk *mac_clk;
/* phy clock if internal phy is used */
struct clk *phy_clk;
/* network device reference */
struct net_device *net_dev;
/* platform device reference */
struct platform_device *pdev;
/* maximum hardware transmit/receive size */
unsigned int hw_mtu;
};
#endif /* ! BCM63XX_ENET_H_ */
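
For illustration, not part of the diff above: a minimal sketch, using only the DMADESC_* bits defined in this header, of how the len_stat word of a single-buffer transmit descriptor is typically composed — length in the upper half, SOP+EOP for a one-fragment frame, hardware-appended CRC, ownership handed to the DMA engine, and WRAP set on the last ring entry. The helper is hypothetical; the real transmit path is in bcm63xx_enet.c.

#include <linux/types.h>

/* Hypothetical sketch: fill one bcm_enet_desc for a single-fragment
 * transmit of 'len' bytes at DMA address 'addr'. 'last_in_ring' marks
 * the final descriptor so the DMA engine wraps back to entry 0.
 */
static inline void sketch_fill_tx_desc(struct bcm_enet_desc *desc,
				       u32 addr, unsigned int len,
				       bool last_in_ring)
{
	u32 len_stat;

	len_stat = (len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK;		/* SOP + EOP: whole frame in one buffer */
	len_stat |= DMADESC_APPEND_CRC;		/* hardware appends the FCS */
	len_stat |= DMADESC_OWNER_MASK;		/* hand ownership to the DMA engine */
	if (last_in_ring)
		len_stat |= DMADESC_WRAP_MASK;

	desc->address = addr;
	desc->len_stat = len_stat;
}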

File diff suppressed because it is too large.

File diff suppressed because it is too large.

drivers/net/ethernet/broadcom/bnx2_fw.h

@@ -0,0 +1,88 @@
/* bnx2_fw.h: Broadcom NX2 network driver.
*
* Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
/* Initialized Values for the Completion Processor. */
static const struct cpu_reg cpu_reg_com = {
.mode = BNX2_COM_CPU_MODE,
.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT,
.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA,
.state = BNX2_COM_CPU_STATE,
.state_value_clear = 0xffffff,
.gpr0 = BNX2_COM_CPU_REG_FILE,
.evmask = BNX2_COM_CPU_EVENT_MASK,
.pc = BNX2_COM_CPU_PROGRAM_COUNTER,
.inst = BNX2_COM_CPU_INSTRUCTION,
.bp = BNX2_COM_CPU_HW_BREAKPOINT,
.spad_base = BNX2_COM_SCRATCH,
.mips_view_base = 0x8000000,
};
/* Initialized Values for the Command Processor. */
static const struct cpu_reg cpu_reg_cp = {
.mode = BNX2_CP_CPU_MODE,
.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT,
.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA,
.state = BNX2_CP_CPU_STATE,
.state_value_clear = 0xffffff,
.gpr0 = BNX2_CP_CPU_REG_FILE,
.evmask = BNX2_CP_CPU_EVENT_MASK,
.pc = BNX2_CP_CPU_PROGRAM_COUNTER,
.inst = BNX2_CP_CPU_INSTRUCTION,
.bp = BNX2_CP_CPU_HW_BREAKPOINT,
.spad_base = BNX2_CP_SCRATCH,
.mips_view_base = 0x8000000,
};
/* Initialized Values for the RX Processor. */
static const struct cpu_reg cpu_reg_rxp = {
.mode = BNX2_RXP_CPU_MODE,
.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT,
.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA,
.state = BNX2_RXP_CPU_STATE,
.state_value_clear = 0xffffff,
.gpr0 = BNX2_RXP_CPU_REG_FILE,
.evmask = BNX2_RXP_CPU_EVENT_MASK,
.pc = BNX2_RXP_CPU_PROGRAM_COUNTER,
.inst = BNX2_RXP_CPU_INSTRUCTION,
.bp = BNX2_RXP_CPU_HW_BREAKPOINT,
.spad_base = BNX2_RXP_SCRATCH,
.mips_view_base = 0x8000000,
};
/* Initialized Values for the TX Patch-up Processor. */
static const struct cpu_reg cpu_reg_tpat = {
.mode = BNX2_TPAT_CPU_MODE,
.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT,
.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA,
.state = BNX2_TPAT_CPU_STATE,
.state_value_clear = 0xffffff,
.gpr0 = BNX2_TPAT_CPU_REG_FILE,
.evmask = BNX2_TPAT_CPU_EVENT_MASK,
.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER,
.inst = BNX2_TPAT_CPU_INSTRUCTION,
.bp = BNX2_TPAT_CPU_HW_BREAKPOINT,
.spad_base = BNX2_TPAT_SCRATCH,
.mips_view_base = 0x8000000,
};
/* Initialized Values for the TX Processor. */
static const struct cpu_reg cpu_reg_txp = {
.mode = BNX2_TXP_CPU_MODE,
.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT,
.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA,
.state = BNX2_TXP_CPU_STATE,
.state_value_clear = 0xffffff,
.gpr0 = BNX2_TXP_CPU_REG_FILE,
.evmask = BNX2_TXP_CPU_EVENT_MASK,
.pc = BNX2_TXP_CPU_PROGRAM_COUNTER,
.inst = BNX2_TXP_CPU_INSTRUCTION,
.bp = BNX2_TXP_CPU_HW_BREAKPOINT,
.spad_base = BNX2_TXP_SCRATCH,
.mips_view_base = 0x8000000,
};
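
For illustration, not part of the diff above: a rough sketch of how a per-processor register map like the cpu_reg initializers above is typically used when loading firmware — halt the RISC processor, clear its event state, copy the image into its scratchpad window, set the program counter, then release the halt. The struct cpu_reg fields are the ones these initializers assume; the indirect accessors reg_rd_ind()/reg_wr_ind() are hypothetical stand-ins for the driver's helpers in bnx2.c.

#include <linux/types.h>

struct bnx2;						/* driver context */
u32 reg_rd_ind(struct bnx2 *bp, u32 off);		/* hypothetical indirect read */
void reg_wr_ind(struct bnx2 *bp, u32 off, u32 val);	/* hypothetical indirect write */

/* Hypothetical sketch of the halt/load/start sequence for one processor. */
static void sketch_start_cpu(struct bnx2 *bp, const struct cpu_reg *cpu,
			     u32 entry_point)
{
	u32 val;

	/* Halt the processor and clear pending events. */
	val = reg_rd_ind(bp, cpu->mode);
	reg_wr_ind(bp, cpu->mode, val | cpu->mode_value_halt);
	reg_wr_ind(bp, cpu->state, cpu->state_value_clear);

	/* ... copy firmware sections into the scratchpad here, translating
	 * MIPS-view addresses via cpu->mips_view_base and cpu->spad_base ...
	 */

	/* Point the program counter at the entry point and un-halt. */
	reg_wr_ind(bp, cpu->inst, 0);
	reg_wr_ind(bp, cpu->pc, entry_point);
	val = reg_rd_ind(bp, cpu->mode);
	reg_wr_ind(bp, cpu->mode, val & ~cpu->mode_value_halt);
}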

drivers/net/ethernet/broadcom/bnx2x/Makefile

@@ -0,0 +1,7 @@
#
# Makefile for Broadcom 10-Gigabit ethernet driver
#
obj-$(CONFIG_BNX2X) += bnx2x.o
bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h

@@ -0,0 +1,203 @@
/* bnx2x_dcb.h: Broadcom Everest network driver.
*
* Copyright 2009-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*
* Maintained by: Eilon Greenstein <eilong@broadcom.com>
* Written by: Dmitry Kravkov
*
*/
#ifndef BNX2X_DCB_H
#define BNX2X_DCB_H
#include "bnx2x_hsi.h"
#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */
struct bnx2x_dcbx_app_params {
u32 enabled;
u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
};
#define DCBX_COS_MAX_NUM_E2 DCBX_E2E3_MAX_NUM_COS
/* bnx2x currently limits the number of supported COSes to 3 (to be extended to 6) */
#define BNX2X_MAX_COS_SUPPORT 3
#define DCBX_COS_MAX_NUM_E3B0 BNX2X_MAX_COS_SUPPORT
#define DCBX_COS_MAX_NUM BNX2X_MAX_COS_SUPPORT
struct bnx2x_dcbx_cos_params {
u32 bw_tbl;
u32 pri_bitmask;
/*
* strict priority: valid values are 0..5; 0 is highest priority.
* There can't be two COSes with the same priority.
*/
u8 strict;
#define BNX2X_DCBX_STRICT_INVALID DCBX_COS_MAX_NUM
#define BNX2X_DCBX_STRICT_COS_HIGHEST 0
#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp) ((sp) + 1)
u8 pauseable;
};
struct bnx2x_dcbx_pg_params {
u32 enabled;
u8 num_of_cos; /* valid COS entries */
struct bnx2x_dcbx_cos_params cos_params[DCBX_COS_MAX_NUM];
};
struct bnx2x_dcbx_pfc_params {
u32 enabled;
u32 priority_non_pauseable_mask;
};
struct bnx2x_dcbx_port_params {
struct bnx2x_dcbx_pfc_params pfc;
struct bnx2x_dcbx_pg_params ets;
struct bnx2x_dcbx_app_params app;
};
#define BNX2X_DCBX_CONFIG_INV_VALUE (0xFFFFFFFF)
#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0
#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)
#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\
(bp)->dcbx_port_params.ets.enabled)
struct bnx2x_config_lldp_params {
u32 overwrite_settings;
u32 msg_tx_hold;
u32 msg_fast_tx;
u32 tx_credit_max;
u32 msg_tx_interval;
u32 tx_fast;
};
struct bnx2x_admin_priority_app_table {
u32 valid;
u32 priority;
#define INVALID_TRAFFIC_TYPE_PRIORITY (0xFFFFFFFF)
u32 traffic_type;
#define TRAFFIC_TYPE_ETH 0
#define TRAFFIC_TYPE_PORT 1
u32 app_id;
};
struct bnx2x_config_dcbx_params {
u32 overwrite_settings;
u32 admin_dcbx_version;
u32 admin_ets_enable;
u32 admin_pfc_enable;
u32 admin_tc_supported_tx_enable;
u32 admin_ets_configuration_tx_enable;
u32 admin_ets_recommendation_tx_enable;
u32 admin_pfc_tx_enable;
u32 admin_application_priority_tx_enable;
u32 admin_ets_willing;
u32 admin_ets_reco_valid;
u32 admin_pfc_willing;
u32 admin_app_priority_willing;
u32 admin_configuration_bw_precentage[8];
u32 admin_configuration_ets_pg[8];
u32 admin_recommendation_bw_precentage[8];
u32 admin_recommendation_ets_pg[8];
u32 admin_pfc_bitmap;
struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
u32 admin_default_priority;
};
#define GET_FLAGS(flags, bits) ((flags) & (bits))
#define SET_FLAGS(flags, bits) ((flags) |= (bits))
#define RESET_FLAGS(flags, bits) ((flags) &= ~(bits))
enum {
DCBX_READ_LOCAL_MIB,
DCBX_READ_REMOTE_MIB
};
#define ETH_TYPE_FCOE (0x8906)
#define TCP_PORT_ISCSI (0xCBC)
#define PFC_VALUE_FRAME_SIZE (512)
#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed) \
((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130
#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170
struct cos_entry_help_data {
u32 pri_join_mask;
u32 cos_bw;
u8 strict;
bool pausable;
};
struct cos_help_data {
struct cos_entry_help_data data[DCBX_COS_MAX_NUM];
u8 num_of_cos;
};
#define DCBX_ILLEGAL_PG (0xFF)
#define DCBX_PFC_PRI_MASK (0xFF)
#define DCBX_STRICT_PRIORITY (15)
#define DCBX_INVALID_COS_BW (0xFFFFFFFF)
#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp) \
((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
#define DCBX_PFC_PRI_PAUSE_MASK(bp) \
((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri) \
((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \
(DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri) \
(0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri))
#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \
(pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri) \
(!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
struct pg_entry_help_data {
u8 num_of_dif_pri;
u8 pg;
u32 pg_priority;
};
struct pg_help_data {
struct pg_entry_help_data data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
u8 num_of_pg;
};
/* forward DCB/PFC related declarations */
struct bnx2x;
void bnx2x_dcbx_update(struct work_struct *work);
void bnx2x_dcbx_init_params(struct bnx2x *bp);
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
enum {
BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
BNX2X_DCBX_STATE_TX_PAUSED,
BNX2X_DCBX_STATE_TX_RELEASED
};
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
void bnx2x_dcbx_pmf_update(struct bnx2x *bp);
/* DCB netlink */
#ifdef BCM_DCBNL
extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
#endif /* BCM_DCBNL */
#endif /* BNX2X_DCB_H */
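
For illustration, not part of the diff above: a small hypothetical helper showing how the pause-classification macros in this header partition a priority-group bitmap. A PG whose priorities all fall inside DCBX_PFC_PRI_PAUSE_MASK is pauseable, one entirely outside it is non-pauseable, and anything else is a mix that the DCBX code must split across COS entries.

#include <linux/types.h>

enum pg_pause_class { PG_PAUSEABLE, PG_NON_PAUSEABLE, PG_MIXED };

/* Hypothetical sketch; 'bp' is the driver's struct bnx2x context. */
static inline enum pg_pause_class sketch_classify_pg(struct bnx2x *bp,
						     u32 pg_pri)
{
	if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri))
		return PG_PAUSEABLE;
	if (IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri))
		return PG_NON_PAUSEABLE;
	return PG_MIXED;	/* i.e. IS_DCBX_PFC_PRI_MIX_PAUSE() */
}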

File diff suppressed because it is too large.

File diff suppressed because it is too large.

drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h

@@ -0,0 +1,410 @@
/* bnx2x_fw_defs.h: Broadcom Everest network driver.
*
* Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNX2X_FW_DEFS_H
#define BNX2X_FW_DEFS_H
#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base)
#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[147].base + ((assertListEntry) * IRO[147].m1))
#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
(IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
IRO[153].m2))
#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
(IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
IRO[154].m2))
#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
(IRO[159].base + ((funcId) * IRO[159].m1))
#define CSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[149].base + ((funcId) * IRO[149].m1))
#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[315].base + ((pfId) * IRO[315].m1))
#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[316].base + ((pfId) * IRO[316].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
(IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(IRO[314].base + ((pfId) * IRO[314].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[306].base + ((pfId) * IRO[306].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[305].base + ((pfId) * IRO[305].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[304].base + ((pfId) * IRO[304].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[151].base + ((funcId) * IRO[151].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
(IRO[142].base + ((pfId) * IRO[142].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
(IRO[143].base + ((pfId) * IRO[143].m1))
#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
(IRO[141].base + ((pfId) * IRO[141].m1))
#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
(IRO[144].base + ((pfId) * IRO[144].m1))
#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
(IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
(IRO[133].base + ((sbId) * IRO[133].m1))
#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
(IRO[134].base + ((sbId) * IRO[134].m1))
#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
(IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
(IRO[132].base + ((sbId) * IRO[132].m1))
#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
(IRO[137].base + ((sbId) * IRO[137].m1))
#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
(IRO[155].base + ((vfId) * IRO[155].m1))
#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
(IRO[156].base + ((vfId) * IRO[156].m1))
#define CSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[150].base + ((funcId) * IRO[150].m1))
#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
(IRO[203].base + ((pfId) * IRO[203].m1))
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[101].base + ((assertListEntry) * IRO[101].m1))
#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
(IRO[108].base)
#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
(IRO[201].base + ((pfId) * IRO[201].m1))
#define TSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[103].base + ((funcId) * IRO[103].m1))
#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(IRO[271].base + ((pfId) * IRO[271].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
(IRO[272].base + ((pfId) * IRO[272].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
(IRO[273].base + ((pfId) * IRO[273].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
(IRO[274].base + ((pfId) * IRO[274].m1))
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[270].base + ((pfId) * IRO[270].m1))
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[269].base + ((pfId) * IRO[269].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[268].base + ((pfId) * IRO[268].m1))
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[267].base + ((pfId) * IRO[267].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
(IRO[276].base + ((pfId) * IRO[276].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[263].base + ((pfId) * IRO[263].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[264].base + ((pfId) * IRO[264].m1))
#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[265].base + ((pfId) * IRO[265].m1))
#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[266].base + ((pfId) * IRO[266].m1))
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
(IRO[202].base + ((pfId) * IRO[202].m1))
#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[105].base + ((funcId) * IRO[105].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
(IRO[216].base + ((pfId) * IRO[216].m1))
#define TSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[104].base + ((funcId) * IRO[104].m1))
#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
#define USTORM_AGG_DATA_SIZE (IRO[206].size)
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[176].base + ((assertListEntry) * IRO[176].m1))
#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
(IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
IRO[205].m2))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[183].base + ((portId) * IRO[183].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
(IRO[317].base + ((pfId) * IRO[317].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[178].base + ((funcId) * IRO[178].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[281].base + ((pfId) * IRO[281].m1))
#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(IRO[286].base + ((pfId) * IRO[286].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
(IRO[283].base + ((pfId) * IRO[283].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[279].base + ((pfId) * IRO[279].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[278].base + ((pfId) * IRO[278].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[277].base + ((pfId) * IRO[277].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IRO[280].base + ((pfId) * IRO[280].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
(IRO[284].base + ((pfId) * IRO[284].m1))
#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[182].base + ((pfId) * IRO[182].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[180].base + ((funcId) * IRO[180].m1))
#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
(IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
IRO[209].m2))
#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
(IRO[210].base + ((qzoneId) * IRO[210].m1))
#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
#define USTORM_TPA_BTR_SIZE (IRO[207].size)
#define USTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[179].base + ((funcId) * IRO[179].m1))
#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base)
#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[50].base + ((assertListEntry) * IRO[50].m1))
#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
(IRO[43].base + ((portId) * IRO[43].m1))
#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
(IRO[45].base + ((pfId) * IRO[45].m1))
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(IRO[294].base + ((pfId) * IRO[294].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
(IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[300].base + ((pfId) * IRO[300].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[301].base + ((pfId) * IRO[301].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[302].base + ((pfId) * IRO[302].m1))
#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
(IRO[303].base + ((pfId) * IRO[303].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[293].base + ((pfId) * IRO[293].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[292].base + ((pfId) * IRO[292].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[291].base + ((pfId) * IRO[291].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
(IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
(IRO[290].base + ((pfId) * IRO[290].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[289].base + ((pfId) * IRO[289].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
(IRO[288].base + ((pfId) * IRO[288].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
(IRO[287].base + ((pfId) * IRO[287].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[49].base + ((funcId) * IRO[49].m1))
#define XSTORM_SPQ_DATA_OFFSET(funcId) \
(IRO[32].base + ((funcId) * IRO[32].m1))
#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
(IRO[30].base + ((funcId) * IRO[30].m1))
#define XSTORM_SPQ_PROD_OFFSET(funcId) \
(IRO[31].base + ((funcId) * IRO[31].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
(IRO[211].base + ((portId) * IRO[211].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
(IRO[212].base + ((portId) * IRO[212].m1))
#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
(IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
IRO[214].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
/**
* This file defines HSI constants for the ETH flow
*/
#ifdef _EVEREST_MICROCODE
#include "Microcode\Generated\DataTypes\eth_rx_bd.h"
#include "Microcode\Generated\DataTypes\eth_tx_bd.h"
#include "Microcode\Generated\DataTypes\eth_rx_cqe.h"
#include "Microcode\Generated\DataTypes\eth_rx_sge.h"
#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h"
#endif
/* Ethernet Ring parameters */
#define X_ETH_LOCAL_RING_SIZE 13
#define FIRST_BD_IN_PKT 0
#define PARSE_BD_INDEX 1
#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
#define U_ETH_NUM_OF_SGES_TO_FETCH 8
#define U_ETH_MAX_SGES_FOR_PACKET 3
/* Rx ring params */
#define U_ETH_LOCAL_BD_RING_SIZE 8
#define U_ETH_LOCAL_SGE_RING_SIZE 10
#define U_ETH_SGL_SIZE 8
/* The fw will pad the buffer with this value, so the IP header \
will be aligned to a 4-byte boundary */
#define IP_HEADER_ALIGNMENT_PADDING 2
#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
(0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
#define U_ETH_UNDEFINED_Q 0xFF
#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY 10
#define ETH_NUM_OF_RSS_ENGINES_E2 72
#define FILTER_RULES_COUNT 16
#define MULTICAST_RULES_COUNT 16
#define CLASSIFY_RULES_COUNT 16
/* The CRC32 seed used for the multicast address hash (reduction) */
#define ETH_CRC32_HASH_SEED 0x00000000
#define ETH_CRC32_HASH_BIT_SIZE (8)
#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
/* Maximal L2 clients supported */
#define ETH_MAX_RX_CLIENTS_E1 18
#define ETH_MAX_RX_CLIENTS_E1H 28
#define ETH_MAX_RX_CLIENTS_E2 152
/* Maximal statistics client Ids */
#define MAX_STAT_COUNTER_ID_E1 36
#define MAX_STAT_COUNTER_ID_E1H 56
#define MAX_STAT_COUNTER_ID_E2 140
#define MAX_MAC_CREDIT_E1 192 /* Per Chip */
#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
#define MAX_MAC_CREDIT_E2 272 /* Per Path */
#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */
#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
/* Maximal aggregation queues supported */
#define ETH_MAX_AGGREGATION_QUEUES_E1 32
#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
#define ETH_NUM_OF_MCAST_BINS 256
#define ETH_NUM_OF_MCAST_ENGINES_E2 72
#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
(ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
(ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
/**
* This file defines HSI constants common to all microcode flows
*/
#define PROTOCOL_STATE_BIT_OFFSET 6
#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
/* microcode fixed page size 4K (chains and ring segments) */
#define MC_PAGE_SIZE 4096
/* Number of indices per slow-path SB */
#define HC_SP_SB_MAX_INDICES 16
/* Number of indices per SB */
#define HC_SB_MAX_INDICES_E1X 8
#define HC_SB_MAX_INDICES_E2 8
#define HC_SB_MAX_SB_E1X 32
#define HC_SB_MAX_SB_E2 136
#define HC_SP_SB_ID 0xde
#define HC_SB_MAX_SM 2
#define HC_SB_MAX_DYNAMIC_INDICES 4
/* max number of slow path commands per port */
#define MAX_RAMRODS_PER_PORT 8
/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
#define TIMERS_TICK_SIZE_CHIP (1e-3)
#define TSEMI_CLK1_RESUL_CHIP (1e-3)
#define XSEMI_CLK1_RESUL_CHIP (1e-3)
#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
#define XSTORM_IP_ID_ROLL_HALF 0x8000
#define XSTORM_IP_ID_ROLL_ALL 0
#define FW_LOG_LIST_SIZE 50
#define NUM_OF_SAFC_BITS 16
#define MAX_COS_NUMBER 4
#define MAX_TRAFFIC_TYPES 8
#define MAX_PFC_PRIORITIES 8
/* used by the traffic_type_to_priority[] array to mark a traffic type \
that is not mapped to a priority */
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
#define C_ERES_PER_PAGE \
(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
#define STATS_QUERY_CMD_COUNT 16
#define NIV_LIST_TABLE_SIZE 4096
#define INVALID_VNIC_ID 0xFF
#define UNDEF_IRO 0x80000000
#endif /* BNX2X_FW_DEFS_H */
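
For illustration, not part of the diff above: the *_OFFSET macros in this header index a firmware-supplied IRO ("internal RAM offsets") table and scale by per-entry multipliers. A minimal sketch of the entry shape these macros assume (the real struct iro is declared elsewhere in the driver and may carry additional fields) and of how one macro resolves:

#include <linux/types.h>

/* Assumed illustrative subset of an IRO table entry. */
struct iro_sketch {
	u32 base;	/* base byte offset inside the STORM's internal RAM */
	u16 m1;		/* stride for the first index (sbId, pfId, funcId, ...) */
	u16 m2;		/* stride for an optional second index */
	u16 size;	/* size of the addressed object */
};

/* Mirrors CSTORM_STATUS_BLOCK_OFFSET(sbId): entry 132 locates status
 * block 0, and m1 is the per-status-block stride.
 */
static inline u32 sketch_status_block_offset(const struct iro_sketch *iro,
					     u32 sb_id)
{
	return iro[132].base + sb_id * iro[132].m1;
}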

drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h

@@ -0,0 +1,38 @@
/* bnx2x_fw_file_hdr.h: FW binary file header structure.
*
* Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Maintained by: Eilon Greenstein <eilong@broadcom.com>
* Written by: Vladislav Zolotarov <vladz@broadcom.com>
* Based on the original idea of John Wright <john.wright@hp.com>.
*/
#ifndef BNX2X_INIT_FILE_HDR_H
#define BNX2X_INIT_FILE_HDR_H
struct bnx2x_fw_file_section {
__be32 len;
__be32 offset;
};
struct bnx2x_fw_file_hdr {
struct bnx2x_fw_file_section init_ops;
struct bnx2x_fw_file_section init_ops_offsets;
struct bnx2x_fw_file_section init_data;
struct bnx2x_fw_file_section tsem_int_table_data;
struct bnx2x_fw_file_section tsem_pram_data;
struct bnx2x_fw_file_section usem_int_table_data;
struct bnx2x_fw_file_section usem_pram_data;
struct bnx2x_fw_file_section csem_int_table_data;
struct bnx2x_fw_file_section csem_pram_data;
struct bnx2x_fw_file_section xsem_int_table_data;
struct bnx2x_fw_file_section xsem_pram_data;
struct bnx2x_fw_file_section iro_arr;
struct bnx2x_fw_file_section fw_version;
};
#endif /* BNX2X_INIT_FILE_HDR_H */
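
For illustration, not part of the diff above: this header describes the layout at the start of the firmware blob returned by request_firmware(), with each bnx2x_fw_file_section holding a big-endian length/offset pair into that blob. A minimal hypothetical sketch of locating one section:

#include <linux/firmware.h>
#include <asm/byteorder.h>

/* Hypothetical helper: return a pointer to the init_data section of a
 * bnx2x firmware image, or NULL if the header points past the blob.
 */
static const u8 *sketch_fw_init_data(const struct firmware *fw, u32 *lenp)
{
	const struct bnx2x_fw_file_hdr *hdr =
		(const struct bnx2x_fw_file_hdr *)fw->data;
	u32 off = be32_to_cpu(hdr->init_data.offset);
	u32 len = be32_to_cpu(hdr->init_data.len);

	if ((u64)off + len > fw->size)
		return NULL;
	*lenp = len;
	return fw->data + off;
}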

File diff suppressed because it is too large.

drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h

@@ -0,0 +1,567 @@
/* bnx2x_init.h: Broadcom Everest network driver.
* Structures and macros needed during the initialization.
*
* Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Maintained by: Eilon Greenstein <eilong@broadcom.com>
* Written by: Eliezer Tamir
* Modified by: Vladislav Zolotarov <vladz@broadcom.com>
*/
#ifndef BNX2X_INIT_H
#define BNX2X_INIT_H
/* Init operation types and structures */
enum {
OP_RD = 0x1, /* read a single register */
OP_WR, /* write a single register */
OP_SW, /* copy a string to the device */
OP_ZR, /* clear memory */
OP_ZP, /* unzip then copy with DMAE */
OP_WR_64, /* write 64 bit pattern */
OP_WB, /* copy a string using DMAE */
OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */
/* Skip the following ops if all of the init modes don't match */
OP_IF_MODE_OR,
/* Skip the following ops if any of the init modes don't match */
OP_IF_MODE_AND,
OP_MAX
};
enum {
STAGE_START,
STAGE_END,
};
/* Returns the index of start or end of a specific block stage in ops array */
#define BLOCK_OPS_IDX(block, stage, end) \
(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
/* structs for the various opcodes */
struct raw_op {
u32 op:8;
u32 offset:24;
u32 raw_data;
};
struct op_read {
u32 op:8;
u32 offset:24;
u32 val;
};
struct op_write {
u32 op:8;
u32 offset:24;
u32 val;
};
struct op_arr_write {
u32 op:8;
u32 offset:24;
#ifdef __BIG_ENDIAN
u16 data_len;
u16 data_off;
#else /* __LITTLE_ENDIAN */
u16 data_off;
u16 data_len;
#endif
};
struct op_zero {
u32 op:8;
u32 offset:24;
u32 len;
};
struct op_if_mode {
u32 op:8;
u32 cmd_offset:24;
u32 mode_bit_map;
};
union init_op {
struct op_read read;
struct op_write write;
struct op_arr_write arr_wr;
struct op_zero zero;
struct raw_op raw;
struct op_if_mode if_mode;
};
/* Init Phases */
enum {
PHASE_COMMON,
PHASE_PORT0,
PHASE_PORT1,
PHASE_PF0,
PHASE_PF1,
PHASE_PF2,
PHASE_PF3,
PHASE_PF4,
PHASE_PF5,
PHASE_PF6,
PHASE_PF7,
NUM_OF_INIT_PHASES
};
/* Init Modes */
enum {
MODE_ASIC = 0x00000001,
MODE_FPGA = 0x00000002,
MODE_EMUL = 0x00000004,
MODE_E2 = 0x00000008,
MODE_E3 = 0x00000010,
MODE_PORT2 = 0x00000020,
MODE_PORT4 = 0x00000040,
MODE_SF = 0x00000080,
MODE_MF = 0x00000100,
MODE_MF_SD = 0x00000200,
MODE_MF_SI = 0x00000400,
MODE_MF_NIV = 0x00000800,
MODE_E3_A0 = 0x00001000,
MODE_E3_B0 = 0x00002000,
MODE_COS3 = 0x00004000,
MODE_COS6 = 0x00008000,
MODE_LITTLE_ENDIAN = 0x00010000,
MODE_BIG_ENDIAN = 0x00020000,
};
/* Init Blocks */
enum {
BLOCK_ATC,
BLOCK_BRB1,
BLOCK_CCM,
BLOCK_CDU,
BLOCK_CFC,
BLOCK_CSDM,
BLOCK_CSEM,
BLOCK_DBG,
BLOCK_DMAE,
BLOCK_DORQ,
BLOCK_HC,
BLOCK_IGU,
BLOCK_MISC,
BLOCK_NIG,
BLOCK_PBF,
BLOCK_PGLUE_B,
BLOCK_PRS,
BLOCK_PXP2,
BLOCK_PXP,
BLOCK_QM,
BLOCK_SRC,
BLOCK_TCM,
BLOCK_TM,
BLOCK_TSDM,
BLOCK_TSEM,
BLOCK_UCM,
BLOCK_UPB,
BLOCK_USDM,
BLOCK_USEM,
BLOCK_XCM,
BLOCK_XPB,
BLOCK_XSDM,
BLOCK_XSEM,
BLOCK_MISC_AEU,
NUM_OF_INIT_BLOCKS
};
/* QM queue numbers */
#define BNX2X_ETH_Q 0
#define BNX2X_TOE_Q 3
#define BNX2X_TOE_ACK_Q 6
#define BNX2X_ISCSI_Q 9
#define BNX2X_ISCSI_ACK_Q 11
#define BNX2X_FCOE_Q 10
/* Vnics per mode */
#define BNX2X_PORT2_MODE_NUM_VNICS 4
#define BNX2X_PORT4_MODE_NUM_VNICS 2
/* COS offset for port1 in E3 B0 4port mode */
#define BNX2X_E3B0_PORT1_COS_OFFSET 3
/* QM Register addresses */
#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\
(QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\
(QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\
(QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
/* extracts the QM queue number for the specified port and vnic */
#define BNX2X_PF_Q_NUM(q_num, port, vnic)\
((((port) << 1) | (vnic)) * 16 + (q_num))
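/* For example, with the macros above, port 1 / vnic 1 / the iSCSI queue
 * maps to PF queue ((1 << 1) | 1) * 16 + BNX2X_ISCSI_Q == 48 + 9 == 57,
 * whose VOQ index register is QM_REG_QVOQIDX_0 + 4 * 57 and whose bit in
 * a VOQ mask register is bit (57 & 0x1f) == 25.
 */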
/* Maps the specified queue to the specified COS */
static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
{
/* find current COS mapping */
u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4);
/* check if queue->COS mapping has changed */
if (curr_cos != new_cos) {
u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS;
u32 reg_addr, reg_bit_map, vnic;
/* update parameters for 4port mode */
if (INIT_MODE_FLAGS(bp) & MODE_PORT4) {
num_vnics = BNX2X_PORT4_MODE_NUM_VNICS;
if (BP_PORT(bp)) {
curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
new_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
}
}
/* change queue mapping for each VNIC */
for (vnic = 0; vnic < num_vnics; vnic++) {
u32 pf_q_num =
BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic);
u32 q_bit_map = 1 << (pf_q_num & 0x1f);
/* overwrite queue->VOQ mapping */
REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
/* clear queue bit from current COS bit map */
reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
reg_bit_map = REG_RD(bp, reg_addr);
REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map));
/* set queue bit in new COS bit map */
reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
reg_bit_map = REG_RD(bp, reg_addr);
REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
/* set/clear queue bit in command-queue bit map
(E2/E3A0 only, valid COS values are 0/1) */
if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
reg_bit_map = REG_RD(bp, reg_addr);
q_bit_map = 1 << (2 * (pf_q_num & 0xf));
reg_bit_map = new_cos ?
(reg_bit_map | q_bit_map) :
(reg_bit_map & (~q_bit_map));
REG_WR(bp, reg_addr, reg_bit_map);
}
}
}
}
/* Configures the QM according to the specified per-traffic-type COSes */
static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
struct priority_cos *traffic_cos)
{
bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
if (mode != STATIC_COS) {
/* required only in backward compatible COS mode */
bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
bnx2x_map_q_cos(bp, BNX2X_TOE_Q,
traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
}
}
/* Returns the index of start or end of a specific block stage in ops array*/
#define BLOCK_OPS_IDX(block, stage, end) \
(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
#define INITOP_SET 0 /* set the HW directly */
#define INITOP_CLEAR 1 /* clear the HW directly */
#define INITOP_INIT 2 /* set the init-value array */
/****************************************************************************
* ILT management
****************************************************************************/
struct ilt_line {
dma_addr_t page_mapping;
void *page;
u32 size;
};
struct ilt_client_info {
u32 page_size;
u16 start;
u16 end;
u16 client_num;
u16 flags;
#define ILT_CLIENT_SKIP_INIT 0x1
#define ILT_CLIENT_SKIP_MEM 0x2
};
struct bnx2x_ilt {
u32 start_line;
struct ilt_line *lines;
struct ilt_client_info clients[4];
#define ILT_CLIENT_CDU 0
#define ILT_CLIENT_QM 1
#define ILT_CLIENT_SRC 2
#define ILT_CLIENT_TM 3
};
/****************************************************************************
* SRC configuration
****************************************************************************/
struct src_ent {
u8 opaque[56];
u64 next;
};
/****************************************************************************
* Parity configuration
****************************************************************************/
#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
{ \
block##_REG_##block##_PRTY_MASK, \
block##_REG_##block##_PRTY_STS_CLR, \
en_mask, {m1, m1h, m2, m3}, #block \
}
#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
{ \
block##_REG_##block##_PRTY_MASK_0, \
block##_REG_##block##_PRTY_STS_CLR_0, \
en_mask, {m1, m1h, m2, m3}, #block"_0" \
}
#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
{ \
block##_REG_##block##_PRTY_MASK_1, \
block##_REG_##block##_PRTY_STS_CLR_1, \
en_mask, {m1, m1h, m2, m3}, #block"_1" \
}
static const struct {
u32 mask_addr;
u32 sts_clr_addr;
u32 en_mask; /* Mask to enable parity attentions */
struct {
u32 e1; /* 57710 */
u32 e1h; /* 57711 */
u32 e2; /* 57712 */
u32 e3; /* 578xx */
} reg_mask; /* Register mask (all valid bits) */
char name[7]; /* Block's longest name is 6 characters long
* (name + suffix)
*/
} bnx2x_blocks_parity_data[] = {
/* bit 19 masked */
/* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
/* bit 5,18,20-31 */
/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
/* bit 5 */
/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
/* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
/* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
* want to handle "system kill" flow at the moment.
*/
BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
0x7ffffff),
BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff),
BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xff, 0xffff),
BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
{0xf, 0xf, 0xf, 0xf}, "UPB"},
{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
{0xf, 0xf, 0xf, 0xf}, "XPB"},
BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff),
BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff),
BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff),
BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff),
BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff),
BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
};
/* [28] MCP Latched rom_parity
* [29] MCP Latched ump_rx_parity
* [30] MCP Latched ump_tx_parity
* [31] MCP Latched scpad_parity
*/
#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
/* The registers below control the MCP parity attention output. When the
 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set, attentions are enabled; when
 * cleared, they are disabled.
*/
static const u32 mcp_attn_ctl_regs[] = {
MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
MISC_REG_AEU_ENABLE4_NIG_0,
MISC_REG_AEU_ENABLE4_PXP_0,
MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
MISC_REG_AEU_ENABLE4_NIG_1,
MISC_REG_AEU_ENABLE4_PXP_1
};
static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
{
int i;
u32 reg_val;
for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
if (enable)
reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
else
reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
}
}
static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
{
if (CHIP_IS_E1(bp))
return bnx2x_blocks_parity_data[idx].reg_mask.e1;
else if (CHIP_IS_E1H(bp))
return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
else if (CHIP_IS_E2(bp))
return bnx2x_blocks_parity_data[idx].reg_mask.e2;
else /* CHIP_IS_E3 */
return bnx2x_blocks_parity_data[idx].reg_mask.e3;
}
static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
{
int i;
for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
if (dis_mask) {
REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
dis_mask);
DP(NETIF_MSG_HW, "Setting parity mask "
"for %s to\t\t0x%x\n",
bnx2x_blocks_parity_data[i].name, dis_mask);
}
}
/* Disable MCP parity attentions */
bnx2x_set_mcp_parity(bp, false);
}
/**
* Clear the parity error status registers.
*/
static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
{
int i;
u32 reg_val, mcp_aeu_bits =
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
/* Clear SEM_FAST parities */
REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
if (reg_mask) {
reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
sts_clr_addr);
if (reg_val & reg_mask)
DP(NETIF_MSG_HW,
"Parity errors in %s: 0x%x\n",
bnx2x_blocks_parity_data[i].name,
reg_val & reg_mask);
}
}
/* Check if there were parity attentions in MCP */
reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
if (reg_val & mcp_aeu_bits)
DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
reg_val & mcp_aeu_bits);
/* Clear parity attentions in MCP:
* [7] clears Latched rom_parity
* [8] clears Latched ump_rx_parity
* [9] clears Latched ump_tx_parity
* [10] clears Latched scpad_parity (both ports)
*/
REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
}
static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
{
int i;
for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
if (reg_mask)
REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
bnx2x_blocks_parity_data[i].en_mask & reg_mask);
}
/* Enable MCP parity attentions */
bnx2x_set_mcp_parity(bp, true);
}
#endif /* BNX2X_INIT_H */

View File

@@ -0,0 +1,912 @@
/* bnx2x_init_ops.h: Broadcom Everest network driver.
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
* Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Maintained by: Eilon Greenstein <eilong@broadcom.com>
* Written by: Vladislav Zolotarov <vladz@broadcom.com>
*/
#ifndef BNX2X_INIT_OPS_H
#define BNX2X_INIT_OPS_H
#ifndef BP_ILT
#define BP_ILT(bp) NULL
#endif
#ifndef BP_FUNC
#define BP_FUNC(bp) 0
#endif
#ifndef BP_PORT
#define BP_PORT(bp) 0
#endif
#ifndef BNX2X_ILT_FREE
#define BNX2X_ILT_FREE(x, y, sz)
#endif
#ifndef BNX2X_ILT_ZALLOC
#define BNX2X_ILT_ZALLOC(x, y, sz)
#endif
#ifndef ILOG2
#define ILOG2(x) x
#endif
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
dma_addr_t phys_addr, u32 addr,
u32 len);
static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
const u32 *data, u32 len)
{
u32 i;
for (i = 0; i < len; i++)
REG_WR(bp, addr + i*4, data[i]);
}
static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
const u32 *data, u32 len)
{
u32 i;
for (i = 0; i < len; i++)
bnx2x_reg_wr_ind(bp, addr + i*4, data[i]);
}
static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
u8 wb)
{
if (bp->dmae_ready)
bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
else if (wb)
/*
* Wide bus registers with no dmae need to be written
* using indirect write.
*/
bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
else
bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
}
static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
u32 len, u8 wb)
{
u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
u32 buf_len32 = buf_len/4;
u32 i;
memset(GUNZIP_BUF(bp), (u8)fill, buf_len);
for (i = 0; i < len; i += buf_len32) {
u32 cur_len = min(buf_len32, len - i);
bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
}
}
static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
{
if (bp->dmae_ready)
bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
else
bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
}
static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
const u32 *data, u32 len64)
{
u32 buf_len32 = FW_BUF_SIZE/4;
u32 len = len64*2;
u64 data64 = 0;
u32 i;
/* 64 bit value is in a blob: first low DWORD, then high DWORD */
data64 = HILO_U64((*(data + 1)), (*data));
len64 = min((u32)(FW_BUF_SIZE/8), len64);
for (i = 0; i < len64; i++) {
u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i;
*pdata = data64;
}
for (i = 0; i < len; i += buf_len32) {
u32 cur_len = min(buf_len32, len - i);
bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len);
}
}
/*********************************************************
There are different blobs for each PRAM section.
In addition, each blob write operation is divided into a few operations
in order to decrease the amount of phys. contiguous buffer needed.
Thus, when we select a blob, the address may be at some offset
from the beginning of the PRAM section.
The same holds for the INT_TABLE sections.
**********************************************************/
#define IF_IS_INT_TABLE_ADDR(base, addr) \
if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
#define IF_IS_PRAM_ADDR(base, addr) \
if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
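/* As a sketch of how the two helpers above feed bnx2x_sel_blob() below: an
 * address inside [TSEM_REG_PRAM, TSEM_REG_PRAM + 0x40000] selects
 * INIT_TSEM_PRAM_DATA(bp), and the caller (bnx2x_init_wr_zp()) then adds
 * blob_off * 4 bytes to account for the blob being written in chunks.
 */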
static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
const u8 *data)
{
IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
data = INIT_TSEM_INT_TABLE_DATA(bp);
else
IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
data = INIT_CSEM_INT_TABLE_DATA(bp);
else
IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
data = INIT_USEM_INT_TABLE_DATA(bp);
else
IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
data = INIT_XSEM_INT_TABLE_DATA(bp);
else
IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
data = INIT_TSEM_PRAM_DATA(bp);
else
IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
data = INIT_CSEM_PRAM_DATA(bp);
else
IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
data = INIT_USEM_PRAM_DATA(bp);
else
IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
data = INIT_XSEM_PRAM_DATA(bp);
return data;
}
static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
const u32 *data, u32 len)
{
if (bp->dmae_ready)
VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
else
bnx2x_init_ind_wr(bp, addr, data, len);
}
static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
u32 val_hi)
{
u32 wb_write[2];
wb_write[0] = val_lo;
wb_write[1] = val_hi;
REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
}
static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
u32 blob_off)
{
const u8 *data = NULL;
int rc;
u32 i;
data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;
rc = bnx2x_gunzip(bp, data, len);
if (rc)
return;
/* gunzip_outlen is in dwords */
len = GUNZIP_OUTLEN(bp);
for (i = 0; i < len; i++)
((u32 *)GUNZIP_BUF(bp))[i] =
cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
bnx2x_write_big_buf_wb(bp, addr, len);
}
static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
{
u16 op_start =
INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
STAGE_START)];
u16 op_end =
INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
STAGE_END)];
union init_op *op;
u32 op_idx, op_type, addr, len;
const u32 *data, *data_base;
/* If empty block */
if (op_start == op_end)
return;
data_base = INIT_DATA(bp);
for (op_idx = op_start; op_idx < op_end; op_idx++) {
op = (union init_op *)&(INIT_OPS(bp)[op_idx]);
/* Get generic data */
op_type = op->raw.op;
addr = op->raw.offset;
/* Get data that's used for OP_SW, OP_WB, OP_ZP and
* OP_WR_64 (we assume that op_arr_write and op_write have the
* same structure).
*/
len = op->arr_wr.data_len;
data = data_base + op->arr_wr.data_off;
switch (op_type) {
case OP_RD:
REG_RD(bp, addr);
break;
case OP_WR:
REG_WR(bp, addr, op->write.val);
break;
case OP_SW:
bnx2x_init_str_wr(bp, addr, data, len);
break;
case OP_WB:
bnx2x_init_wr_wb(bp, addr, data, len);
break;
case OP_ZR:
bnx2x_init_fill(bp, addr, 0, op->zero.len, 0);
break;
case OP_WB_ZR:
bnx2x_init_fill(bp, addr, 0, op->zero.len, 1);
break;
case OP_ZP:
bnx2x_init_wr_zp(bp, addr, len,
op->arr_wr.data_off);
break;
case OP_WR_64:
bnx2x_init_wr_64(bp, addr, data, len);
break;
case OP_IF_MODE_AND:
/* if any of the flags doesn't match, skip the
* conditional block.
*/
if ((INIT_MODE_FLAGS(bp) &
op->if_mode.mode_bit_map) !=
op->if_mode.mode_bit_map)
op_idx += op->if_mode.cmd_offset;
break;
case OP_IF_MODE_OR:
/* if all the flags don't match, skip the conditional
* block.
*/
if ((INIT_MODE_FLAGS(bp) &
op->if_mode.mode_bit_map) == 0)
op_idx += op->if_mode.cmd_offset;
break;
default:
/* Should never get here! */
break;
}
}
}
/****************************************************************************
* PXP Arbiter
****************************************************************************/
/*
* This code configures the PCI read/write arbiter
* which implements a weighted round robin
* between the virtual queues in the chip.
*
* The values were derived for each PCI max payload and max request size.
* since max payload and max request size are only known at run time,
* this is done as a separate init stage.
*/
#define NUM_WR_Q 13
#define NUM_RD_Q 29
#define MAX_RD_ORD 3
#define MAX_WR_ORD 2
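/* The order values encode a PCIe transfer size of 128 << order bytes
 * (so MAX_RD_ORD corresponds to 1KB read requests and MAX_WR_ORD to a
 * 512-byte payload). A rough sketch of how a caller might derive them
 * from the PCIe Device Control register (the real wiring lives outside
 * this file):
 *
 *	u16 devctl;
 *
 *	pci_read_config_word(bp->pdev,
 *			     pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
 *	w_order = (devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
 *	r_order = (devctl & PCI_EXP_DEVCTL_READRQ) >> 12;
 *	bnx2x_init_pxp_arb(bp, r_order, w_order);
 */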
/* configuration for one arbiter queue */
struct arb_line {
int l;
int add;
int ubound;
};
/* derived configuration for each read queue for each max request size */
static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
{ {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
{ {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
{ {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
};
/* derived configuration for each write queue for each max request size */
static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
{ {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
{ {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
{ {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
};
/* register addresses for read queues */
static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
PXP2_REG_RQ_BW_RD_UBOUND0},
{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
PXP2_REG_PSWRQ_BW_UB1},
{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
PXP2_REG_PSWRQ_BW_UB2},
{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
PXP2_REG_PSWRQ_BW_UB3},
{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
PXP2_REG_RQ_BW_RD_UBOUND4},
{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
PXP2_REG_RQ_BW_RD_UBOUND5},
{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
PXP2_REG_PSWRQ_BW_UB6},
{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
PXP2_REG_PSWRQ_BW_UB7},
{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
PXP2_REG_PSWRQ_BW_UB8},
/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
PXP2_REG_PSWRQ_BW_UB9},
{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
PXP2_REG_PSWRQ_BW_UB10},
{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
PXP2_REG_PSWRQ_BW_UB11},
{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
PXP2_REG_RQ_BW_RD_UBOUND12},
{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
PXP2_REG_RQ_BW_RD_UBOUND13},
{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
PXP2_REG_RQ_BW_RD_UBOUND14},
{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
PXP2_REG_RQ_BW_RD_UBOUND15},
{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
PXP2_REG_RQ_BW_RD_UBOUND16},
{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
PXP2_REG_RQ_BW_RD_UBOUND17},
{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
PXP2_REG_RQ_BW_RD_UBOUND18},
/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
PXP2_REG_RQ_BW_RD_UBOUND19},
{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
PXP2_REG_RQ_BW_RD_UBOUND20},
{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
PXP2_REG_RQ_BW_RD_UBOUND22},
{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
PXP2_REG_RQ_BW_RD_UBOUND23},
{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
PXP2_REG_RQ_BW_RD_UBOUND24},
{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
PXP2_REG_RQ_BW_RD_UBOUND25},
{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
PXP2_REG_RQ_BW_RD_UBOUND26},
{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
PXP2_REG_RQ_BW_RD_UBOUND27},
{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
PXP2_REG_PSWRQ_BW_UB28}
};
/* register addresses for write queues */
static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
PXP2_REG_PSWRQ_BW_UB1},
{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
PXP2_REG_PSWRQ_BW_UB2},
{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
PXP2_REG_PSWRQ_BW_UB3},
{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
PXP2_REG_PSWRQ_BW_UB6},
{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
PXP2_REG_PSWRQ_BW_UB7},
{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
PXP2_REG_PSWRQ_BW_UB8},
{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
PXP2_REG_PSWRQ_BW_UB9},
{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
PXP2_REG_PSWRQ_BW_UB10},
{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
PXP2_REG_PSWRQ_BW_UB11},
/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
PXP2_REG_PSWRQ_BW_UB28},
{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
PXP2_REG_RQ_BW_WR_UBOUND29},
{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
PXP2_REG_RQ_BW_WR_UBOUND30}
};
static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
int w_order)
{
u32 val, i;
if (r_order > MAX_RD_ORD) {
DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
r_order, MAX_RD_ORD);
r_order = MAX_RD_ORD;
}
if (w_order > MAX_WR_ORD) {
DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
w_order, MAX_WR_ORD);
w_order = MAX_WR_ORD;
}
if (CHIP_REV_IS_FPGA(bp)) {
DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
w_order = 0;
}
DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);
for (i = 0; i < NUM_RD_Q-1; i++) {
REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
REG_WR(bp, read_arb_addr[i].add,
read_arb_data[i][r_order].add);
REG_WR(bp, read_arb_addr[i].ubound,
read_arb_data[i][r_order].ubound);
}
for (i = 0; i < NUM_WR_Q-1; i++) {
if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
(write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
REG_WR(bp, write_arb_addr[i].l,
write_arb_data[i][w_order].l);
REG_WR(bp, write_arb_addr[i].add,
write_arb_data[i][w_order].add);
REG_WR(bp, write_arb_addr[i].ubound,
write_arb_data[i][w_order].ubound);
} else {
val = REG_RD(bp, write_arb_addr[i].l);
REG_WR(bp, write_arb_addr[i].l,
val | (write_arb_data[i][w_order].l << 10));
val = REG_RD(bp, write_arb_addr[i].add);
REG_WR(bp, write_arb_addr[i].add,
val | (write_arb_data[i][w_order].add << 10));
val = REG_RD(bp, write_arb_addr[i].ubound);
REG_WR(bp, write_arb_addr[i].ubound,
val | (write_arb_data[i][w_order].ubound << 7));
}
}
val = write_arb_data[NUM_WR_Q-1][w_order].add;
val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
val = read_arb_data[NUM_RD_Q-1][r_order].add;
val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
if (CHIP_IS_E3(bp))
REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
else if (CHIP_IS_E2(bp))
REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
else
REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
if (!CHIP_IS_E1(bp)) {
/* MPS w_order optimal TH presently TH
* 128 0 0 2
* 256 1 1 3
* >=512 2 2 3
*/
/* DMAE is special */
if (!CHIP_IS_E1H(bp)) {
/* E2 can use optimal TH */
val = w_order;
REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
} else {
val = ((w_order == 0) ? 2 : 3);
REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
}
REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
}
/* Validate number of tags supported by device */
#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
val &= 0xFF;
if (val <= 0x20)
REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
}
/****************************************************************************
* ILT management
****************************************************************************/
/*
* This code hides the low-level HW interaction for ILT management and
* configuration. The API consists of a shadow ILT table which is set by the
* driver and a set of routines to use it to configure the HW.
*
*/
/* ILT HW init operations */
/* ILT memory management operations */
#define ILT_MEMOP_ALLOC 0
#define ILT_MEMOP_FREE 1
/* the phys address is shifted right 12 bits and has an added
* 1=valid bit added to the 53rd bit
* then since this is a wide register(TM)
* we split it into two 32 bit writes
*/
#define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
#define ILT_RANGE(f, l) (((l) << 10) | f)
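/* For example, for a (hypothetical) page mapping of 0x0000001234567000ULL:
 *
 *	ILT_ADDR1(0x0000001234567000ULL) == 0x01234567	(bits 43..12)
 *	ILT_ADDR2(0x0000001234567000ULL) == 0x00100000	(valid bit | bits 63..44)
 *
 * and ILT_RANGE(first, last) packs the first/last line numbers as
 * (last << 10) | first for the E1 PSWRQ_*_L2P registers used below.
 */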
static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
struct ilt_line *line, u32 size, u8 memop)
{
if (memop == ILT_MEMOP_FREE) {
BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
return 0;
}
BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
if (!line->page)
return -1;
line->size = size;
return 0;
}
static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
u8 memop)
{
int i, rc;
struct bnx2x_ilt *ilt = BP_ILT(bp);
struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
if (!ilt || !ilt->lines)
return -1;
if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
return 0;
for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
ilt_cli->page_size, memop);
}
return rc;
}
static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
{
int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
if (!rc)
rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
if (!rc)
rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
if (!rc)
rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
return rc;
}
static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
dma_addr_t page_mapping)
{
u32 reg;
if (CHIP_IS_E1(bp))
reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
else
reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
}
static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
struct bnx2x_ilt *ilt, int idx, u8 initop)
{
dma_addr_t null_mapping;
int abs_idx = ilt->start_line + idx;
switch (initop) {
case INITOP_INIT:
/* set in the init-value array */
case INITOP_SET:
bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
break;
case INITOP_CLEAR:
null_mapping = 0;
bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
break;
}
}
static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
struct ilt_client_info *ilt_cli,
u32 ilt_start, u8 initop)
{
u32 start_reg = 0;
u32 end_reg = 0;
/* The boundary is either SET or INIT,
CLEAR => SET and for now SET ~~ INIT */
/* find the appropriate regs */
if (CHIP_IS_E1(bp)) {
switch (ilt_cli->client_num) {
case ILT_CLIENT_CDU:
start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
break;
case ILT_CLIENT_QM:
start_reg = PXP2_REG_PSWRQ_QM0_L2P;
break;
case ILT_CLIENT_SRC:
start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
break;
case ILT_CLIENT_TM:
start_reg = PXP2_REG_PSWRQ_TM0_L2P;
break;
}
REG_WR(bp, start_reg + BP_FUNC(bp)*4,
ILT_RANGE((ilt_start + ilt_cli->start),
(ilt_start + ilt_cli->end)));
} else {
switch (ilt_cli->client_num) {
case ILT_CLIENT_CDU:
start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
break;
case ILT_CLIENT_QM:
start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
end_reg = PXP2_REG_RQ_QM_LAST_ILT;
break;
case ILT_CLIENT_SRC:
start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
break;
case ILT_CLIENT_TM:
start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
end_reg = PXP2_REG_RQ_TM_LAST_ILT;
break;
}
REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
}
}
static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
struct bnx2x_ilt *ilt,
struct ilt_client_info *ilt_cli,
u8 initop)
{
int i;
if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
return;
for (i = ilt_cli->start; i <= ilt_cli->end; i++)
bnx2x_ilt_line_init_op(bp, ilt, i, initop);
/* init/clear the ILT boundaries */
bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
}
static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
struct ilt_client_info *ilt_cli, u8 initop)
{
struct bnx2x_ilt *ilt = BP_ILT(bp);
bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
}
static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
int cli_num, u8 initop)
{
struct bnx2x_ilt *ilt = BP_ILT(bp);
struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
}
static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
}
static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
u32 psz_reg, u8 initop)
{
struct bnx2x_ilt *ilt = BP_ILT(bp);
struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
return;
switch (initop) {
case INITOP_INIT:
/* set in the init-value array */
case INITOP_SET:
REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
break;
case INITOP_CLEAR:
break;
}
}
/*
* Called during the init common stage; ILT clients should be initialized
* prior to calling this function.
*/
static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
PXP2_REG_RQ_CDU_P_SIZE, initop);
bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
PXP2_REG_RQ_QM_P_SIZE, initop);
bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
PXP2_REG_RQ_SRC_P_SIZE, initop);
bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
PXP2_REG_RQ_TM_P_SIZE, initop);
}
/****************************************************************************
* QM initializations
****************************************************************************/
#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */
#define QM_INIT_MIN_CID_COUNT 31
#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
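/* i.e. the QM is (re)programmed only when more than QM_INIT_MIN_CID_COUNT
 * connections are configured; for example QM_INIT(64) is true, in which
 * case QM_REG_CONNNUM_0 below is written with 64/16 - 1 = 3.
 */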
/* called during init port stage */
static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
u8 initop)
{
int port = BP_PORT(bp);
if (QM_INIT(qm_cid_count)) {
switch (initop) {
case INITOP_INIT:
/* set in the init-value array */
case INITOP_SET:
REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
qm_cid_count/16 - 1);
break;
case INITOP_CLEAR:
break;
}
}
}
static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
{
int i;
u32 wb_data[2];
wb_data[0] = wb_data[1] = 0;
for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
REG_WR(bp, QM_REG_BASEADDR + i*4,
qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8,
wb_data, 2);
if (CHIP_IS_E1H(bp)) {
REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
wb_data, 2);
}
}
}
/* called during init common stage */
static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
u8 initop)
{
if (!QM_INIT(qm_cid_count))
return;
switch (initop) {
case INITOP_INIT:
/* set in the init-value array */
case INITOP_SET:
bnx2x_qm_set_ptr_table(bp, qm_cid_count);
break;
case INITOP_CLEAR:
break;
}
}
/****************************************************************************
* SRC initializations
****************************************************************************/
#ifdef BCM_CNIC
/* called during init func stage */
static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
dma_addr_t t2_mapping, int src_cid_count)
{
int i;
int port = BP_PORT(bp);
/* Initialize T2 */
for (i = 0; i < src_cid_count-1; i++)
t2[i].next = (u64)(t2_mapping +
(i+1)*sizeof(struct src_ent));
/* tell the searcher where the T2 table is */
REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
U64_LO(t2_mapping), U64_HI(t2_mapping));
bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
U64_LO((u64)t2_mapping +
(src_cid_count-1) * sizeof(struct src_ent)),
U64_HI((u64)t2_mapping +
(src_cid_count-1) * sizeof(struct src_ent)));
}
#endif
#endif /* BNX2X_INIT_OPS_H */

File diff suppressed because it is too large


@@ -0,0 +1,493 @@
/* Copyright 2008-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*
* Written by Yaniv Rosner
*
*/
#ifndef BNX2X_LINK_H
#define BNX2X_LINK_H
/***********************************************************/
/* Defines */
/***********************************************************/
#define DEFAULT_PHY_DEV_ADDR 3
#define E2_DEFAULT_PHY_DEV_ADDR 5
#define BNX2X_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO
#define BNX2X_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX
#define BNX2X_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX
#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
#define NET_SERDES_IF_XFI 1
#define NET_SERDES_IF_SFI 2
#define NET_SERDES_IF_KR 3
#define NET_SERDES_IF_DXGXS 4
#define SPEED_AUTO_NEG 0
#define SPEED_20000 20000
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
#define SFP_EEPROM_VENDOR_OUI_SIZE 3
#define SFP_EEPROM_PART_NO_ADDR 0x28
#define SFP_EEPROM_PART_NO_SIZE 16
#define SFP_EEPROM_REVISION_ADDR 0x38
#define SFP_EEPROM_REVISION_SIZE 4
#define SFP_EEPROM_SERIAL_ADDR 0x44
#define SFP_EEPROM_SERIAL_SIZE 16
#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
#define SFP_EEPROM_DATE_SIZE 6
#define PWR_FLT_ERR_MSG_LEN 250
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
(((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
#define SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1)
/* Single Media board contains single external phy */
#define SINGLE_MEDIA(params) (params->num_phys == 2)
/* Dual Media board contains two external phy with different media */
#define DUAL_MEDIA(params) (params->num_phys == 3)
#define FW_PARAM_PHY_ADDR_MASK 0x000000FF
#define FW_PARAM_PHY_TYPE_MASK 0x0000FF00
#define FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000
#define FW_PARAM_MDIO_CTRL_OFFSET 16
#define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \
FW_PARAM_PHY_ADDR_MASK)
#define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \
FW_PARAM_PHY_TYPE_MASK)
#define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \
FW_PARAM_MDIO_CTRL_MASK) >> \
FW_PARAM_MDIO_CTRL_OFFSET)
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
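/* Purely as an illustration, for an arbitrary fw_param value of 0x12340507:
 *
 *	FW_PARAM_PHY_ADDR(0x12340507)  == 0x07
 *	FW_PARAM_PHY_TYPE(0x12340507)  == 0x0500
 *	FW_PARAM_MDIO_CTRL(0x12340507) == 0x1234
 *
 * FW_PARAM_SET() builds such a word from its three fields (note that the
 * phy_type argument is expected to already sit in bits 15:8).
 */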
#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
/***********************************************************/
/* Structs */
/***********************************************************/
#define INT_PHY 0
#define EXT_PHY1 1
#define EXT_PHY2 2
#define MAX_PHYS 3
/* Same configuration is shared between the XGXS and the first external phy */
#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? \
0 : (_phy_idx - 1))
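/* For example, LINK_CONFIG_IDX(INT_PHY) == LINK_CONFIG_IDX(EXT_PHY1) == 0
 * while LINK_CONFIG_IDX(EXT_PHY2) == 1, reflecting the note above that the
 * XGXS shares its configuration with the first external phy.
 */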
/***********************************************************/
/* bnx2x_phy struct */
/* Defines the required arguments and function per phy */
/***********************************************************/
struct link_vars;
struct link_params;
struct bnx2x_phy;
typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
struct link_vars *vars);
typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
struct link_vars *vars);
typedef void (*link_reset_t)(struct bnx2x_phy *phy,
struct link_params *params);
typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
struct link_params *params);
typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
struct link_params *params, u8 mode);
typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
struct link_params *params, u32 action);
struct bnx2x_phy {
u32 type;
/* Loaded during init */
u8 addr;
u8 def_md_devad;
u16 flags;
/* Require HW lock */
#define FLAGS_HW_LOCK_REQUIRED (1<<0)
/* No Over-Current detection */
#define FLAGS_NOC (1<<1)
/* Fan failure detection required */
#define FLAGS_FAN_FAILURE_DET_REQ (1<<2)
/* Initialize first the XGXS and only then the phy itself */
#define FLAGS_INIT_XGXS_FIRST (1<<3)
#define FLAGS_WC_DUAL_MODE (1<<4)
#define FLAGS_4_PORT_MODE (1<<5)
#define FLAGS_REARM_LATCH_SIGNAL (1<<6)
#define FLAGS_SFP_NOT_APPROVED (1<<7)
#define FLAGS_MDC_MDIO_WA (1<<8)
#define FLAGS_DUMMY_READ (1<<9)
#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
#define FLAGS_TX_ERROR_CHECK (1<<12)
/* preemphasis values for the rx side */
u16 rx_preemphasis[4];
/* preemphasis values for the tx side */
u16 tx_preemphasis[4];
/* EMAC address used for MDIO access */
u32 mdio_ctrl;
u32 supported;
u32 media_type;
#define ETH_PHY_UNSPECIFIED 0x0
#define ETH_PHY_SFP_FIBER 0x1
#define ETH_PHY_XFP_FIBER 0x2
#define ETH_PHY_DA_TWINAX 0x3
#define ETH_PHY_BASE_T 0x4
#define ETH_PHY_KR 0xf0
#define ETH_PHY_CX4 0xf1
#define ETH_PHY_NOT_PRESENT 0xff
/* The address in which the version is located */
u32 ver_addr;
u16 req_flow_ctrl;
u16 req_line_speed;
u32 speed_cap_mask;
u16 req_duplex;
u16 rsrv;
/* Called per phy/port init, and it configures LASI, speed, autoneg,
duplex, flow control negotiation, etc. */
config_init_t config_init;
/* Called due to interrupt. It determines the link, speed */
read_status_t read_status;
/* Called when driver is unloading. Should reset the phy */
link_reset_t link_reset;
/* Set the loopback configuration for the phy */
config_loopback_t config_loopback;
/* Format the given raw number into str up to len */
format_fw_ver_t format_fw_ver;
/* Reset the phy (both ports) */
hw_reset_t hw_reset;
/* Set link led mode (on/off/oper)*/
set_link_led_t set_link_led;
/* PHY Specific tasks */
phy_specific_func_t phy_specific_func;
#define DISABLE_TX 1
#define ENABLE_TX 2
};
/* Inputs parameters to the CLC */
struct link_params {
u8 port;
/* Default / User Configuration */
u8 loopback_mode;
#define LOOPBACK_NONE 0
#define LOOPBACK_EMAC 1
#define LOOPBACK_BMAC 2
#define LOOPBACK_XGXS 3
#define LOOPBACK_EXT_PHY 4
#define LOOPBACK_EXT 5
#define LOOPBACK_UMAC 6
#define LOOPBACK_XMAC 7
/* Device parameters */
u8 mac_addr[6];
u16 req_duplex[LINK_CONFIG_SIZE];
u16 req_flow_ctrl[LINK_CONFIG_SIZE];
u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
/* shmem parameters */
u32 shmem_base;
u32 shmem2_base;
u32 speed_cap_mask[LINK_CONFIG_SIZE];
u32 switch_cfg;
#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
u32 lane_config;
/* Phy register parameter */
u32 chip_id;
/* features */
u32 feature_config_flags;
#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
/* Will be populated during common init */
struct bnx2x_phy phy[MAX_PHYS];
/* Will be populated during common init */
u8 num_phys;
u8 rsrv;
u16 hw_led_mode; /* part of the hw_config read from the shmem */
u32 multi_phy_config;
/* Device pointer passed to all callback functions */
struct bnx2x *bp;
u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
req_flow_ctrl is set to AUTO */
};
/* Output parameters */
struct link_vars {
u8 phy_flags;
#define PHY_XGXS_FLAG (1<<0)
#define PHY_SGMII_FLAG (1<<1)
#define PHY_PHYSICAL_LINK_FLAG (1<<2)
#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
#define PHY_OVER_CURRENT_FLAG (1<<4)
u8 mac_type;
#define MAC_TYPE_NONE 0
#define MAC_TYPE_EMAC 1
#define MAC_TYPE_BMAC 2
#define MAC_TYPE_UMAC 3
#define MAC_TYPE_XMAC 4
u8 phy_link_up; /* internal phy link indication */
u8 link_up;
u16 line_speed;
u16 duplex;
u16 flow_ctrl;
u16 ieee_fc;
/* The same definitions as the shmem parameter */
u32 link_status;
u8 fault_detected;
u8 rsrv1;
u16 periodic_flags;
#define PERIODIC_FLAGS_LINK_EVENT 0x0001
u32 aeu_int_mask;
};
/***********************************************************/
/* Functions */
/***********************************************************/
int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
/* Reset the link. Should be called when the driver or interface goes down.
Before calling phy firmware upgrade, reset_ext_phy should be set
to 0 */
int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
u8 reset_ext_phy);
/* bnx2x_link_update should be called upon link interrupt */
int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
/* Use the following phy functions to read/write from the external phy.
In order to use them to read/write internal phy registers, use
DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
the register */
int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 *ret_val);
int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val);
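/* A hypothetical read-modify-write of an external phy register could look
 * roughly like this (phy_addr, devad, reg and SOME_BIT are placeholders,
 * not values defined in this header):
 *
 *	u16 val;
 *
 *	if (!bnx2x_phy_read(params, phy_addr, devad, reg, &val))
 *		bnx2x_phy_write(params, phy_addr, devad, reg, val | SOME_BIT);
 */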
/* Reads the link_status from the shmem,
and updates the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
struct link_vars *output);
/* returns string representing the fw_version of the external phy */
int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
u8 *version, u16 len);
/* Set/unset the led.
Basically, the CLC takes care of the led for the link, but in case one needs
to set/unset the led manually, set the "mode" to LED_MODE_OPER to
blink the led, and LED_MODE_OFF to set the led off. */
int bnx2x_set_led(struct link_params *params,
struct link_vars *vars, u8 mode, u32 speed);
#define LED_MODE_OFF 0
#define LED_MODE_ON 1
#define LED_MODE_OPER 2
#define LED_MODE_FRONT_PANEL_OFF 3
/* bnx2x_handle_module_detect_int should be called upon module detection
interrupt */
void bnx2x_handle_module_detect_int(struct link_params *params);
/* Get the actual link status. If it returns 0, link is up,
otherwise link is down */
int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
u8 is_serdes);
/* One-time initialization for external phy after power up */
int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u32 chip_id);
/* Reset the external PHY using GPIO */
void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
/* Reset the external SFX7101 phy */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf);
void bnx2x_hw_reset_phy(struct link_params *params);
/* Checks if HW lock is required for this phy/board type */
u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base);
/* Check swap bit and adjust PHY order */
u32 bnx2x_phy_selection(struct link_params *params);
/* Probe the phys on board, and populate them in "params" */
int bnx2x_phy_probe(struct link_params *params);
/* Checks if fan failure detection is required on one of the phys on board */
u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base, u8 port);
/* DCBX structs */
/* Number of maximum COS per chip */
#define DCBX_E2E3_MAX_NUM_COS (2)
#define DCBX_E3B0_MAX_NUM_COS_PORT0 (6)
#define DCBX_E3B0_MAX_NUM_COS_PORT1 (3)
#define DCBX_E3B0_MAX_NUM_COS ( \
MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \
DCBX_E3B0_MAX_NUM_COS_PORT1))
#define DCBX_MAX_NUM_COS ( \
MAXVAL(DCBX_E3B0_MAX_NUM_COS, \
DCBX_E2E3_MAX_NUM_COS))
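/* With the per-chip constants above, MAXVAL() resolves both
 * DCBX_E3B0_MAX_NUM_COS and DCBX_MAX_NUM_COS to max(6, 3) == max(6, 2) == 6,
 * which is the size used for the rx_cos_priority_mask[] and cos[] arrays
 * below.
 */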
/* PFC port configuration params */
struct bnx2x_nig_brb_pfc_port_params {
/* NIG */
u32 pause_enable;
u32 llfc_out_en;
u32 llfc_enable;
u32 pkt_priority_to_cos;
u8 num_of_rx_cos_priority_mask;
u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
u32 llfc_high_priority_classes;
u32 llfc_low_priority_classes;
/* BRB */
u32 cos0_pauseable;
u32 cos1_pauseable;
};
/* ETS port configuration params */
struct bnx2x_ets_bw_params {
u8 bw;
};
struct bnx2x_ets_sp_params {
/**
* valid values are 0 - 5. 0 is highest strict priority.
* There can't be two COS's with the same pri.
*/
u8 pri;
};
enum bnx2x_cos_state {
bnx2x_cos_state_strict = 0,
bnx2x_cos_state_bw = 1,
};
struct bnx2x_ets_cos_params {
enum bnx2x_cos_state state;
union {
struct bnx2x_ets_bw_params bw_params;
struct bnx2x_ets_sp_params sp_params;
} params;
};
struct bnx2x_ets_params {
u8 num_of_cos; /* Number of valid COS entries*/
struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
};
/**
* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
* when link is already up
*/
int bnx2x_update_pfc(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *pfc_params);
/* Used to configure the ETS to disable */
int bnx2x_ets_disabled(struct link_params *params,
struct link_vars *vars);
/* Used to configure the ETS to BW limited */
void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
const u32 cos1_bw);
/* Used to configure the ETS to strict */
int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
/* Configure the COS to ETS according to BW and SP settings. */
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
const struct bnx2x_ets_params *ets_params);
/* Read pfc statistics */
void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
u32 pfc_frames_sent[2],
u32 pfc_frames_received[2]);
void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
u32 chip_id, u32 shmem_base, u32 shmem2_base,
u8 port);
int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
struct link_params *params);
void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
#endif /* BNX2X_LINK_H */

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,381 @@
/* bnx2x_stats.h: Broadcom Everest network driver.
*
* Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Maintained by: Eilon Greenstein <eilong@broadcom.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
* Slowpath and fastpath rework by Vladislav Zolotarov
* Statistics and Link management by Yitchak Gertner
*
*/
#ifndef BNX2X_STATS_H
#define BNX2X_STATS_H
#include <linux/types.h>
struct nig_stats {
u32 brb_discard;
u32 brb_packet;
u32 brb_truncate;
u32 flow_ctrl_discard;
u32 flow_ctrl_octets;
u32 flow_ctrl_packet;
u32 mng_discard;
u32 mng_octet_inp;
u32 mng_octet_out;
u32 mng_packet_inp;
u32 mng_packet_out;
u32 pbf_octets;
u32 pbf_packet;
u32 safc_inp;
u32 egress_mac_pkt0_lo;
u32 egress_mac_pkt0_hi;
u32 egress_mac_pkt1_lo;
u32 egress_mac_pkt1_hi;
};
enum bnx2x_stats_event {
STATS_EVENT_PMF = 0,
STATS_EVENT_LINK_UP,
STATS_EVENT_UPDATE,
STATS_EVENT_STOP,
STATS_EVENT_MAX
};
enum bnx2x_stats_state {
STATS_STATE_DISABLED = 0,
STATS_STATE_ENABLED,
STATS_STATE_MAX
};
struct bnx2x_eth_stats {
u32 total_bytes_received_hi;
u32 total_bytes_received_lo;
u32 total_bytes_transmitted_hi;
u32 total_bytes_transmitted_lo;
u32 total_unicast_packets_received_hi;
u32 total_unicast_packets_received_lo;
u32 total_multicast_packets_received_hi;
u32 total_multicast_packets_received_lo;
u32 total_broadcast_packets_received_hi;
u32 total_broadcast_packets_received_lo;
u32 total_unicast_packets_transmitted_hi;
u32 total_unicast_packets_transmitted_lo;
u32 total_multicast_packets_transmitted_hi;
u32 total_multicast_packets_transmitted_lo;
u32 total_broadcast_packets_transmitted_hi;
u32 total_broadcast_packets_transmitted_lo;
u32 valid_bytes_received_hi;
u32 valid_bytes_received_lo;
u32 error_bytes_received_hi;
u32 error_bytes_received_lo;
u32 etherstatsoverrsizepkts_hi;
u32 etherstatsoverrsizepkts_lo;
u32 no_buff_discard_hi;
u32 no_buff_discard_lo;
u32 rx_stat_ifhcinbadoctets_hi;
u32 rx_stat_ifhcinbadoctets_lo;
u32 tx_stat_ifhcoutbadoctets_hi;
u32 tx_stat_ifhcoutbadoctets_lo;
u32 rx_stat_dot3statsfcserrors_hi;
u32 rx_stat_dot3statsfcserrors_lo;
u32 rx_stat_dot3statsalignmenterrors_hi;
u32 rx_stat_dot3statsalignmenterrors_lo;
u32 rx_stat_dot3statscarriersenseerrors_hi;
u32 rx_stat_dot3statscarriersenseerrors_lo;
u32 rx_stat_falsecarriererrors_hi;
u32 rx_stat_falsecarriererrors_lo;
u32 rx_stat_etherstatsundersizepkts_hi;
u32 rx_stat_etherstatsundersizepkts_lo;
u32 rx_stat_dot3statsframestoolong_hi;
u32 rx_stat_dot3statsframestoolong_lo;
u32 rx_stat_etherstatsfragments_hi;
u32 rx_stat_etherstatsfragments_lo;
u32 rx_stat_etherstatsjabbers_hi;
u32 rx_stat_etherstatsjabbers_lo;
u32 rx_stat_maccontrolframesreceived_hi;
u32 rx_stat_maccontrolframesreceived_lo;
u32 rx_stat_bmac_xpf_hi;
u32 rx_stat_bmac_xpf_lo;
u32 rx_stat_bmac_xcf_hi;
u32 rx_stat_bmac_xcf_lo;
u32 rx_stat_xoffstateentered_hi;
u32 rx_stat_xoffstateentered_lo;
u32 rx_stat_xonpauseframesreceived_hi;
u32 rx_stat_xonpauseframesreceived_lo;
u32 rx_stat_xoffpauseframesreceived_hi;
u32 rx_stat_xoffpauseframesreceived_lo;
u32 tx_stat_outxonsent_hi;
u32 tx_stat_outxonsent_lo;
u32 tx_stat_outxoffsent_hi;
u32 tx_stat_outxoffsent_lo;
u32 tx_stat_flowcontroldone_hi;
u32 tx_stat_flowcontroldone_lo;
u32 tx_stat_etherstatscollisions_hi;
u32 tx_stat_etherstatscollisions_lo;
u32 tx_stat_dot3statssinglecollisionframes_hi;
u32 tx_stat_dot3statssinglecollisionframes_lo;
u32 tx_stat_dot3statsmultiplecollisionframes_hi;
u32 tx_stat_dot3statsmultiplecollisionframes_lo;
u32 tx_stat_dot3statsdeferredtransmissions_hi;
u32 tx_stat_dot3statsdeferredtransmissions_lo;
u32 tx_stat_dot3statsexcessivecollisions_hi;
u32 tx_stat_dot3statsexcessivecollisions_lo;
u32 tx_stat_dot3statslatecollisions_hi;
u32 tx_stat_dot3statslatecollisions_lo;
u32 tx_stat_etherstatspkts64octets_hi;
u32 tx_stat_etherstatspkts64octets_lo;
u32 tx_stat_etherstatspkts65octetsto127octets_hi;
u32 tx_stat_etherstatspkts65octetsto127octets_lo;
u32 tx_stat_etherstatspkts128octetsto255octets_hi;
u32 tx_stat_etherstatspkts128octetsto255octets_lo;
u32 tx_stat_etherstatspkts256octetsto511octets_hi;
u32 tx_stat_etherstatspkts256octetsto511octets_lo;
u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
u32 tx_stat_etherstatspktsover1522octets_hi;
u32 tx_stat_etherstatspktsover1522octets_lo;
u32 tx_stat_bmac_2047_hi;
u32 tx_stat_bmac_2047_lo;
u32 tx_stat_bmac_4095_hi;
u32 tx_stat_bmac_4095_lo;
u32 tx_stat_bmac_9216_hi;
u32 tx_stat_bmac_9216_lo;
u32 tx_stat_bmac_16383_hi;
u32 tx_stat_bmac_16383_lo;
u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
u32 tx_stat_bmac_ufl_hi;
u32 tx_stat_bmac_ufl_lo;
u32 pause_frames_received_hi;
u32 pause_frames_received_lo;
u32 pause_frames_sent_hi;
u32 pause_frames_sent_lo;
u32 etherstatspkts1024octetsto1522octets_hi;
u32 etherstatspkts1024octetsto1522octets_lo;
u32 etherstatspktsover1522octets_hi;
u32 etherstatspktsover1522octets_lo;
u32 brb_drop_hi;
u32 brb_drop_lo;
u32 brb_truncate_hi;
u32 brb_truncate_lo;
u32 mac_filter_discard;
u32 mf_tag_discard;
u32 brb_truncate_discard;
u32 mac_discard;
u32 driver_xoff;
u32 rx_err_discard_pkt;
u32 rx_skb_alloc_failed;
u32 hw_csum_err;
u32 nig_timer_max;
/* TPA */
u32 total_tpa_aggregations_hi;
u32 total_tpa_aggregations_lo;
u32 total_tpa_aggregated_frames_hi;
u32 total_tpa_aggregated_frames_lo;
u32 total_tpa_bytes_hi;
u32 total_tpa_bytes_lo;
};
struct bnx2x_eth_q_stats {
u32 total_unicast_bytes_received_hi;
u32 total_unicast_bytes_received_lo;
u32 total_broadcast_bytes_received_hi;
u32 total_broadcast_bytes_received_lo;
u32 total_multicast_bytes_received_hi;
u32 total_multicast_bytes_received_lo;
u32 total_bytes_received_hi;
u32 total_bytes_received_lo;
u32 total_unicast_bytes_transmitted_hi;
u32 total_unicast_bytes_transmitted_lo;
u32 total_broadcast_bytes_transmitted_hi;
u32 total_broadcast_bytes_transmitted_lo;
u32 total_multicast_bytes_transmitted_hi;
u32 total_multicast_bytes_transmitted_lo;
u32 total_bytes_transmitted_hi;
u32 total_bytes_transmitted_lo;
u32 total_unicast_packets_received_hi;
u32 total_unicast_packets_received_lo;
u32 total_multicast_packets_received_hi;
u32 total_multicast_packets_received_lo;
u32 total_broadcast_packets_received_hi;
u32 total_broadcast_packets_received_lo;
u32 total_unicast_packets_transmitted_hi;
u32 total_unicast_packets_transmitted_lo;
u32 total_multicast_packets_transmitted_hi;
u32 total_multicast_packets_transmitted_lo;
u32 total_broadcast_packets_transmitted_hi;
u32 total_broadcast_packets_transmitted_lo;
u32 valid_bytes_received_hi;
u32 valid_bytes_received_lo;
u32 etherstatsoverrsizepkts_hi;
u32 etherstatsoverrsizepkts_lo;
u32 no_buff_discard_hi;
u32 no_buff_discard_lo;
u32 driver_xoff;
u32 rx_err_discard_pkt;
u32 rx_skb_alloc_failed;
u32 hw_csum_err;
u32 total_packets_received_checksum_discarded_hi;
u32 total_packets_received_checksum_discarded_lo;
u32 total_packets_received_ttl0_discarded_hi;
u32 total_packets_received_ttl0_discarded_lo;
u32 total_transmitted_dropped_packets_error_hi;
u32 total_transmitted_dropped_packets_error_lo;
/* TPA */
u32 total_tpa_aggregations_hi;
u32 total_tpa_aggregations_lo;
u32 total_tpa_aggregated_frames_hi;
u32 total_tpa_aggregated_frames_lo;
u32 total_tpa_bytes_hi;
u32 total_tpa_bytes_lo;
};
/****************************************************************************
* Macros
****************************************************************************/
/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
do { \
s_lo += a_lo; \
s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
} while (0)
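/*
 * Illustrative sketch, not part of the original header: the driver keeps
 * 64-bit statistics as two u32 halves, and ADD_64() propagates the carry
 * from the low word into the high word, e.g.
 *
 *	u32 hi = 0, lo = 0xfffffff0;
 *	ADD_64(hi, 0, lo, 0x20);	-> lo wraps to 0x10, hi becomes 1
 */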
/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
do { \
if (m_lo < s_lo) { \
/* underflow */ \
d_hi = m_hi - s_hi; \
if (d_hi > 0) { \
/* we can 'loan' 1 */ \
d_hi--; \
d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
} else { \
/* m_hi <= s_hi */ \
d_hi = 0; \
d_lo = 0; \
} \
} else { \
/* m_lo >= s_lo */ \
if (m_hi < s_hi) { \
d_hi = 0; \
d_lo = 0; \
} else { \
/* m_hi >= s_hi */ \
d_hi = m_hi - s_hi; \
d_lo = m_lo - s_lo; \
} \
} \
} while (0)
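/*
 * Illustrative sketch, not part of the original header: DIFF_64() borrows
 * from the high word when the low word underflows, and clamps the result
 * to zero when the subtrahend is larger than the minuend, e.g.
 *
 *	u32 d_hi, d_lo;
 *	DIFF_64(d_hi, 1, 0, d_lo, 0x10, 0x20);	-> d_hi = 0, d_lo = 0xfffffff0
 *	DIFF_64(d_hi, 0, 0, d_lo, 5, 9);	-> d_hi = 0, d_lo = 0 (clamped)
 */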
#define UPDATE_STAT64(s, t) \
do { \
DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
pstats->mac_stx[0].t##_hi = new->s##_hi; \
pstats->mac_stx[0].t##_lo = new->s##_lo; \
ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
pstats->mac_stx[1].t##_lo, diff.lo); \
} while (0)
#define UPDATE_STAT64_NIG(s, t) \
do { \
DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
diff.lo, new->s##_lo, old->s##_lo); \
ADD_64(estats->t##_hi, diff.hi, \
estats->t##_lo, diff.lo); \
} while (0)
/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
do { \
s_lo += a; \
s_hi += (s_lo < a) ? 1 : 0; \
} while (0)
#define ADD_STAT64(diff, t) \
do { \
ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
pstats->mac_stx[1].t##_lo, new->diff##_lo); \
} while (0)
#define UPDATE_EXTEND_STAT(s) \
do { \
ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
pstats->mac_stx[1].s##_lo, \
new->s); \
} while (0)
#define UPDATE_EXTEND_TSTAT(s, t) \
do { \
diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
old_tclient->s = tclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
#define UPDATE_EXTEND_USTAT(s, t) \
do { \
diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
old_uclient->s = uclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
#define UPDATE_EXTEND_XSTAT(s, t) \
do { \
diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
old_xclient->s = xclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
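/*
 * Illustrative note, not part of the original header: the UPDATE_EXTEND_*
 * macros fold the delta of a 32-bit little-endian firmware counter into a
 * 64-bit (hi:lo) software counter.  Because the delta is computed in u32
 * arithmetic, a single wrap of the hardware counter is handled naturally,
 * e.g. old = 0xfffffffe, new = 0x00000001 yields diff = 3.  The 'diff'
 * variable and the tclient/old_tclient, uclient/old_uclient and
 * xclient/old_xclient pointers are assumed to be locals of the caller, as
 * in the statistics update code.
 */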
/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
do { \
DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
} while (0)
/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
do { \
SUB_64(m_hi, 0, m_lo, s); \
} while (0)
#define SUB_EXTEND_USTAT(s, t) \
do { \
diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
/* forward */
struct bnx2x;
void bnx2x_stats_init(struct bnx2x *bp);
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
#endif /* BNX2X_STATS_H */

File diff suppressed because it is too large

View File

@@ -0,0 +1,478 @@
/* cnic.h: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
*/
#ifndef CNIC_H
#define CNIC_H
#define HC_INDEX_ISCSI_EQ_CONS 6
#define HC_INDEX_FCOE_EQ_CONS 3
#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
#define KWQ_PAGE_CNT 4
#define KCQ_PAGE_CNT 16
#define KWQ_CID 24
#define KCQ_CID 25
/*
* krnlq_context definition
*/
#define L5_KRNLQ_FLAGS 0x00000000
#define L5_KRNLQ_SIZE 0x00000000
#define L5_KRNLQ_TYPE 0x00000000
#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
#define KRNLQ_TYPE_TYPE (0xf<<28)
#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
#define L5_KRNLQ_HOST_QIDX 0x00000004
#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
#define L5_KRNLQ_NX_PG_QIDX 0x00000018
#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
#define L5_KRNLQ_QIDX_INCR 0x0000001c
#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
#define BNX2_PG_CTX_MAP 0x1a0034
#define BNX2_ISCSI_CTX_MAP 0x1a0074
#define MAX_COMPLETED_KCQE 64
#define MAX_CNIC_L5_CONTEXT 256
#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
#define MAX_ISCSI_TBL_SZ 256
#define CNIC_LOCAL_PORT_MIN 60000
#define CNIC_LOCAL_PORT_MAX 61024
#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
#define MAX_KWQE_CNT (KWQE_CNT - 1)
#define MAX_KCQE_CNT (KCQE_CNT - 1)
#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
(MAX_KCQE_CNT - 1)) ? \
(x) + 2 : (x) + 1
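/*
 * Illustrative note, not part of the original header: KCQ_PG()/KCQ_IDX()
 * split a ring index into a page number and an offset within that page,
 * while BNX2X_NEXT_KCQE() skips the last entry of every page, which is
 * reserved for the next-page pointer.  For example, assuming 4 KiB BCM
 * pages and 32-byte KCQEs (128 entries per page):
 *
 *	BNX2X_NEXT_KCQE(5)   -> 6
 *	BNX2X_NEXT_KCQE(126) -> 128	(entry 127 is skipped)
 */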
#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
#define BNX2X_KWQ_DATA(cp, x) \
&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
#define DEF_IPID_START 0x8000
#define DEF_KA_TIMEOUT 10000
#define DEF_KA_INTERVAL 300000
#define DEF_KA_MAX_PROBE_COUNT 3
#define DEF_TOS 0
#define DEF_TTL 0xfe
#define DEF_SND_SEQ_SCALE 0
#define DEF_RCV_BUF 0xffff
#define DEF_SND_BUF 0xffff
#define DEF_SEED 0
#define DEF_MAX_RT_TIME 500
#define DEF_MAX_DA_COUNT 2
#define DEF_SWS_TIMER 1000
#define DEF_MAX_CWND 0xffff
struct cnic_ctx {
u32 cid;
void *ctx;
dma_addr_t mapping;
};
#define BNX2_MAX_CID 0x2000
struct cnic_dma {
int num_pages;
void **pg_arr;
dma_addr_t *pg_map_arr;
int pgtbl_size;
u32 *pgtbl;
dma_addr_t pgtbl_map;
};
struct cnic_id_tbl {
spinlock_t lock;
u32 start;
u32 max;
u32 next;
unsigned long *table;
};
#define CNIC_KWQ16_DATA_SIZE 128
struct kwqe_16_data {
u8 data[CNIC_KWQ16_DATA_SIZE];
};
struct cnic_iscsi {
struct cnic_dma task_array_info;
struct cnic_dma r2tq_info;
struct cnic_dma hq_info;
};
struct cnic_context {
u32 cid;
struct kwqe_16_data *kwqe_data;
dma_addr_t kwqe_data_mapping;
wait_queue_head_t waitq;
int wait_cond;
unsigned long timestamp;
unsigned long ctx_flags;
#define CTX_FL_OFFLD_START 0
#define CTX_FL_DELETE_WAIT 1
#define CTX_FL_CID_ERROR 2
u8 ulp_proto_id;
union {
struct cnic_iscsi *iscsi;
} proto;
};
struct kcq_info {
struct cnic_dma dma;
struct kcqe **kcq;
u16 *hw_prod_idx_ptr;
u16 sw_prod_idx;
u16 *status_idx_ptr;
u32 io_addr;
u16 (*next_idx)(u16);
u16 (*hw_idx)(u16);
};
struct iro {
u32 base;
u16 m1;
u16 m2;
u16 m3;
u16 size;
};
struct cnic_uio_dev {
struct uio_info cnic_uinfo;
u32 uio_dev;
int l2_ring_size;
void *l2_ring;
dma_addr_t l2_ring_map;
int l2_buf_size;
void *l2_buf;
dma_addr_t l2_buf_map;
struct cnic_dev *dev;
struct pci_dev *pdev;
struct list_head list;
};
struct cnic_local {
spinlock_t cnic_ulp_lock;
void *ulp_handle[MAX_CNIC_ULP_TYPE];
unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
#define ULP_F_INIT 0
#define ULP_F_START 1
#define ULP_F_CALL_PENDING 2
struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
unsigned long cnic_local_flags;
#define CNIC_LCL_FL_KWQ_INIT 0x0
#define CNIC_LCL_FL_L2_WAIT 0x1
#define CNIC_LCL_FL_RINGS_INITED 0x2
#define CNIC_LCL_FL_STOP_ISCSI 0x4
struct cnic_dev *dev;
struct cnic_eth_dev *ethdev;
struct cnic_uio_dev *udev;
int l2_rx_ring_size;
int l2_single_buf_size;
u16 *rx_cons_ptr;
u16 *tx_cons_ptr;
u16 rx_cons;
u16 tx_cons;
const struct iro *iro_arr;
#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
struct cnic_dma kwq_info;
struct kwqe **kwq;
struct cnic_dma kwq_16_data_info;
u16 max_kwq_idx;
u16 kwq_prod_idx;
u32 kwq_io_addr;
u16 *kwq_con_idx_ptr;
u16 kwq_con_idx;
struct kcq_info kcq1;
struct kcq_info kcq2;
union {
void *gen;
struct status_block_msix *bnx2;
struct host_hc_status_block_e1x *bnx2x_e1x;
/* index values - which counter to update */
#define SM_RX_ID 0
#define SM_TX_ID 1
} status_blk;
struct host_sp_status_block *bnx2x_def_status_blk;
u32 status_blk_num;
u32 bnx2x_igu_sb_id;
u32 int_num;
u32 last_status_idx;
struct tasklet_struct cnic_irq_task;
struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
struct cnic_sock *csk_tbl;
struct cnic_id_tbl csk_port_tbl;
struct cnic_dma gbl_buf_info;
struct cnic_iscsi *iscsi_tbl;
struct cnic_context *ctx_tbl;
struct cnic_id_tbl cid_tbl;
atomic_t iscsi_conn;
u32 iscsi_start_cid;
u32 fcoe_init_cid;
u32 fcoe_start_cid;
struct cnic_id_tbl fcoe_cid_tbl;
u32 max_cid_space;
/* per connection parameters */
int num_iscsi_tasks;
int num_ccells;
int task_array_size;
int r2tq_size;
int hq_size;
int num_cqs;
struct delayed_work delete_task;
struct cnic_ctx *ctx_arr;
int ctx_blks;
int ctx_blk_size;
unsigned long ctx_align;
int cids_per_blk;
u32 chip_id;
int func;
u32 pfid;
u8 port_mode;
#define CHIP_4_PORT_MODE 0
#define CHIP_2_PORT_MODE 1
#define CHIP_PORT_MODE_NONE 2
u32 shmem_base;
struct cnic_ops *cnic_ops;
int (*start_hw)(struct cnic_dev *);
void (*stop_hw)(struct cnic_dev *);
void (*setup_pgtbl)(struct cnic_dev *,
struct cnic_dma *);
int (*alloc_resc)(struct cnic_dev *);
void (*free_resc)(struct cnic_dev *);
int (*start_cm)(struct cnic_dev *);
void (*stop_cm)(struct cnic_dev *);
void (*enable_int)(struct cnic_dev *);
void (*disable_int_sync)(struct cnic_dev *);
void (*ack_int)(struct cnic_dev *);
void (*close_conn)(struct cnic_sock *, u32 opcode);
};
struct bnx2x_bd_chain_next {
u32 addr_lo;
u32 addr_hi;
u8 reserved[8];
};
#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
#define CDU_REGION_NUMBER_XCM_AG 2
#define CDU_REGION_NUMBER_UCM_AG 4
#define CDU_VALID_DATA(_cid, _region, _type) \
(((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
#define CDU_CRC8(_cid, _region, _type) \
(calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \
(0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
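/*
 * Illustrative note, not part of the original header: the CDU validation
 * value packs the CID with a 4-bit region and a 4-bit type, e.g.
 * CDU_VALID_DATA(0x12, 2, 1) == 0x1221; CDU_RSRVD_VALUE_TYPE_A() then sets
 * the valid bit (0x80) over the low 7 bits of the CRC-8 of that value.
 */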
#define BNX2X_CONTEXT_MEM_SIZE 1024
#define BNX2X_FCOE_CID 16
#define BNX2X_ISCSI_START_CID 18
#define BNX2X_ISCSI_NUM_CONNECTIONS 128
#define BNX2X_ISCSI_TASK_CONTEXT_SIZE 128
#define BNX2X_ISCSI_MAX_PENDING_R2TS 4
#define BNX2X_ISCSI_R2TQE_SIZE 8
#define BNX2X_ISCSI_HQ_BD_SIZE 64
#define BNX2X_ISCSI_GLB_BUF_SIZE 64
#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
#define BNX2X_FCOE_NUM_CONNECTIONS 128
#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ
#define BNX2X_CHIP_NUM_57710 0x164e
#define BNX2X_CHIP_NUM_57711 0x164f
#define BNX2X_CHIP_NUM_57711E 0x1650
#define BNX2X_CHIP_NUM_57712 0x1662
#define BNX2X_CHIP_NUM_57712E 0x1663
#define BNX2X_CHIP_NUM_57713 0x1651
#define BNX2X_CHIP_NUM_57713E 0x1652
#define BNX2X_CHIP_NUM_57800 0x168a
#define BNX2X_CHIP_NUM_57810 0x168e
#define BNX2X_CHIP_NUM_57840 0x168d
#define BNX2X_CHIP_NUM(x) (x >> 16)
#define BNX2X_CHIP_IS_57710(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
#define BNX2X_CHIP_IS_57711(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
#define BNX2X_CHIP_IS_57711E(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
#define BNX2X_CHIP_IS_E1H(x) \
(BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
#define BNX2X_CHIP_IS_57712(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
#define BNX2X_CHIP_IS_57712E(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
#define BNX2X_CHIP_IS_57713(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
#define BNX2X_CHIP_IS_57713E(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
#define BNX2X_CHIP_IS_57800(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800)
#define BNX2X_CHIP_IS_57810(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810)
#define BNX2X_CHIP_IS_57840(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840)
#define BNX2X_CHIP_IS_E2(x) \
(BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
#define BNX2X_CHIP_IS_E3(x) \
(BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \
BNX2X_CHIP_IS_57840(x))
#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id)
#define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2)
#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \
(BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \
((x) + 2) : ((x) + 1)
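/*
 * Illustrative note, not part of the original header: as with
 * BNX2X_NEXT_KCQE() above, the last element of each RCQ page holds the
 * next-page pointer, so BNX2X_NEXT_RCQE() advances by 2 at the end of a
 * page and by 1 otherwise.
 */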
#define BNX2X_DEF_SB_ID HC_SP_SB_ID
#define BNX2X_SHMEM_MF_BLK_OFFSET 0x7e4
#define BNX2X_SHMEM_ADDR(base, field) (base + \
offsetof(struct shmem_region, field))
#define BNX2X_SHMEM2_ADDR(base, field) (base + \
offsetof(struct shmem2_region, field))
#define BNX2X_SHMEM2_HAS(base, field) \
((base) && \
(CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \
offsetof(struct shmem2_region, field)))
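/*
 * Illustrative note, not part of the original header: BNX2X_SHMEM2_HAS()
 * reads the shmem2 region's 'size' member and reports whether the region
 * published by the running firmware is large enough to contain the
 * requested field, i.e. whether that field is actually present.
 */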
#define BNX2X_MF_CFG_ADDR(base, field) \
((base) + offsetof(struct mf_cfg, field))
#ifndef ETH_MAX_RX_CLIENTS_E2
#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
#endif
#define CNIC_PORT(cp) ((cp)->pfid & 1)
#define CNIC_FUNC(cp) ((cp)->func)
#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
0 : (CNIC_FUNC(cp) & 1))
#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \
(CNIC_E1HVN(cp) << 17) | (x))
#define BNX2X_SW_CID(x) (x & 0x1ffff)
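/*
 * Illustrative sketch, not part of the original header: BNX2X_HW_CID()
 * places the port and E1HVN bits above the 17-bit software connection ID,
 * and BNX2X_SW_CID() strips them again, e.g. with port 1, vn 2, cid 18:
 *
 *	hw_cid = (1 << 23) | (2 << 17) | 18	-> 0x840012
 *	BNX2X_SW_CID(0x840012)			-> 18
 */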
#define BNX2X_CL_QZONE_ID(cp, cli) \
(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \
cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
#ifndef MAX_STAT_COUNTER_ID
#define MAX_STAT_COUNTER_ID \
(BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \
((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
MAX_STAT_COUNTER_ID_E1))
#endif
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,340 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
*/
#ifndef CNIC_IF_H
#define CNIC_IF_H
#define CNIC_MODULE_VERSION "2.5.7"
#define CNIC_MODULE_RELDATE "July 20, 2011"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
#define CNIC_ULP_FCOE 2
#define CNIC_ULP_L4 3
#define MAX_CNIC_ULP_TYPE_EXT 3
#define MAX_CNIC_ULP_TYPE 4
struct kwqe {
u32 kwqe_op_flag;
#define KWQE_QID_SHIFT 8
#define KWQE_OPCODE_MASK 0x00ff0000
#define KWQE_OPCODE_SHIFT 16
#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
#define KWQE_LAYER_MASK 0x70000000
#define KWQE_LAYER_SHIFT 28
#define KWQE_FLAGS_LAYER_MASK_L2 (2<<28)
#define KWQE_FLAGS_LAYER_MASK_L3 (3<<28)
#define KWQE_FLAGS_LAYER_MASK_L4 (4<<28)
#define KWQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
#define KWQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
#define KWQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28)
u32 kwqe_info0;
u32 kwqe_info1;
u32 kwqe_info2;
u32 kwqe_info3;
u32 kwqe_info4;
u32 kwqe_info5;
u32 kwqe_info6;
};
struct kwqe_16 {
u32 kwqe_info0;
u32 kwqe_info1;
u32 kwqe_info2;
u32 kwqe_info3;
};
struct kcqe {
u32 kcqe_info0;
u32 kcqe_info1;
u32 kcqe_info2;
u32 kcqe_info3;
u32 kcqe_info4;
u32 kcqe_info5;
u32 kcqe_info6;
u32 kcqe_op_flag;
#define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
#define KCQE_FLAGS_LAYER_MASK (0x7<<28)
#define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
#define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
#define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
#define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28)
#define KCQE_FLAGS_NEXT (1<<31)
#define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
#define KCQE_FLAGS_OPCODE_SHIFT (16)
#define KCQE_OPCODE(op) \
(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
};
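/*
 * Illustrative sketch, not part of the original header: a completion's
 * layer and opcode are both carried in kcqe_op_flag, e.g. for an iSCSI
 * KCQE with opcode 0x45:
 *
 *	u32 flag = KCQE_FLAGS_LAYER_MASK_L5_ISCSI | (0x45 << 16);
 *	KCQE_OPCODE(flag)		-> 0x45
 *	flag & KCQE_FLAGS_LAYER_MASK	-> KCQE_FLAGS_LAYER_MASK_L5_ISCSI
 */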
#define MAX_CNIC_CTL_DATA 64
#define MAX_DRV_CTL_DATA 64
#define CNIC_CTL_STOP_CMD 1
#define CNIC_CTL_START_CMD 2
#define CNIC_CTL_COMPLETION_CMD 3
#define CNIC_CTL_STOP_ISCSI_CMD 4
#define DRV_CTL_IO_WR_CMD 0x101
#define DRV_CTL_IO_RD_CMD 0x102
#define DRV_CTL_CTX_WR_CMD 0x103
#define DRV_CTL_CTXTBL_WR_CMD 0x104
#define DRV_CTL_RET_L5_SPQ_CREDIT_CMD 0x105
#define DRV_CTL_START_L2_CMD 0x106
#define DRV_CTL_STOP_L2_CMD 0x107
#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c
#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d
struct cnic_ctl_completion {
u32 cid;
u8 opcode;
u8 error;
};
struct cnic_ctl_info {
int cmd;
union {
struct cnic_ctl_completion comp;
char bytes[MAX_CNIC_CTL_DATA];
} data;
};
struct drv_ctl_spq_credit {
u32 credit_count;
};
struct drv_ctl_io {
u32 cid_addr;
u32 offset;
u32 data;
dma_addr_t dma_addr;
};
struct drv_ctl_l2_ring {
u32 client_id;
u32 cid;
};
struct drv_ctl_info {
int cmd;
union {
struct drv_ctl_spq_credit credit;
struct drv_ctl_io io;
struct drv_ctl_l2_ring ring;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
struct cnic_ops {
struct module *cnic_owner;
/* Calls to these functions are protected by RCU. When
* unregistering, we wait for any calls to complete before
* continuing.
*/
int (*cnic_handler)(void *, void *);
int (*cnic_ctl)(void *, struct cnic_ctl_info *);
};
#define MAX_CNIC_VEC 8
struct cnic_irq {
unsigned int vector;
void *status_blk;
u32 status_blk_num;
u32 status_blk_num2;
u32 irq_flags;
#define CNIC_IRQ_FL_MSIX 0x00000001
};
struct cnic_eth_dev {
struct module *drv_owner;
u32 drv_state;
#define CNIC_DRV_STATE_REGD 0x00000001
#define CNIC_DRV_STATE_USING_MSIX 0x00000002
#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
#define CNIC_DRV_STATE_NO_FCOE 0x00000010
u32 chip_id;
u32 max_kwqe_pending;
struct pci_dev *pdev;
void __iomem *io_base;
void __iomem *io_base2;
const void *iro_arr;
u32 ctx_tbl_offset;
u32 ctx_tbl_len;
int ctx_blk_size;
u32 starting_cid;
u32 max_iscsi_conn;
u32 max_fcoe_conn;
u32 max_rdma_conn;
u32 fcoe_init_cid;
u32 fcoe_wwn_port_name_hi;
u32 fcoe_wwn_port_name_lo;
u32 fcoe_wwn_node_name_hi;
u32 fcoe_wwn_node_name_lo;
u16 iscsi_l2_client_id;
u16 iscsi_l2_cid;
u8 iscsi_mac[ETH_ALEN];
int num_irq;
struct cnic_irq irq_arr[MAX_CNIC_VEC];
int (*drv_register_cnic)(struct net_device *,
struct cnic_ops *, void *);
int (*drv_unregister_cnic)(struct net_device *);
int (*drv_submit_kwqes_32)(struct net_device *,
struct kwqe *[], u32);
int (*drv_submit_kwqes_16)(struct net_device *,
struct kwqe_16 *[], u32);
int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
unsigned long reserved1[2];
};
struct cnic_sockaddr {
union {
struct sockaddr_in v4;
struct sockaddr_in6 v6;
} local;
union {
struct sockaddr_in v4;
struct sockaddr_in6 v6;
} remote;
};
struct cnic_sock {
struct cnic_dev *dev;
void *context;
u32 src_ip[4];
u32 dst_ip[4];
u16 src_port;
u16 dst_port;
u16 vlan_id;
unsigned char old_ha[6];
unsigned char ha[6];
u32 mtu;
u32 cid;
u32 l5_cid;
u32 pg_cid;
int ulp_type;
u32 ka_timeout;
u32 ka_interval;
u8 ka_max_probe_count;
u8 tos;
u8 ttl;
u8 snd_seq_scale;
u32 rcv_buf;
u32 snd_buf;
u32 seed;
unsigned long tcp_flags;
#define SK_TCP_NO_DELAY_ACK 0x1
#define SK_TCP_KEEP_ALIVE 0x2
#define SK_TCP_NAGLE 0x4
#define SK_TCP_TIMESTAMP 0x8
#define SK_TCP_SACK 0x10
#define SK_TCP_SEG_SCALING 0x20
unsigned long flags;
#define SK_F_INUSE 0
#define SK_F_OFFLD_COMPLETE 1
#define SK_F_OFFLD_SCHED 2
#define SK_F_PG_OFFLD_COMPLETE 3
#define SK_F_CONNECT_START 4
#define SK_F_IPV6 5
#define SK_F_CLOSING 7
atomic_t ref_count;
u32 state;
struct kwqe kwqe1;
struct kwqe kwqe2;
struct kwqe kwqe3;
};
struct cnic_dev {
struct net_device *netdev;
struct pci_dev *pcidev;
void __iomem *regview;
struct list_head list;
int (*register_device)(struct cnic_dev *dev, int ulp_type,
void *ulp_ctx);
int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num_wqes);
int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
u32 num_wqes);
int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
void *);
int (*cm_destroy)(struct cnic_sock *);
int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
int (*cm_abort)(struct cnic_sock *);
int (*cm_close)(struct cnic_sock *);
struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
char *data, u16 data_size);
unsigned long flags;
#define CNIC_F_CNIC_UP 1
#define CNIC_F_BNX2_CLASS 3
#define CNIC_F_BNX2X_CLASS 4
atomic_t ref_count;
u8 mac_addr[6];
int max_iscsi_conn;
int max_fcoe_conn;
int max_rdma_conn;
void *cnic_priv;
};
#define CNIC_WR(dev, off, val) writel(val, dev->regview + off)
#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off)
#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off)
#define CNIC_RD(dev, off) readl(dev->regview + off)
#define CNIC_RD16(dev, off) readw(dev->regview + off)
struct cnic_ulp_ops {
/* Calls to these functions are protected by RCU. When
* unregistering, we wait for any calls to complete before
* continuing.
*/
void (*cnic_init)(struct cnic_dev *dev);
void (*cnic_exit)(struct cnic_dev *dev);
void (*cnic_start)(void *ulp_ctx);
void (*cnic_stop)(void *ulp_ctx);
void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
u32 num_cqes);
void (*indicate_netevent)(void *ulp_ctx, unsigned long event, u16 vid);
void (*cm_connect_complete)(struct cnic_sock *);
void (*cm_close_complete)(struct cnic_sock *);
void (*cm_abort_complete)(struct cnic_sock *);
void (*cm_remote_close)(struct cnic_sock *);
void (*cm_remote_abort)(struct cnic_sock *);
int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
char *data, u16 data_size);
struct module *owner;
atomic_t ref_count;
};
extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
extern int cnic_unregister_driver(int ulp_type);
extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large