chelsio: Move the Chelsio drivers

Move the drivers for the Chelsio chipsets into
drivers/net/ethernet/chelsio/ and make the necessary Kconfig and
Makefile changes.

CC: Divy Le Ray <divy@chelsio.com>
CC: Dimitris Michailidis <dm@chelsio.com>
CC: Casey Leedom <leedom@chelsio.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Author: Jeff Kirsher
Date:   2011-04-07 06:57:17 -07:00
parent adfc5217e9
commit f7917c009c
78 changed files with 123 additions and 97 deletions

File: drivers/net/ethernet/chelsio/Kconfig

@@ -0,0 +1,106 @@
#
# Chelsio device configuration
#

config NET_VENDOR_CHELSIO
	bool "Chelsio devices"
	depends on PCI || INET
	---help---
	  If you have a network (Ethernet) card belonging to this class, say Y
	  and read the Ethernet-HOWTO, available from
	  <http://www.tldp.org/docs.html#howto>.

	  Note that the answer to this question doesn't directly affect the
	  kernel: saying N will just cause the configurator to skip all
	  the questions about Chelsio devices. If you say Y, you will be asked for
	  your specific card in the following questions.

if NET_VENDOR_CHELSIO

config CHELSIO_T1
	tristate "Chelsio 10Gb Ethernet support"
	depends on PCI
	select CRC32
	select MDIO
	---help---
	  This driver supports Chelsio gigabit and 10-gigabit
	  Ethernet cards. More information about adapter features and
	  performance tuning is in <file:Documentation/networking/cxgb.txt>.

	  For general information about Chelsio and our products, visit
	  our website at <http://www.chelsio.com>.

	  For customer support, please visit our customer support page at
	  <http://www.chelsio.com/support.html>.

	  Please send feedback to <linux-bugs@chelsio.com>.

	  To compile this driver as a module, choose M here: the module
	  will be called cxgb.

config CHELSIO_T1_1G
	bool "Chelsio gigabit Ethernet support"
	depends on CHELSIO_T1
	---help---
	  Enables support for Chelsio's gigabit Ethernet PCI cards. If you
	  are using only 10G cards say 'N' here.

config CHELSIO_T3
	tristate "Chelsio Communications T3 10Gb Ethernet support"
	depends on PCI && INET
	select FW_LOADER
	select MDIO
	---help---
	  This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
	  adapters.

	  For general information about Chelsio and our products, visit
	  our website at <http://www.chelsio.com>.

	  For customer support, please visit our customer support page at
	  <http://www.chelsio.com/support.html>.

	  Please send feedback to <linux-bugs@chelsio.com>.

	  To compile this driver as a module, choose M here: the module
	  will be called cxgb3.

config CHELSIO_T4
	tristate "Chelsio Communications T4 Ethernet support"
	depends on PCI
	select FW_LOADER
	select MDIO
	---help---
	  This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
	  adapters.

	  For general information about Chelsio and our products, visit
	  our website at <http://www.chelsio.com>.

	  For customer support, please visit our customer support page at
	  <http://www.chelsio.com/support.html>.

	  Please send feedback to <linux-bugs@chelsio.com>.

	  To compile this driver as a module choose M here; the module
	  will be called cxgb4.

config CHELSIO_T4VF
	tristate "Chelsio Communications T4 Virtual Function Ethernet support"
	depends on PCI
	---help---
	  This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
	  adapters with PCI-E SR-IOV Virtual Functions.

	  For general information about Chelsio and our products, visit
	  our website at <http://www.chelsio.com>.

	  For customer support, please visit our customer support page at
	  <http://www.chelsio.com/support.html>.

	  Please send feedback to <linux-bugs@chelsio.com>.

	  To compile this driver as a module choose M here; the module
	  will be called cxgb4vf.

endif # NET_VENDOR_CHELSIO

File: drivers/net/ethernet/chelsio/Makefile

@@ -0,0 +1,8 @@
#
# Makefile for the Chelsio network device drivers.
#

obj-$(CONFIG_CHELSIO_T1) += cxgb/
obj-$(CONFIG_CHELSIO_T3) += cxgb3/
obj-$(CONFIG_CHELSIO_T4) += cxgb4/
obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/

File: drivers/net/ethernet/chelsio/cxgb/Makefile

@@ -0,0 +1,9 @@
#
# Chelsio T1 driver
#

obj-$(CONFIG_CHELSIO_T1) += cxgb.o

cxgb-$(CONFIG_CHELSIO_T1_1G) += mv88e1xxx.o vsc7326.o
cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o \
	     mv88x201x.o my3126.o $(cxgb-y)

File: drivers/net/ethernet/chelsio/cxgb/common.h

@@ -0,0 +1,353 @@
/*****************************************************************************
* *
* File: common.h *
* $Revision: 1.21 $ *
* $Date: 2005/06/22 00:43:25 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#define pr_fmt(fmt) "cxgb: " fmt
#ifndef _CXGB_COMMON_H_
#define _CXGB_COMMON_H_
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/crc32.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
#define DRV_NAME "cxgb"
#define DRV_VERSION "2.2"
#define CH_DEVICE(devid, ssid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
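/*
 * Illustrative sketch (not the driver's real table) of how CH_DEVICE()
 * builds pci_device_id entries; the actual table is the t1_pci_tbl
 * declared later in this header and defined in the driver proper.  The
 * device/subsystem IDs below are made up for the example.
 */
static const struct pci_device_id example_pci_tbl[] = {
	CH_DEVICE(0x7, 0x10f1, 0),	/* hypothetical N110-style entry */
	CH_DEVICE(0x10, PCI_ANY_ID, 1),	/* hypothetical N210-style entry */
	{ 0 }				/* end-of-table sentinel */
};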
#define SUPPORTED_PAUSE (1 << 13)
#define SUPPORTED_LOOPBACK (1 << 15)
#define ADVERTISED_PAUSE (1 << 13)
#define ADVERTISED_ASYM_PAUSE (1 << 14)
typedef struct adapter adapter_t;
struct t1_rx_mode {
struct net_device *dev;
};
#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
#define t1_rx_mode_mc_cnt(rm) (netdev_mc_count(rm->dev))
#define t1_get_netdev(rm) (rm->dev)
#define MAX_NPORTS 4
#define PORT_MASK ((1 << MAX_NPORTS) - 1)
#define NMTUS 8
#define TCB_SIZE 128
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
enum {
CHBT_BOARD_N110,
CHBT_BOARD_N210,
CHBT_BOARD_7500,
CHBT_BOARD_8000,
CHBT_BOARD_CHT101,
CHBT_BOARD_CHT110,
CHBT_BOARD_CHT210,
CHBT_BOARD_CHT204,
CHBT_BOARD_CHT204V,
CHBT_BOARD_CHT204E,
CHBT_BOARD_CHN204,
CHBT_BOARD_COUGAR,
CHBT_BOARD_6800,
CHBT_BOARD_SIMUL,
};
enum {
CHBT_TERM_FPGA,
CHBT_TERM_T1,
CHBT_TERM_T2,
CHBT_TERM_T3
};
enum {
CHBT_MAC_CHELSIO_A,
CHBT_MAC_IXF1010,
CHBT_MAC_PM3393,
CHBT_MAC_VSC7321,
CHBT_MAC_DUMMY
};
enum {
CHBT_PHY_88E1041,
CHBT_PHY_88E1111,
CHBT_PHY_88X2010,
CHBT_PHY_XPAK,
CHBT_PHY_MY3126,
CHBT_PHY_8244,
CHBT_PHY_DUMMY
};
enum {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
/* Revisions of T1 chip */
enum {
TERM_T1A = 0,
TERM_T1B = 1,
TERM_T2 = 3
};
struct sge_params {
unsigned int cmdQ_size[2];
unsigned int freelQ_size[2];
unsigned int large_buf_capacity;
unsigned int rx_coalesce_usecs;
unsigned int last_rx_coalesce_raw;
unsigned int default_rx_coalesce_usecs;
unsigned int sample_interval_usecs;
unsigned int coalesce_enable;
unsigned int polling;
};
struct chelsio_pci_params {
unsigned short speed;
unsigned char width;
unsigned char is_pcix;
};
struct tp_params {
unsigned int pm_size;
unsigned int cm_size;
unsigned int pm_rx_base;
unsigned int pm_tx_base;
unsigned int pm_rx_pg_size;
unsigned int pm_tx_pg_size;
unsigned int pm_rx_num_pgs;
unsigned int pm_tx_num_pgs;
unsigned int rx_coalescing_size;
unsigned int use_5tuple_mode;
};
struct mc5_params {
unsigned int mode; /* selects MC5 width */
unsigned int nservers; /* size of server region */
unsigned int nroutes; /* size of routing region */
};
/* Default MC5 region sizes */
#define DEFAULT_SERVER_REGION_LEN 256
#define DEFAULT_RT_REGION_LEN 1024
struct adapter_params {
struct sge_params sge;
struct mc5_params mc5;
struct tp_params tp;
struct chelsio_pci_params pci;
const struct board_info *brd_info;
unsigned short mtus[NMTUS];
unsigned int nports; /* # of ethernet ports */
unsigned int stats_update_period;
unsigned short chip_revision;
unsigned char chip_version;
unsigned char is_asic;
unsigned char has_msi;
};
struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
unsigned short requested_speed; /* speed user has requested */
unsigned short speed; /* actual link speed */
unsigned char requested_duplex; /* duplex user has requested */
unsigned char duplex; /* actual link duplex */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
};
struct cmac;
struct cphy;
struct port_info {
struct net_device *dev;
struct cmac *mac;
struct cphy *phy;
struct link_config link_config;
struct net_device_stats netstats;
};
struct sge;
struct peespi;
struct adapter {
u8 __iomem *regs;
struct pci_dev *pdev;
unsigned long registered_device_map;
unsigned long open_device_map;
unsigned long flags;
const char *name;
int msg_enable;
u32 mmio_len;
struct work_struct ext_intr_handler_task;
struct adapter_params params;
/* Terminator modules. */
struct sge *sge;
struct peespi *espi;
struct petp *tp;
struct napi_struct napi;
struct port_info port[MAX_NPORTS];
struct delayed_work stats_update_task;
struct timer_list stats_update_timer;
spinlock_t tpi_lock;
spinlock_t work_lock;
spinlock_t mac_lock;
/* guards async operations */
spinlock_t async_lock ____cacheline_aligned;
u32 slow_intr_mask;
int t1powersave;
};
enum { /* adapter flags */
FULL_INIT_DONE = 1 << 0,
};
struct mdio_ops;
struct gmac;
struct gphy;
struct board_info {
unsigned char board;
unsigned char port_number;
unsigned long caps;
unsigned char chip_term;
unsigned char chip_mac;
unsigned char chip_phy;
unsigned int clock_core;
unsigned int clock_mc3;
unsigned int clock_mc4;
unsigned int espi_nports;
unsigned int clock_elmer0;
unsigned char mdio_mdien;
unsigned char mdio_mdiinv;
unsigned char mdio_mdc;
unsigned char mdio_phybaseaddr;
const struct gmac *gmac;
const struct gphy *gphy;
const struct mdio_ops *mdio_ops;
const char *desc;
};
static inline int t1_is_asic(const adapter_t *adapter)
{
return adapter->params.is_asic;
}
extern const struct pci_device_id t1_pci_tbl[];
static inline int adapter_matches_type(const adapter_t *adapter,
int version, int revision)
{
return adapter->params.chip_version == version &&
adapter->params.chip_revision == revision;
}
#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
#define is_T2(adap) adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2)
/* Returns true if an adapter supports VLAN acceleration and TSO */
static inline int vlan_tso_capable(const adapter_t *adapter)
{
return !t1_is_T1B(adapter);
}
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
#define board_info(adapter) ((adapter)->params.brd_info)
#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full)
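/*
 * Illustrative only: how for_each_port(), board_info() and is_10G()
 * above combine in practice.  The helper name and the pr_info() body
 * are invented for this sketch and are not part of the driver.
 */
static inline void example_log_ports(adapter_t *adapter)
{
	int i;

	for_each_port(adapter, i)
		pr_info("%s port %d (%s)\n", board_info(adapter)->desc, i,
			is_10G(adapter) ? "10G" : "1G");
}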
static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
{
return board_info(adap)->clock_core / 1000000;
}
extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
extern void t1_interrupts_enable(adapter_t *adapter);
extern void t1_interrupts_disable(adapter_t *adapter);
extern void t1_interrupts_clear(adapter_t *adapter);
extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
extern void t1_elmer0_ext_intr(adapter_t *adapter);
extern int t1_slow_intr_handler(adapter_t *adapter);
extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
extern const struct board_info *t1_get_board_info(unsigned int board_id);
extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
unsigned short ssid);
extern int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
struct adapter_params *p);
extern int t1_init_hw_modules(adapter_t *adapter);
extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
extern void t1_free_sw_modules(adapter_t *adapter);
extern void t1_fatal_err(adapter_t *adapter);
extern void t1_link_changed(adapter_t *adapter, int port_id);
extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
int speed, int duplex, int pause);
#endif /* _CXGB_COMMON_H_ */

File: drivers/net/ethernet/chelsio/cxgb/cphy.h

@@ -0,0 +1,175 @@
/*****************************************************************************
* *
* File: cphy.h *
* $Revision: 1.7 $ *
* $Date: 2005/06/21 18:29:47 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_CPHY_H_
#define _CXGB_CPHY_H_
#include "common.h"
struct mdio_ops {
void (*init)(adapter_t *adapter, const struct board_info *bi);
int (*read)(struct net_device *dev, int phy_addr, int mmd_addr,
u16 reg_addr);
int (*write)(struct net_device *dev, int phy_addr, int mmd_addr,
u16 reg_addr, u16 val);
unsigned mode_support;
};
/* PHY interrupt types */
enum {
cphy_cause_link_change = 0x1,
cphy_cause_error = 0x2,
cphy_cause_fifo_error = 0x3
};
enum {
PHY_LINK_UP = 0x1,
PHY_AUTONEG_RDY = 0x2,
PHY_AUTONEG_EN = 0x4
};
struct cphy;
/* PHY operations */
struct cphy_ops {
void (*destroy)(struct cphy *);
int (*reset)(struct cphy *, int wait);
int (*interrupt_enable)(struct cphy *);
int (*interrupt_disable)(struct cphy *);
int (*interrupt_clear)(struct cphy *);
int (*interrupt_handler)(struct cphy *);
int (*autoneg_enable)(struct cphy *);
int (*autoneg_disable)(struct cphy *);
int (*autoneg_restart)(struct cphy *);
int (*advertise)(struct cphy *phy, unsigned int advertise_map);
int (*set_loopback)(struct cphy *, int on);
int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
int *duplex, int *fc);
u32 mmds;
};
/* A PHY instance */
struct cphy {
int state; /* Link status state machine */
adapter_t *adapter; /* associated adapter */
struct delayed_work phy_update;
u16 bmsr;
int count;
int act_count;
int act_on;
u32 elmer_gpo;
const struct cphy_ops *ops; /* PHY operations */
struct mdio_if_info mdio;
struct cphy_instance *instance;
};
/* Convenience MDIO read/write wrappers */
static inline int cphy_mdio_read(struct cphy *cphy, int mmd, int reg,
unsigned int *valp)
{
int rc = cphy->mdio.mdio_read(cphy->mdio.dev, cphy->mdio.prtad, mmd,
reg);
*valp = (rc >= 0) ? rc : -1;
return (rc >= 0) ? 0 : rc;
}
static inline int cphy_mdio_write(struct cphy *cphy, int mmd, int reg,
unsigned int val)
{
return cphy->mdio.mdio_write(cphy->mdio.dev, cphy->mdio.prtad, mmd,
reg, val);
}
static inline int simple_mdio_read(struct cphy *cphy, int reg,
unsigned int *valp)
{
return cphy_mdio_read(cphy, MDIO_DEVAD_NONE, reg, valp);
}
static inline int simple_mdio_write(struct cphy *cphy, int reg,
unsigned int val)
{
return cphy_mdio_write(cphy, MDIO_DEVAD_NONE, reg, val);
}
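/*
 * Usage sketch for the wrappers above (assumes the MII_* register
 * constants from <linux/mii.h>, which <linux/mdio.h> pulls in); the
 * helper itself is hypothetical, not part of the driver.
 */
static inline int example_read_bmsr(struct cphy *cphy, unsigned int *bmsr)
{
	return simple_mdio_read(cphy, MII_BMSR, bmsr);
}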
/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, struct net_device *dev,
int phy_addr, struct cphy_ops *phy_ops,
const struct mdio_ops *mdio_ops)
{
struct adapter *adapter = netdev_priv(dev);
phy->adapter = adapter;
phy->ops = phy_ops;
if (mdio_ops) {
phy->mdio.prtad = phy_addr;
phy->mdio.mmds = phy_ops->mmds;
phy->mdio.mode_support = mdio_ops->mode_support;
phy->mdio.mdio_read = mdio_ops->read;
phy->mdio.mdio_write = mdio_ops->write;
}
phy->mdio.dev = dev;
}
/* Operations of the PHY-instance factory */
struct gphy {
/* Construct a PHY instance with the given PHY address */
struct cphy *(*create)(struct net_device *dev, int phy_addr,
const struct mdio_ops *mdio_ops);
/*
* Reset the PHY chip. This resets the whole PHY chip, not individual
* ports.
*/
int (*reset)(adapter_t *adapter);
};
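/*
 * Rough sketch of how a probe path might use this factory: pick the
 * board's gphy operations and construct one PHY instance per port.
 * The helper is hypothetical; "bi" and "phy_addr" would come from
 * board probing.
 */
static inline struct cphy *example_attach_phy(struct net_device *dev,
					      const struct board_info *bi,
					      int phy_addr)
{
	return bi->gphy->create(dev, phy_addr, bi->mdio_ops);
}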
extern const struct gphy t1_my3126_ops;
extern const struct gphy t1_mv88e1xxx_ops;
extern const struct gphy t1_vsc8244_ops;
extern const struct gphy t1_mv88x201x_ops;
#endif /* _CXGB_CPHY_H_ */

File: drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h

@@ -0,0 +1,639 @@
/*****************************************************************************
* *
* File: cpl5_cmd.h *
* $Revision: 1.6 $ *
* $Date: 2005/06/21 18:29:47 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_CPL5_CMD_H_
#define _CXGB_CPL5_CMD_H_
#include <asm/byteorder.h>
#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
#error "Adjust your <asm/byteorder.h> defines"
#endif
enum CPL_opcode {
CPL_PASS_OPEN_REQ = 0x1,
CPL_PASS_OPEN_RPL = 0x2,
CPL_PASS_ESTABLISH = 0x3,
CPL_PASS_ACCEPT_REQ = 0xE,
CPL_PASS_ACCEPT_RPL = 0x4,
CPL_ACT_OPEN_REQ = 0x5,
CPL_ACT_OPEN_RPL = 0x6,
CPL_CLOSE_CON_REQ = 0x7,
CPL_CLOSE_CON_RPL = 0x8,
CPL_CLOSE_LISTSRV_REQ = 0x9,
CPL_CLOSE_LISTSRV_RPL = 0xA,
CPL_ABORT_REQ = 0xB,
CPL_ABORT_RPL = 0xC,
CPL_PEER_CLOSE = 0xD,
CPL_ACT_ESTABLISH = 0x17,
CPL_GET_TCB = 0x24,
CPL_GET_TCB_RPL = 0x25,
CPL_SET_TCB = 0x26,
CPL_SET_TCB_FIELD = 0x27,
CPL_SET_TCB_RPL = 0x28,
CPL_PCMD = 0x29,
CPL_PCMD_READ = 0x31,
CPL_PCMD_READ_RPL = 0x32,
CPL_RX_DATA = 0xA0,
CPL_RX_DATA_DDP = 0xA1,
CPL_RX_DATA_ACK = 0xA3,
CPL_RX_PKT = 0xAD,
CPL_RX_ISCSI_HDR = 0xAF,
CPL_TX_DATA_ACK = 0xB0,
CPL_TX_DATA = 0xB1,
CPL_TX_PKT = 0xB2,
CPL_TX_PKT_LSO = 0xB6,
CPL_RTE_DELETE_REQ = 0xC0,
CPL_RTE_DELETE_RPL = 0xC1,
CPL_RTE_WRITE_REQ = 0xC2,
CPL_RTE_WRITE_RPL = 0xD3,
CPL_RTE_READ_REQ = 0xC3,
CPL_RTE_READ_RPL = 0xC4,
CPL_L2T_WRITE_REQ = 0xC5,
CPL_L2T_WRITE_RPL = 0xD4,
CPL_L2T_READ_REQ = 0xC6,
CPL_L2T_READ_RPL = 0xC7,
CPL_SMT_WRITE_REQ = 0xC8,
CPL_SMT_WRITE_RPL = 0xD5,
CPL_SMT_READ_REQ = 0xC9,
CPL_SMT_READ_RPL = 0xCA,
CPL_ARP_MISS_REQ = 0xCD,
CPL_ARP_MISS_RPL = 0xCE,
CPL_MIGRATE_C2T_REQ = 0xDC,
CPL_MIGRATE_C2T_RPL = 0xDD,
CPL_ERROR = 0xD7,
/* internal: driver -> TOM */
CPL_MSS_CHANGE = 0xE1
};
#define NUM_CPL_CMDS 256
enum CPL_error {
CPL_ERR_NONE = 0,
CPL_ERR_TCAM_PARITY = 1,
CPL_ERR_TCAM_FULL = 3,
CPL_ERR_CONN_RESET = 20,
CPL_ERR_CONN_EXIST = 22,
CPL_ERR_ARP_MISS = 23,
CPL_ERR_BAD_SYN = 24,
CPL_ERR_CONN_TIMEDOUT = 30,
CPL_ERR_XMIT_TIMEDOUT = 31,
CPL_ERR_PERSIST_TIMEDOUT = 32,
CPL_ERR_FINWAIT2_TIMEDOUT = 33,
CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
CPL_ERR_ABORT_FAILED = 42,
CPL_ERR_GENERAL = 99
};
enum {
CPL_CONN_POLICY_AUTO = 0,
CPL_CONN_POLICY_ASK = 1,
CPL_CONN_POLICY_DENY = 3
};
enum {
ULP_MODE_NONE = 0,
ULP_MODE_TCPDDP = 1,
ULP_MODE_ISCSI = 2,
ULP_MODE_IWARP = 3,
ULP_MODE_SSL = 4
};
enum {
CPL_PASS_OPEN_ACCEPT,
CPL_PASS_OPEN_REJECT
};
enum {
CPL_ABORT_SEND_RST = 0,
CPL_ABORT_NO_RST,
CPL_ABORT_POST_CLOSE_REQ = 2
};
enum { /* TX_PKT_LSO ethernet types */
CPL_ETH_II,
CPL_ETH_II_VLAN,
CPL_ETH_802_3,
CPL_ETH_802_3_VLAN
};
union opcode_tid {
u32 opcode_tid;
u8 opcode;
};
#define S_OPCODE 24
#define V_OPCODE(x) ((x) << S_OPCODE)
#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
#define G_TID(x) ((x) & 0xFFFFFF)
/* tid is assumed to be 24-bits */
#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
/* extract the TID from a CPL command */
#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
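/*
 * Worked example with made-up values: for opcode 0xB2 (CPL_TX_PKT) and
 * tid 0x123, MK_OPCODE_TID(0xB2, 0x123) == 0xB2000123; G_OPCODE()
 * recovers 0xB2 and G_TID() recovers 0x123.  GET_TID() additionally
 * byte-swaps, since the field is big-endian on the wire.
 */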
struct tcp_options {
u16 mss;
u8 wsf;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 rsvd:4;
u8 ecn:1;
u8 sack:1;
u8 tstamp:1;
#else
u8 tstamp:1;
u8 sack:1;
u8 ecn:1;
u8 rsvd:4;
#endif
};
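/*
 * The mirrored __LITTLE_ENDIAN_BITFIELD/__BIG_ENDIAN_BITFIELD layouts
 * above (and in the structures below) compensate for C leaving bitfield
 * ordering to the implementation: declaring the fields in opposite
 * order per endianness keeps the on-the-wire layout identical.
 */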
struct cpl_pass_open_req {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u32 opt0h;
u32 opt0l;
u32 peer_netmask;
u32 opt1;
};
struct cpl_pass_open_rpl {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u8 resvd[7];
u8 status;
};
struct cpl_pass_establish {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u32 tos_tid;
u8 l2t_idx;
u8 rsvd[3];
u32 snd_isn;
u32 rcv_isn;
};
struct cpl_pass_accept_req {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u32 tos_tid;
struct tcp_options tcp_options;
u8 dst_mac[6];
u16 vlan_tag;
u8 src_mac[6];
u8 rsvd[2];
u32 rcv_isn;
u32 unknown_tcp_options;
};
struct cpl_pass_accept_rpl {
union opcode_tid ot;
u32 rsvd0;
u32 rsvd1;
u32 peer_ip;
u32 opt0h;
union {
u32 opt0l;
struct {
u8 rsvd[3];
u8 status;
};
};
};
struct cpl_act_open_req {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u32 opt0h;
u32 opt0l;
u32 iff_vlantag;
u32 rsvd;
};
struct cpl_act_open_rpl {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u32 new_tid;
u8 rsvd[3];
u8 status;
};
struct cpl_act_establish {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u32 tos_tid;
u32 rsvd;
u32 snd_isn;
u32 rcv_isn;
};
struct cpl_get_tcb {
union opcode_tid ot;
u32 rsvd;
};
struct cpl_get_tcb_rpl {
union opcode_tid ot;
u16 len;
u8 rsvd;
u8 status;
};
struct cpl_set_tcb {
union opcode_tid ot;
u16 len;
u16 rsvd;
};
struct cpl_set_tcb_field {
union opcode_tid ot;
u8 rsvd[3];
u8 offset;
u32 mask;
u32 val;
};
struct cpl_set_tcb_rpl {
union opcode_tid ot;
u8 rsvd[3];
u8 status;
};
struct cpl_pcmd {
union opcode_tid ot;
u16 dlen_in;
u16 dlen_out;
u32 pcmd_parm[2];
};
struct cpl_pcmd_read {
union opcode_tid ot;
u32 rsvd1;
u16 rsvd2;
u32 addr;
u16 len;
};
struct cpl_pcmd_read_rpl {
union opcode_tid ot;
u16 len;
};
struct cpl_close_con_req {
union opcode_tid ot;
u32 rsvd;
};
struct cpl_close_con_rpl {
union opcode_tid ot;
u8 rsvd[3];
u8 status;
u32 snd_nxt;
u32 rcv_nxt;
};
struct cpl_close_listserv_req {
union opcode_tid ot;
u32 rsvd;
};
struct cpl_close_listserv_rpl {
union opcode_tid ot;
u8 rsvd[3];
u8 status;
};
struct cpl_abort_req {
union opcode_tid ot;
u32 rsvd0;
u8 rsvd1;
u8 cmd;
u8 rsvd2[6];
};
struct cpl_abort_rpl {
union opcode_tid ot;
u32 rsvd0;
u8 rsvd1;
u8 status;
u8 rsvd2[6];
};
struct cpl_peer_close {
union opcode_tid ot;
u32 rsvd;
};
struct cpl_tx_data {
union opcode_tid ot;
u32 len;
u32 rsvd0;
u16 urg;
u16 flags;
};
struct cpl_tx_data_ack {
union opcode_tid ot;
u32 ack_seq;
};
struct cpl_rx_data {
union opcode_tid ot;
u32 len;
u32 seq;
u16 urg;
u8 rsvd;
u8 status;
};
struct cpl_rx_data_ack {
union opcode_tid ot;
u32 credit;
};
struct cpl_rx_data_ddp {
union opcode_tid ot;
u32 len;
u32 seq;
u32 nxt_seq;
u32 ulp_crc;
u16 ddp_status;
u8 rsvd;
u8 status;
};
/*
* We want this header's alignment to be no more stringent than 2-byte aligned.
* All fields are u8 or u16 except for the length. However that field is not
* used so we break it into 2 16-bit parts to easily meet our alignment needs.
*/
struct cpl_tx_pkt {
u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 iff:4;
u8 ip_csum_dis:1;
u8 l4_csum_dis:1;
u8 vlan_valid:1;
u8 rsvd:1;
#else
u8 rsvd:1;
u8 vlan_valid:1;
u8 l4_csum_dis:1;
u8 ip_csum_dis:1;
u8 iff:4;
#endif
u16 vlan;
u16 len_hi;
u16 len_lo;
};
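/*
 * Minimal sketch, not part of the driver: a consumer that did need the
 * split length field would reassemble it like this (assuming the two
 * halves are big-endian like other CPL fields).
 */
static inline u32 example_cpl_tx_pkt_len(const struct cpl_tx_pkt *p)
{
	return ((u32)ntohs(p->len_hi) << 16) | ntohs(p->len_lo);
}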
struct cpl_tx_pkt_lso {
u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 iff:4;
u8 ip_csum_dis:1;
u8 l4_csum_dis:1;
u8 vlan_valid:1;
u8 :1;
#else
u8 :1;
u8 vlan_valid:1;
u8 l4_csum_dis:1;
u8 ip_csum_dis:1;
u8 iff:4;
#endif
u16 vlan;
__be32 len;
u8 rsvd[5];
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 tcp_hdr_words:4;
u8 ip_hdr_words:4;
#else
u8 ip_hdr_words:4;
u8 tcp_hdr_words:4;
#endif
__be16 eth_type_mss;
};
struct cpl_rx_pkt {
u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 iff:4;
u8 csum_valid:1;
u8 bad_pkt:1;
u8 vlan_valid:1;
u8 rsvd:1;
#else
u8 rsvd:1;
u8 vlan_valid:1;
u8 bad_pkt:1;
u8 csum_valid:1;
u8 iff:4;
#endif
u16 csum;
u16 vlan;
u16 len;
};
struct cpl_l2t_write_req {
union opcode_tid ot;
u32 params;
u8 rsvd1[2];
u8 dst_mac[6];
};
struct cpl_l2t_write_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd[3];
};
struct cpl_l2t_read_req {
union opcode_tid ot;
u8 rsvd[3];
u8 l2t_idx;
};
struct cpl_l2t_read_rpl {
union opcode_tid ot;
u32 params;
u8 rsvd1[2];
u8 dst_mac[6];
};
struct cpl_smt_write_req {
union opcode_tid ot;
u8 rsvd0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 rsvd1:1;
u8 mtu_idx:3;
u8 iff:4;
#else
u8 iff:4;
u8 mtu_idx:3;
u8 rsvd1:1;
#endif
u16 rsvd2;
u16 rsvd3;
u8 src_mac1[6];
u16 rsvd4;
u8 src_mac0[6];
};
struct cpl_smt_write_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd[3];
};
struct cpl_smt_read_req {
union opcode_tid ot;
u8 rsvd0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 rsvd1:4;
u8 iff:4;
#else
u8 iff:4;
u8 rsvd1:4;
#endif
u16 rsvd2;
};
struct cpl_smt_read_rpl {
union opcode_tid ot;
u8 status;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 rsvd1:1;
u8 mtu_idx:3;
u8 rsvd0:4;
#else
u8 rsvd0:4;
u8 mtu_idx:3;
u8 rsvd1:1;
#endif
u16 rsvd2;
u16 rsvd3;
u8 src_mac1[6];
u16 rsvd4;
u8 src_mac0[6];
};
struct cpl_rte_delete_req {
union opcode_tid ot;
u32 params;
};
struct cpl_rte_delete_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd[3];
};
struct cpl_rte_write_req {
union opcode_tid ot;
u32 params;
u32 netmask;
u32 faddr;
};
struct cpl_rte_write_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd[3];
};
struct cpl_rte_read_req {
union opcode_tid ot;
u32 params;
};
struct cpl_rte_read_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd0[2];
u8 l2t_idx;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 rsvd1:7;
u8 select:1;
#else
u8 select:1;
u8 rsvd1:7;
#endif
u8 rsvd2[3];
u32 addr;
};
struct cpl_mss_change {
union opcode_tid ot;
u32 mss;
};
#endif /* _CXGB_CPL5_CMD_H_ */

File diff suppressed because it is too large.

File: drivers/net/ethernet/chelsio/cxgb/elmer0.h

@@ -0,0 +1,158 @@
/*****************************************************************************
* *
* File: elmer0.h *
* $Revision: 1.6 $ *
* $Date: 2005/06/21 22:49:43 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_ELMER0_H_
#define _CXGB_ELMER0_H_
/* ELMER0 flavors */
enum {
ELMER0_XC2S300E_6FT256_C,
ELMER0_XC2S100E_6TQ144_C
};
/* ELMER0 registers */
#define A_ELMER0_VERSION 0x100000
#define A_ELMER0_PHY_CFG 0x100004
#define A_ELMER0_INT_ENABLE 0x100008
#define A_ELMER0_INT_CAUSE 0x10000c
#define A_ELMER0_GPI_CFG 0x100010
#define A_ELMER0_GPI_STAT 0x100014
#define A_ELMER0_GPO 0x100018
#define A_ELMER0_PORT0_MI1_CFG 0x400000
#define S_MI1_MDI_ENABLE 0
#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
#define F_MI1_MDI_ENABLE V_MI1_MDI_ENABLE(1U)
#define S_MI1_MDI_INVERT 1
#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT)
#define F_MI1_MDI_INVERT V_MI1_MDI_INVERT(1U)
#define S_MI1_PREAMBLE_ENABLE 2
#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
#define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U)
#define S_MI1_SOF 3
#define M_MI1_SOF 0x3
#define V_MI1_SOF(x) ((x) << S_MI1_SOF)
#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF)
#define S_MI1_CLK_DIV 5
#define M_MI1_CLK_DIV 0xff
#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
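/*
 * Convention note (an observation about the macros in these headers,
 * not new definitions): S_* is a field's starting bit, M_* its mask,
 * V_*(x) positions a value, F_* is a single-bit flag, and G_*(x)
 * extracts a field.  E.g. V_MI1_CLK_DIV(0x20) == 0x20 << 5 == 0x400,
 * and G_MI1_CLK_DIV(V_MI1_CLK_DIV(0x20)) == 0x20.
 */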
#define A_ELMER0_PORT0_MI1_ADDR 0x400004
#define S_MI1_REG_ADDR 0
#define M_MI1_REG_ADDR 0x1f
#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)
#define S_MI1_PHY_ADDR 5
#define M_MI1_PHY_ADDR 0x1f
#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)
#define A_ELMER0_PORT0_MI1_DATA 0x400008
#define S_MI1_DATA 0
#define M_MI1_DATA 0xffff
#define V_MI1_DATA(x) ((x) << S_MI1_DATA)
#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA)
#define A_ELMER0_PORT0_MI1_OP 0x40000c
#define S_MI1_OP 0
#define M_MI1_OP 0x3
#define V_MI1_OP(x) ((x) << S_MI1_OP)
#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP)
#define S_MI1_ADDR_AUTOINC 2
#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC)
#define F_MI1_ADDR_AUTOINC V_MI1_ADDR_AUTOINC(1U)
#define S_MI1_OP_BUSY 31
#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U)
#define A_ELMER0_PORT1_MI1_CFG 0x500000
#define A_ELMER0_PORT1_MI1_ADDR 0x500004
#define A_ELMER0_PORT1_MI1_DATA 0x500008
#define A_ELMER0_PORT1_MI1_OP 0x50000c
#define A_ELMER0_PORT2_MI1_CFG 0x600000
#define A_ELMER0_PORT2_MI1_ADDR 0x600004
#define A_ELMER0_PORT2_MI1_DATA 0x600008
#define A_ELMER0_PORT2_MI1_OP 0x60000c
#define A_ELMER0_PORT3_MI1_CFG 0x700000
#define A_ELMER0_PORT3_MI1_ADDR 0x700004
#define A_ELMER0_PORT3_MI1_DATA 0x700008
#define A_ELMER0_PORT3_MI1_OP 0x70000c
/* Simple bit definition for GPI and GP0 registers. */
#define ELMER0_GP_BIT0 0x0001
#define ELMER0_GP_BIT1 0x0002
#define ELMER0_GP_BIT2 0x0004
#define ELMER0_GP_BIT3 0x0008
#define ELMER0_GP_BIT4 0x0010
#define ELMER0_GP_BIT5 0x0020
#define ELMER0_GP_BIT6 0x0040
#define ELMER0_GP_BIT7 0x0080
#define ELMER0_GP_BIT8 0x0100
#define ELMER0_GP_BIT9 0x0200
#define ELMER0_GP_BIT10 0x0400
#define ELMER0_GP_BIT11 0x0800
#define ELMER0_GP_BIT12 0x1000
#define ELMER0_GP_BIT13 0x2000
#define ELMER0_GP_BIT14 0x4000
#define ELMER0_GP_BIT15 0x8000
#define ELMER0_GP_BIT16 0x10000
#define ELMER0_GP_BIT17 0x20000
#define ELMER0_GP_BIT18 0x40000
#define ELMER0_GP_BIT19 0x80000
#define MI1_OP_DIRECT_WRITE 1
#define MI1_OP_DIRECT_READ 2
#define MI1_OP_INDIRECT_ADDRESS 0
#define MI1_OP_INDIRECT_WRITE 1
#define MI1_OP_INDIRECT_READ_INC 2
#define MI1_OP_INDIRECT_READ 3
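/*
 * Hedged sketch of a direct MI1 read built from the registers above,
 * modeled on the TPI access pattern used elsewhere in this driver.  It
 * assumes common.h has been included for adapter_t and t1_tpi_read/
 * t1_tpi_write; locking and busy-polling of F_MI1_OP_BUSY are omitted,
 * and the helper itself is hypothetical.
 */
static inline int example_mi1_read(adapter_t *adapter, int phy, int reg,
				   u32 *valp)
{
	t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR,
		     V_MI1_REG_ADDR(reg) | V_MI1_PHY_ADDR(phy));
	t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ);
	/* A real implementation would poll F_MI1_OP_BUSY here. */
	return t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
}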
#endif /* _CXGB_ELMER0_H_ */

File: drivers/net/ethernet/chelsio/cxgb/espi.c

@@ -0,0 +1,373 @@
/*****************************************************************************
* *
* File: espi.c *
* $Revision: 1.14 $ *
* $Date: 2005/05/14 00:59:32 $ *
* Description: *
* Ethernet SPI functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#include "common.h"
#include "regs.h"
#include "espi.h"
struct peespi {
adapter_t *adapter;
struct espi_intr_counts intr_cnt;
u32 misc_ctrl;
spinlock_t lock;
};
#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \
F_RAMPARITYERR | F_DIP2PARITYERR)
#define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \
| F_MONITORED_INTERFACE)
#define TRICN_CNFG 14
#define TRICN_CMD_READ 0x11
#define TRICN_CMD_WRITE 0x21
#define TRICN_CMD_ATTEMPTS 10
static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
int ch_addr, int reg_offset, u32 wr_data)
{
int busy, attempts = TRICN_CMD_ATTEMPTS;
writel(V_WRITE_DATA(wr_data) |
V_REGISTER_OFFSET(reg_offset) |
V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
V_BUNDLE_ADDR(bundle_addr) |
V_SPI4_COMMAND(TRICN_CMD_WRITE),
adapter->regs + A_ESPI_CMD_ADDR);
writel(0, adapter->regs + A_ESPI_GOSTAT);
do {
busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
} while (busy && --attempts);
if (busy)
pr_err("%s: TRICN write timed out\n", adapter->name);
return busy;
}
static int tricn_init(adapter_t *adapter)
{
int i, sme = 1;
if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) {
pr_err("%s: ESPI clock not ready\n", adapter->name);
return -1;
}
writel(F_ESPI_RX_CORE_RST, adapter->regs + A_ESPI_RX_RESET);
if (sme) {
tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
}
for (i = 1; i <= 8; i++)
tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
for (i = 1; i <= 2; i++)
tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
for (i = 1; i <= 3; i++)
tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1);
tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1);
tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1);
tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80);
tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1);
writel(F_ESPI_RX_CORE_RST | F_ESPI_RX_LNK_RST,
adapter->regs + A_ESPI_RX_RESET);
return 0;
}
void t1_espi_intr_enable(struct peespi *espi)
{
u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
/*
* Cannot enable ESPI interrupts on T1B because HW asserts the
* interrupt incorrectly, namely the driver gets ESPI interrupts
* but no data is actually dropped (this can be verified by reading the ESPI
* drop registers). Also, once the ESPI interrupt is asserted it
* cannot be cleared (HW bug).
*/
enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE);
writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
}
void t1_espi_intr_clear(struct peespi *espi)
{
readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
}
void t1_espi_intr_disable(struct peespi *espi)
{
u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE);
writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
}
int t1_espi_intr_handler(struct peespi *espi)
{
u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
if (status & F_DIP4ERR)
espi->intr_cnt.DIP4_err++;
if (status & F_RXDROP)
espi->intr_cnt.rx_drops++;
if (status & F_TXDROP)
espi->intr_cnt.tx_drops++;
if (status & F_RXOVERFLOW)
espi->intr_cnt.rx_ovflw++;
if (status & F_RAMPARITYERR)
espi->intr_cnt.parity_err++;
if (status & F_DIP2PARITYERR) {
espi->intr_cnt.DIP2_parity_err++;
/*
* Must read the error count to clear the interrupt
* that it causes.
*/
readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
}
/*
* For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
* write the status as is.
*/
if (status && t1_is_T1B(espi->adapter))
status = 1;
writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
return 0;
}
const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
{
return &espi->intr_cnt;
}
static void espi_setup_for_pm3393(adapter_t *adapter)
{
u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1);
writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3);
writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH);
writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
}
static void espi_setup_for_vsc7321(adapter_t *adapter)
{
writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1);
writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
writel(V_RX_NPORTS(4) | V_TX_NPORTS(4), adapter->regs + A_PORT_CONFIG);
writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
}
/*
* Note that T1B requires at least 2 ports for IXF1010 due to a HW bug.
*/
static void espi_setup_for_ixf1010(adapter_t *adapter, int nports)
{
writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
if (nports == 4) {
if (is_T2(adapter)) {
writel(0xf00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
writel(0x3c0, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
} else {
writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
}
} else {
writel(0x1fff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
}
writel(V_RX_NPORTS(nports) | V_TX_NPORTS(nports), adapter->regs + A_PORT_CONFIG);
}
int t1_espi_init(struct peespi *espi, int mac_type, int nports)
{
u32 status_enable_extra = 0;
adapter_t *adapter = espi->adapter;
/* Disable ESPI training. MACs that can handle it enable it below. */
writel(0, adapter->regs + A_ESPI_TRAIN);
if (is_T2(adapter)) {
writel(V_OUT_OF_SYNC_COUNT(4) |
V_DIP2_PARITY_ERR_THRES(3) |
V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
writel(nports == 4 ? 0x200040 : 0x1000080,
adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
} else
writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
if (mac_type == CHBT_MAC_PM3393)
espi_setup_for_pm3393(adapter);
else if (mac_type == CHBT_MAC_VSC7321)
espi_setup_for_vsc7321(adapter);
else if (mac_type == CHBT_MAC_IXF1010) {
status_enable_extra = F_INTEL1010MODE;
espi_setup_for_ixf1010(adapter, nports);
} else
return -1;
writel(status_enable_extra | F_RXSTATUSENABLE,
adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
if (is_T2(adapter)) {
tricn_init(adapter);
/*
* Always position the control at the 1st port egress IN
* (sop,eop) counter to reduce PIOs for T/N210 workaround.
*/
espi->misc_ctrl = readl(adapter->regs + A_ESPI_MISC_CONTROL);
espi->misc_ctrl &= ~MON_MASK;
espi->misc_ctrl |= F_MONITORED_DIRECTION;
if (adapter->params.nports == 1)
espi->misc_ctrl |= F_MONITORED_INTERFACE;
writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
spin_lock_init(&espi->lock);
}
return 0;
}
void t1_espi_destroy(struct peespi *espi)
{
kfree(espi);
}
struct peespi *t1_espi_create(adapter_t *adapter)
{
struct peespi *espi = kzalloc(sizeof(*espi), GFP_KERNEL);
if (espi)
espi->adapter = adapter;
return espi;
}
#if 0
void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
{
struct peespi *espi = adapter->espi;
if (!is_T2(adapter))
return;
spin_lock(&espi->lock);
espi->misc_ctrl = (val & ~MON_MASK) |
(espi->misc_ctrl & MON_MASK);
writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
spin_unlock(&espi->lock);
}
#endif /* 0 */
u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
{
struct peespi *espi = adapter->espi;
u32 sel;
if (!is_T2(adapter))
return 0;
sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
if (!wait) {
if (!spin_trylock(&espi->lock))
return 0;
} else
spin_lock(&espi->lock);
if ((sel != (espi->misc_ctrl & MON_MASK))) {
writel(((espi->misc_ctrl & ~MON_MASK) | sel),
adapter->regs + A_ESPI_MISC_CONTROL);
sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
} else
sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
spin_unlock(&espi->lock);
return sel;
}
/*
* This function is for T204 only.  Unlike t1_espi_get_mon(), it reads
* espiInTxSop[0-3] in one shot, since there is no per-port counter on
* the out side.
*/
int t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
{
struct peespi *espi = adapter->espi;
u8 i, nport = (u8)adapter->params.nports;
if (!wait) {
if (!spin_trylock(&espi->lock))
return -1;
} else
spin_lock(&espi->lock);
if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) {
espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) |
F_MONITORED_DIRECTION;
writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
}
for (i = 0 ; i < nport; i++, valp++) {
if (i) {
writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i),
adapter->regs + A_ESPI_MISC_CONTROL);
}
*valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
}
writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
spin_unlock(&espi->lock);
return 0;
}

File: drivers/net/ethernet/chelsio/cxgb/espi.h

@@ -0,0 +1,68 @@
/*****************************************************************************
* *
* File: espi.h *
* $Revision: 1.7 $ *
* $Date: 2005/06/21 18:29:47 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_ESPI_H_
#define _CXGB_ESPI_H_
#include "common.h"
struct espi_intr_counts {
unsigned int DIP4_err;
unsigned int rx_drops;
unsigned int tx_drops;
unsigned int rx_ovflw;
unsigned int parity_err;
unsigned int DIP2_parity_err;
};
struct peespi;
struct peespi *t1_espi_create(adapter_t *adapter);
void t1_espi_destroy(struct peespi *espi);
int t1_espi_init(struct peespi *espi, int mac_type, int nports);
void t1_espi_intr_enable(struct peespi *);
void t1_espi_intr_clear(struct peespi *);
void t1_espi_intr_disable(struct peespi *);
int t1_espi_intr_handler(struct peespi *);
const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
int t1_espi_get_mon_t204(adapter_t *, u32 *, u8);
#endif /* _CXGB_ESPI_H_ */

File: drivers/net/ethernet/chelsio/cxgb/fpga_defs.h

@@ -0,0 +1,232 @@
/* $Date: 2005/03/07 23:59:05 $ $RCSfile: fpga_defs.h,v $ $Revision: 1.4 $ */
/*
* FPGA specific definitions
*/
#ifndef __CHELSIO_FPGA_DEFS_H__
#define __CHELSIO_FPGA_DEFS_H__
#define FPGA_PCIX_ADDR_VERSION 0xA08
#define FPGA_PCIX_ADDR_STAT 0xA0C
/* FPGA master interrupt Cause/Enable bits */
#define FPGA_PCIX_INTERRUPT_SGE_ERROR 0x1
#define FPGA_PCIX_INTERRUPT_SGE_DATA 0x2
#define FPGA_PCIX_INTERRUPT_TP 0x4
#define FPGA_PCIX_INTERRUPT_MC3 0x8
#define FPGA_PCIX_INTERRUPT_GMAC 0x10
#define FPGA_PCIX_INTERRUPT_PCIX 0x20
/* TP interrupt register addresses */
#define FPGA_TP_ADDR_INTERRUPT_ENABLE 0xA10
#define FPGA_TP_ADDR_INTERRUPT_CAUSE 0xA14
#define FPGA_TP_ADDR_VERSION 0xA18
/* TP interrupt Cause/Enable bits */
#define FPGA_TP_INTERRUPT_MC4 0x1
#define FPGA_TP_INTERRUPT_MC5 0x2
/*
* PM interrupt register addresses
*/
#define FPGA_MC3_REG_INTRENABLE 0xA20
#define FPGA_MC3_REG_INTRCAUSE 0xA24
#define FPGA_MC3_REG_VERSION 0xA28
/*
* GMAC interrupt register addresses
*/
#define FPGA_GMAC_ADDR_INTERRUPT_ENABLE 0xA30
#define FPGA_GMAC_ADDR_INTERRUPT_CAUSE 0xA34
#define FPGA_GMAC_ADDR_VERSION 0xA38
/* GMAC Cause/Enable bits */
#define FPGA_GMAC_INTERRUPT_PORT0 0x1
#define FPGA_GMAC_INTERRUPT_PORT1 0x2
#define FPGA_GMAC_INTERRUPT_PORT2 0x4
#define FPGA_GMAC_INTERRUPT_PORT3 0x8
/* MI0 registers */
#define A_MI0_CLK 0xb00
#define S_MI0_CLK_DIV 0
#define M_MI0_CLK_DIV 0xff
#define V_MI0_CLK_DIV(x) ((x) << S_MI0_CLK_DIV)
#define G_MI0_CLK_DIV(x) (((x) >> S_MI0_CLK_DIV) & M_MI0_CLK_DIV)
#define S_MI0_CLK_CNT 8
#define M_MI0_CLK_CNT 0xff
#define V_MI0_CLK_CNT(x) ((x) << S_MI0_CLK_CNT)
#define G_MI0_CLK_CNT(x) (((x) >> S_MI0_CLK_CNT) & M_MI0_CLK_CNT)
#define A_MI0_CSR 0xb04
#define S_MI0_CSR_POLL 0
#define V_MI0_CSR_POLL(x) ((x) << S_MI0_CSR_POLL)
#define F_MI0_CSR_POLL V_MI0_CSR_POLL(1U)
#define S_MI0_PREAMBLE 1
#define V_MI0_PREAMBLE(x) ((x) << S_MI0_PREAMBLE)
#define F_MI0_PREAMBLE V_MI0_PREAMBLE(1U)
#define S_MI0_INTR_ENABLE 2
#define V_MI0_INTR_ENABLE(x) ((x) << S_MI0_INTR_ENABLE)
#define F_MI0_INTR_ENABLE V_MI0_INTR_ENABLE(1U)
#define S_MI0_BUSY 3
#define V_MI0_BUSY(x) ((x) << S_MI0_BUSY)
#define F_MI0_BUSY V_MI0_BUSY(1U)
#define S_MI0_MDIO 4
#define V_MI0_MDIO(x) ((x) << S_MI0_MDIO)
#define F_MI0_MDIO V_MI0_MDIO(1U)
#define A_MI0_ADDR 0xb08
#define S_MI0_PHY_REG_ADDR 0
#define M_MI0_PHY_REG_ADDR 0x1f
#define V_MI0_PHY_REG_ADDR(x) ((x) << S_MI0_PHY_REG_ADDR)
#define G_MI0_PHY_REG_ADDR(x) (((x) >> S_MI0_PHY_REG_ADDR) & M_MI0_PHY_REG_ADDR)
#define S_MI0_PHY_ADDR 5
#define M_MI0_PHY_ADDR 0x1f
#define V_MI0_PHY_ADDR(x) ((x) << S_MI0_PHY_ADDR)
#define G_MI0_PHY_ADDR(x) (((x) >> S_MI0_PHY_ADDR) & M_MI0_PHY_ADDR)
#define A_MI0_DATA_EXT 0xb0c
#define A_MI0_DATA_INT 0xb10
/* GMAC registers */
#define A_GMAC_MACID_LO 0x28
#define A_GMAC_MACID_HI 0x2c
#define A_GMAC_CSR 0x30
#define S_INTERFACE 0
#define M_INTERFACE 0x3
#define V_INTERFACE(x) ((x) << S_INTERFACE)
#define G_INTERFACE(x) (((x) >> S_INTERFACE) & M_INTERFACE)
#define S_MAC_TX_ENABLE 2
#define V_MAC_TX_ENABLE(x) ((x) << S_MAC_TX_ENABLE)
#define F_MAC_TX_ENABLE V_MAC_TX_ENABLE(1U)
#define S_MAC_RX_ENABLE 3
#define V_MAC_RX_ENABLE(x) ((x) << S_MAC_RX_ENABLE)
#define F_MAC_RX_ENABLE V_MAC_RX_ENABLE(1U)
#define S_MAC_LB_ENABLE 4
#define V_MAC_LB_ENABLE(x) ((x) << S_MAC_LB_ENABLE)
#define F_MAC_LB_ENABLE V_MAC_LB_ENABLE(1U)
#define S_MAC_SPEED 5
#define M_MAC_SPEED 0x3
#define V_MAC_SPEED(x) ((x) << S_MAC_SPEED)
#define G_MAC_SPEED(x) (((x) >> S_MAC_SPEED) & M_MAC_SPEED)
#define S_MAC_HD_FC_ENABLE 7
#define V_MAC_HD_FC_ENABLE(x) ((x) << S_MAC_HD_FC_ENABLE)
#define F_MAC_HD_FC_ENABLE V_MAC_HD_FC_ENABLE(1U)
#define S_MAC_HALF_DUPLEX 8
#define V_MAC_HALF_DUPLEX(x) ((x) << S_MAC_HALF_DUPLEX)
#define F_MAC_HALF_DUPLEX V_MAC_HALF_DUPLEX(1U)
#define S_MAC_PROMISC 9
#define V_MAC_PROMISC(x) ((x) << S_MAC_PROMISC)
#define F_MAC_PROMISC V_MAC_PROMISC(1U)
#define S_MAC_MC_ENABLE 10
#define V_MAC_MC_ENABLE(x) ((x) << S_MAC_MC_ENABLE)
#define F_MAC_MC_ENABLE V_MAC_MC_ENABLE(1U)
#define S_MAC_RESET 11
#define V_MAC_RESET(x) ((x) << S_MAC_RESET)
#define F_MAC_RESET V_MAC_RESET(1U)
#define S_MAC_RX_PAUSE_ENABLE 12
#define V_MAC_RX_PAUSE_ENABLE(x) ((x) << S_MAC_RX_PAUSE_ENABLE)
#define F_MAC_RX_PAUSE_ENABLE V_MAC_RX_PAUSE_ENABLE(1U)
#define S_MAC_TX_PAUSE_ENABLE 13
#define V_MAC_TX_PAUSE_ENABLE(x) ((x) << S_MAC_TX_PAUSE_ENABLE)
#define F_MAC_TX_PAUSE_ENABLE V_MAC_TX_PAUSE_ENABLE(1U)
#define S_MAC_LWM_ENABLE 14
#define V_MAC_LWM_ENABLE(x) ((x) << S_MAC_LWM_ENABLE)
#define F_MAC_LWM_ENABLE V_MAC_LWM_ENABLE(1U)
#define S_MAC_MAGIC_PKT_ENABLE 15
#define V_MAC_MAGIC_PKT_ENABLE(x) ((x) << S_MAC_MAGIC_PKT_ENABLE)
#define F_MAC_MAGIC_PKT_ENABLE V_MAC_MAGIC_PKT_ENABLE(1U)
#define S_MAC_ISL_ENABLE 16
#define V_MAC_ISL_ENABLE(x) ((x) << S_MAC_ISL_ENABLE)
#define F_MAC_ISL_ENABLE V_MAC_ISL_ENABLE(1U)
#define S_MAC_JUMBO_ENABLE 17
#define V_MAC_JUMBO_ENABLE(x) ((x) << S_MAC_JUMBO_ENABLE)
#define F_MAC_JUMBO_ENABLE V_MAC_JUMBO_ENABLE(1U)
#define S_MAC_RX_PAD_ENABLE 18
#define V_MAC_RX_PAD_ENABLE(x) ((x) << S_MAC_RX_PAD_ENABLE)
#define F_MAC_RX_PAD_ENABLE V_MAC_RX_PAD_ENABLE(1U)
#define S_MAC_RX_CRC_ENABLE 19
#define V_MAC_RX_CRC_ENABLE(x) ((x) << S_MAC_RX_CRC_ENABLE)
#define F_MAC_RX_CRC_ENABLE V_MAC_RX_CRC_ENABLE(1U)
#define A_GMAC_IFS 0x34
#define S_MAC_IFS2 0
#define M_MAC_IFS2 0x3f
#define V_MAC_IFS2(x) ((x) << S_MAC_IFS2)
#define G_MAC_IFS2(x) (((x) >> S_MAC_IFS2) & M_MAC_IFS2)
#define S_MAC_IFS1 8
#define M_MAC_IFS1 0x7f
#define V_MAC_IFS1(x) ((x) << S_MAC_IFS1)
#define G_MAC_IFS1(x) (((x) >> S_MAC_IFS1) & M_MAC_IFS1)
#define A_GMAC_JUMBO_FRAME_LEN 0x38
#define A_GMAC_LNK_DLY 0x3c
#define A_GMAC_PAUSETIME 0x40
#define A_GMAC_MCAST_LO 0x44
#define A_GMAC_MCAST_HI 0x48
#define A_GMAC_MCAST_MASK_LO 0x4c
#define A_GMAC_MCAST_MASK_HI 0x50
#define A_GMAC_RMT_CNT 0x54
#define A_GMAC_RMT_DATA 0x58
#define A_GMAC_BACKOFF_SEED 0x5c
#define A_GMAC_TXF_THRES 0x60
#define S_TXF_READ_THRESHOLD 0
#define M_TXF_READ_THRESHOLD 0xff
#define V_TXF_READ_THRESHOLD(x) ((x) << S_TXF_READ_THRESHOLD)
#define G_TXF_READ_THRESHOLD(x) (((x) >> S_TXF_READ_THRESHOLD) & M_TXF_READ_THRESHOLD)
#define S_TXF_WRITE_THRESHOLD 16
#define M_TXF_WRITE_THRESHOLD 0xff
#define V_TXF_WRITE_THRESHOLD(x) ((x) << S_TXF_WRITE_THRESHOLD)
#define G_TXF_WRITE_THRESHOLD(x) (((x) >> S_TXF_WRITE_THRESHOLD) & M_TXF_WRITE_THRESHOLD)
#define MAC_REG_BASE 0x600
#define MAC_REG_ADDR(idx, reg) (MAC_REG_BASE + (idx) * 128 + (reg))
#define MAC_REG_IDLO(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_LO)
#define MAC_REG_IDHI(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_HI)
#define MAC_REG_CSR(idx) MAC_REG_ADDR(idx, A_GMAC_CSR)
#define MAC_REG_IFS(idx) MAC_REG_ADDR(idx, A_GMAC_IFS)
#define MAC_REG_LARGEFRAMELENGTH(idx) MAC_REG_ADDR(idx, A_GMAC_JUMBO_FRAME_LEN)
#define MAC_REG_LINKDLY(idx) MAC_REG_ADDR(idx, A_GMAC_LNK_DLY)
#define MAC_REG_PAUSETIME(idx) MAC_REG_ADDR(idx, A_GMAC_PAUSETIME)
#define MAC_REG_CASTLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_LO)
#define MAC_REG_MCASTHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_HI)
#define MAC_REG_CASTMASKLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_LO)
#define MAC_REG_MCASTMASKHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_HI)
#define MAC_REG_RMCNT(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_CNT)
#define MAC_REG_RMDATA(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_DATA)
#define MAC_REG_GMRANDBACKOFFSEED(idx) MAC_REG_ADDR(idx, A_GMAC_BACKOFF_SEED)
#define MAC_REG_TXFTHRESHOLDS(idx) MAC_REG_ADDR(idx, A_GMAC_TXF_THRES)
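/*
 * Worked example (illustrative): each FPGA MAC gets a 128-byte register
 * window starting at MAC_REG_BASE, so for MAC index 2
 * MAC_REG_CSR(2) == 0x600 + 2 * 128 + 0x30 == 0x730.
 */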
#endif

File: drivers/net/ethernet/chelsio/cxgb/gmac.h

@@ -0,0 +1,142 @@
/*****************************************************************************
* *
* File: gmac.h *
* $Revision: 1.6 $ *
* $Date: 2005/06/21 18:29:47 $ *
* Description: *
* Generic MAC functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_GMAC_H_
#define _CXGB_GMAC_H_
#include "common.h"
enum {
MAC_STATS_UPDATE_FAST,
MAC_STATS_UPDATE_FULL
};
enum {
MAC_DIRECTION_RX = 1,
MAC_DIRECTION_TX = 2
};
struct cmac_statistics {
/* Transmit */
u64 TxOctetsOK;
u64 TxOctetsBad;
u64 TxUnicastFramesOK;
u64 TxMulticastFramesOK;
u64 TxBroadcastFramesOK;
u64 TxPauseFrames;
u64 TxFramesWithDeferredXmissions;
u64 TxLateCollisions;
u64 TxTotalCollisions;
u64 TxFramesAbortedDueToXSCollisions;
u64 TxUnderrun;
u64 TxLengthErrors;
u64 TxInternalMACXmitError;
u64 TxFramesWithExcessiveDeferral;
u64 TxFCSErrors;
u64 TxJumboFramesOK;
u64 TxJumboOctetsOK;
/* Receive */
u64 RxOctetsOK;
u64 RxOctetsBad;
u64 RxUnicastFramesOK;
u64 RxMulticastFramesOK;
u64 RxBroadcastFramesOK;
u64 RxPauseFrames;
u64 RxFCSErrors;
u64 RxAlignErrors;
u64 RxSymbolErrors;
u64 RxDataErrors;
u64 RxSequenceErrors;
u64 RxRuntErrors;
u64 RxJabberErrors;
u64 RxInternalMACRcvError;
u64 RxInRangeLengthErrors;
u64 RxOutOfRangeLengthField;
u64 RxFrameTooLongErrors;
u64 RxJumboFramesOK;
u64 RxJumboOctetsOK;
};
struct cmac_ops {
void (*destroy)(struct cmac *);
int (*reset)(struct cmac *);
int (*interrupt_enable)(struct cmac *);
int (*interrupt_disable)(struct cmac *);
int (*interrupt_clear)(struct cmac *);
int (*interrupt_handler)(struct cmac *);
int (*enable)(struct cmac *, int);
int (*disable)(struct cmac *, int);
int (*loopback_enable)(struct cmac *);
int (*loopback_disable)(struct cmac *);
int (*set_mtu)(struct cmac *, int mtu);
int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm);
int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc);
int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex,
int *fc);
const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
};
typedef struct _cmac_instance cmac_instance;
struct cmac {
struct cmac_statistics stats;
adapter_t *adapter;
const struct cmac_ops *ops;
cmac_instance *instance;
};
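/*
 * Illustrative only: driving a MAC through the operations table above,
 * e.g. enabling both directions at once.  The wrapper is hypothetical,
 * though the MAC_DIRECTION_* bitmap usage matches the enum earlier in
 * this header.
 */
static inline int example_mac_enable_both(struct cmac *mac)
{
	return mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}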
struct gmac {
unsigned int stats_update_period;
struct cmac *(*create)(adapter_t *adapter, int index);
int (*reset)(adapter_t *);
};
extern const struct gmac t1_pm3393_ops;
extern const struct gmac t1_vsc7326_ops;
#endif /* _CXGB_GMAC_H_ */

File: drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c

@@ -0,0 +1,397 @@
/* $Date: 2005/10/24 23:18:13 $ $RCSfile: mv88e1xxx.c,v $ $Revision: 1.49 $ */
#include "common.h"
#include "mv88e1xxx.h"
#include "cphy.h"
#include "elmer0.h"
/* MV88E1XXX MDI crossover register values */
#define CROSSOVER_MDI 0
#define CROSSOVER_MDIX 1
#define CROSSOVER_AUTO 3
#define INTR_ENABLE_MASK 0x6CA0
/*
* Set the bits given by 'bitval' in PHY register 'reg'.
*/
static void mdio_set_bit(struct cphy *cphy, int reg, u32 bitval)
{
u32 val;
(void) simple_mdio_read(cphy, reg, &val);
(void) simple_mdio_write(cphy, reg, val | bitval);
}
/*
* Clear the bits given by 'bitval' in PHY register 'reg'.
*/
static void mdio_clear_bit(struct cphy *cphy, int reg, u32 bitval)
{
u32 val;
(void) simple_mdio_read(cphy, reg, &val);
(void) simple_mdio_write(cphy, reg, val & ~bitval);
}
/*
* NAME: mv88e1xxx_reset
*
* DESC: Reset the given PHY's port. NOTE: This is not a global
* chip reset.
*
* PARAMS: cphy - Pointer to PHY instance data.
*
* RETURN: 0 - Successful reset.
* -1 - Timeout.
*/
static int mv88e1xxx_reset(struct cphy *cphy, int wait)
{
u32 ctl;
int time_out = 1000;
mdio_set_bit(cphy, MII_BMCR, BMCR_RESET);
do {
(void) simple_mdio_read(cphy, MII_BMCR, &ctl);
ctl &= BMCR_RESET;
if (ctl)
udelay(1);
} while (ctl && --time_out);
return ctl ? -1 : 0;
}
static int mv88e1xxx_interrupt_enable(struct cphy *cphy)
{
/* Enable PHY interrupts. */
(void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER,
INTR_ENABLE_MASK);
/* Enable Marvell interrupts through Elmer0. */
if (t1_is_asic(cphy->adapter)) {
u32 elmer;
t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
elmer |= ELMER0_GP_BIT1;
if (is_T2(cphy->adapter))
elmer |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 | ELMER0_GP_BIT4;
t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
}
return 0;
}
static int mv88e1xxx_interrupt_disable(struct cphy *cphy)
{
/* Disable all phy interrupts. */
(void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, 0);
/* Disable Marvell interrupts through Elmer0. */
if (t1_is_asic(cphy->adapter)) {
u32 elmer;
t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
elmer &= ~ELMER0_GP_BIT1;
if (is_T2(cphy->adapter))
elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
}
return 0;
}
static int mv88e1xxx_interrupt_clear(struct cphy *cphy)
{
u32 elmer;
/* Clear PHY interrupts by reading the register. */
(void) simple_mdio_read(cphy,
MV88E1XXX_INTERRUPT_STATUS_REGISTER, &elmer);
/* Clear Marvell interrupts through Elmer0. */
if (t1_is_asic(cphy->adapter)) {
t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
elmer |= ELMER0_GP_BIT1;
if (is_T2(cphy->adapter))
elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
}
return 0;
}
/*
* Set the PHY speed and duplex. This also disables auto-negotiation, except
* for 1Gb/s, where auto-negotiation is mandatory.
*/
static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex)
{
u32 ctl;
(void) simple_mdio_read(phy, MII_BMCR, &ctl);
if (speed >= 0) {
ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
if (speed == SPEED_100)
ctl |= BMCR_SPEED100;
else if (speed == SPEED_1000)
ctl |= BMCR_SPEED1000;
}
if (duplex >= 0) {
ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
if (duplex == DUPLEX_FULL)
ctl |= BMCR_FULLDPLX;
}
if (ctl & BMCR_SPEED1000) /* auto-negotiation required for 1Gb/s */
ctl |= BMCR_ANENABLE;
(void) simple_mdio_write(phy, MII_BMCR, ctl);
return 0;
}
static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover)
{
u32 data32;
(void) simple_mdio_read(cphy,
MV88E1XXX_SPECIFIC_CNTRL_REGISTER, &data32);
data32 &= ~V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE);
data32 |= V_PSCR_MDI_XOVER_MODE(crossover);
(void) simple_mdio_write(cphy,
MV88E1XXX_SPECIFIC_CNTRL_REGISTER, data32);
return 0;
}
static int mv88e1xxx_autoneg_enable(struct cphy *cphy)
{
u32 ctl;
(void) mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO);
(void) simple_mdio_read(cphy, MII_BMCR, &ctl);
/* restart autoneg for change to take effect */
ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
(void) simple_mdio_write(cphy, MII_BMCR, ctl);
return 0;
}
static int mv88e1xxx_autoneg_disable(struct cphy *cphy)
{
u32 ctl;
/*
* Crossover *must* be set to manual in order to disable auto-neg.
* The Alaska FAQs document highlights this point.
*/
(void) mv88e1xxx_crossover_set(cphy, CROSSOVER_MDI);
/*
* Must include autoneg reset when disabling auto-neg. This
* is described in the Alaska FAQ document.
*/
(void) simple_mdio_read(cphy, MII_BMCR, &ctl);
ctl &= ~BMCR_ANENABLE;
(void) simple_mdio_write(cphy, MII_BMCR, ctl | BMCR_ANRESTART);
return 0;
}
static int mv88e1xxx_autoneg_restart(struct cphy *cphy)
{
mdio_set_bit(cphy, MII_BMCR, BMCR_ANRESTART);
return 0;
}
static int mv88e1xxx_advertise(struct cphy *phy, unsigned int advertise_map)
{
u32 val = 0;
if (advertise_map &
(ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
(void) simple_mdio_read(phy, MII_GBCR, &val);
val &= ~(GBCR_ADV_1000HALF | GBCR_ADV_1000FULL);
if (advertise_map & ADVERTISED_1000baseT_Half)
val |= GBCR_ADV_1000HALF;
if (advertise_map & ADVERTISED_1000baseT_Full)
val |= GBCR_ADV_1000FULL;
}
(void) simple_mdio_write(phy, MII_GBCR, val);
val = 1;
if (advertise_map & ADVERTISED_10baseT_Half)
val |= ADVERTISE_10HALF;
if (advertise_map & ADVERTISED_10baseT_Full)
val |= ADVERTISE_10FULL;
if (advertise_map & ADVERTISED_100baseT_Half)
val |= ADVERTISE_100HALF;
if (advertise_map & ADVERTISED_100baseT_Full)
val |= ADVERTISE_100FULL;
if (advertise_map & ADVERTISED_PAUSE)
val |= ADVERTISE_PAUSE;
if (advertise_map & ADVERTISED_ASYM_PAUSE)
val |= ADVERTISE_PAUSE_ASYM;
(void) simple_mdio_write(phy, MII_ADVERTISE, val);
return 0;
}
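/*
 * Worked example of the mapping above (standard <linux/mii.h> values;
 * the concrete map below is illustrative, not taken from this patch):
 * advertise_map = ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Full |
 * ADVERTISED_PAUSE writes GBCR_ADV_1000FULL (0x200) to MII_GBCR and
 * 1 | ADVERTISE_100FULL | ADVERTISE_PAUSE to MII_ADVERTISE; the low bit
 * is the IEEE 802.3 selector field, which is why 'val' starts at 1.
 */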
static int mv88e1xxx_set_loopback(struct cphy *cphy, int on)
{
if (on)
mdio_set_bit(cphy, MII_BMCR, BMCR_LOOPBACK);
else
mdio_clear_bit(cphy, MII_BMCR, BMCR_LOOPBACK);
return 0;
}
static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok,
int *speed, int *duplex, int *fc)
{
u32 status;
int sp = -1, dplx = -1, pause = 0;
(void) simple_mdio_read(cphy,
MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
if ((status & V_PSSR_STATUS_RESOLVED) != 0) {
if (status & V_PSSR_RX_PAUSE)
pause |= PAUSE_RX;
if (status & V_PSSR_TX_PAUSE)
pause |= PAUSE_TX;
dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
sp = G_PSSR_SPEED(status);
if (sp == 0)
sp = SPEED_10;
else if (sp == 1)
sp = SPEED_100;
else
sp = SPEED_1000;
}
if (link_ok)
*link_ok = (status & V_PSSR_LINK) != 0;
if (speed)
*speed = sp;
if (duplex)
*duplex = dplx;
if (fc)
*fc = pause;
return 0;
}
static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable)
{
u32 val;
(void) simple_mdio_read(cphy,
MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, &val);
/*
* Set the downshift counter to 2 so we try to establish Gb link
* twice before downshifting.
*/
val &= ~(V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT));
if (downshift_enable)
val |= V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2);
(void) simple_mdio_write(cphy,
MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, val);
return 0;
}
static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
{
int cphy_cause = 0;
u32 status;
/*
* Loop until cause reads zero. Need to handle bouncing interrupts.
*/
while (1) {
u32 cause;
(void) simple_mdio_read(cphy,
MV88E1XXX_INTERRUPT_STATUS_REGISTER,
&cause);
cause &= INTR_ENABLE_MASK;
if (!cause)
break;
if (cause & MV88E1XXX_INTR_LINK_CHNG) {
(void) simple_mdio_read(cphy,
MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
if (status & MV88E1XXX_INTR_LINK_CHNG)
cphy->state |= PHY_LINK_UP;
else {
cphy->state &= ~PHY_LINK_UP;
if (cphy->state & PHY_AUTONEG_EN)
cphy->state &= ~PHY_AUTONEG_RDY;
cphy_cause |= cphy_cause_link_change;
}
}
if (cause & MV88E1XXX_INTR_AUTONEG_DONE)
cphy->state |= PHY_AUTONEG_RDY;
if ((cphy->state & (PHY_LINK_UP | PHY_AUTONEG_RDY)) ==
(PHY_LINK_UP | PHY_AUTONEG_RDY))
cphy_cause |= cphy_cause_link_change;
}
return cphy_cause;
}
static void mv88e1xxx_destroy(struct cphy *cphy)
{
kfree(cphy);
}
static struct cphy_ops mv88e1xxx_ops = {
.destroy = mv88e1xxx_destroy,
.reset = mv88e1xxx_reset,
.interrupt_enable = mv88e1xxx_interrupt_enable,
.interrupt_disable = mv88e1xxx_interrupt_disable,
.interrupt_clear = mv88e1xxx_interrupt_clear,
.interrupt_handler = mv88e1xxx_interrupt_handler,
.autoneg_enable = mv88e1xxx_autoneg_enable,
.autoneg_disable = mv88e1xxx_autoneg_disable,
.autoneg_restart = mv88e1xxx_autoneg_restart,
.advertise = mv88e1xxx_advertise,
.set_loopback = mv88e1xxx_set_loopback,
.set_speed_duplex = mv88e1xxx_set_speed_duplex,
.get_link_status = mv88e1xxx_get_link_status,
};
static struct cphy *mv88e1xxx_phy_create(struct net_device *dev, int phy_addr,
const struct mdio_ops *mdio_ops)
{
struct adapter *adapter = netdev_priv(dev);
struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
if (!cphy)
return NULL;
cphy_init(cphy, dev, phy_addr, &mv88e1xxx_ops, mdio_ops);
/* Configure particular PHYs to run in a different mode. */
if ((board_info(adapter)->caps & SUPPORTED_TP) &&
board_info(adapter)->chip_phy == CHBT_PHY_88E1111) {
/*
* Configure the PHY transmitter as class A to reduce EMI.
*/
(void) simple_mdio_write(cphy,
MV88E1XXX_EXTENDED_ADDR_REGISTER, 0xB);
(void) simple_mdio_write(cphy,
MV88E1XXX_EXTENDED_REGISTER, 0x8004);
}
(void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */
/* LED */
if (is_T2(adapter)) {
(void) simple_mdio_write(cphy,
MV88E1XXX_LED_CONTROL_REGISTER, 0x1);
}
return cphy;
}
static int mv88e1xxx_phy_reset(adapter_t *adapter)
{
return 0;
}
const struct gphy t1_mv88e1xxx_ops = {
.create = mv88e1xxx_phy_create,
.reset = mv88e1xxx_phy_reset
};

View File

@@ -0,0 +1,127 @@
/* $Date: 2005/03/07 23:59:05 $ $RCSfile: mv88e1xxx.h,v $ $Revision: 1.13 $ */
#ifndef CHELSIO_MV8E1XXX_H
#define CHELSIO_MV8E1XXX_H
#ifndef BMCR_SPEED1000
# define BMCR_SPEED1000 0x40
#endif
#ifndef ADVERTISE_PAUSE
# define ADVERTISE_PAUSE 0x400
#endif
#ifndef ADVERTISE_PAUSE_ASYM
# define ADVERTISE_PAUSE_ASYM 0x800
#endif
/* Gigabit MII registers */
#define MII_GBCR 9 /* 1000Base-T control register */
#define MII_GBSR 10 /* 1000Base-T status register */
/* 1000Base-T control register fields */
#define GBCR_ADV_1000HALF 0x100
#define GBCR_ADV_1000FULL 0x200
#define GBCR_PREFER_MASTER 0x400
#define GBCR_MANUAL_AS_MASTER 0x800
#define GBCR_MANUAL_CONFIG_ENABLE 0x1000
/* 1000Base-T status register fields */
#define GBSR_LP_1000HALF 0x400
#define GBSR_LP_1000FULL 0x800
#define GBSR_REMOTE_OK 0x1000
#define GBSR_LOCAL_OK 0x2000
#define GBSR_LOCAL_MASTER 0x4000
#define GBSR_MASTER_FAULT 0x8000
/* Marvell PHY interrupt status bits. */
#define MV88E1XXX_INTR_JABBER 0x0001
#define MV88E1XXX_INTR_POLARITY_CHNG 0x0002
#define MV88E1XXX_INTR_ENG_DETECT_CHNG 0x0010
#define MV88E1XXX_INTR_DOWNSHIFT 0x0020
#define MV88E1XXX_INTR_MDI_XOVER_CHNG 0x0040
#define MV88E1XXX_INTR_FIFO_OVER_UNDER 0x0080
#define MV88E1XXX_INTR_FALSE_CARRIER 0x0100
#define MV88E1XXX_INTR_SYMBOL_ERROR 0x0200
#define MV88E1XXX_INTR_LINK_CHNG 0x0400
#define MV88E1XXX_INTR_AUTONEG_DONE 0x0800
#define MV88E1XXX_INTR_PAGE_RECV 0x1000
#define MV88E1XXX_INTR_DUPLEX_CHNG 0x2000
#define MV88E1XXX_INTR_SPEED_CHNG 0x4000
#define MV88E1XXX_INTR_AUTONEG_ERR 0x8000
/* Marvell PHY specific registers. */
#define MV88E1XXX_SPECIFIC_CNTRL_REGISTER 16
#define MV88E1XXX_SPECIFIC_STATUS_REGISTER 17
#define MV88E1XXX_INTERRUPT_ENABLE_REGISTER 18
#define MV88E1XXX_INTERRUPT_STATUS_REGISTER 19
#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER 20
#define MV88E1XXX_RECV_ERR_CNTR_REGISTER 21
#define MV88E1XXX_RES_REGISTER 22
#define MV88E1XXX_GLOBAL_STATUS_REGISTER 23
#define MV88E1XXX_LED_CONTROL_REGISTER 24
#define MV88E1XXX_MANUAL_LED_OVERRIDE_REGISTER 25
#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER 26
#define MV88E1XXX_EXT_PHY_SPECIFIC_STATUS_REGISTER 27
#define MV88E1XXX_VIRTUAL_CABLE_TESTER_REGISTER 28
#define MV88E1XXX_EXTENDED_ADDR_REGISTER 29
#define MV88E1XXX_EXTENDED_REGISTER 30
/* PHY specific control register fields */
#define S_PSCR_MDI_XOVER_MODE 5
#define M_PSCR_MDI_XOVER_MODE 0x3
#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE)
#define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE)
/* Extended PHY specific control register fields */
#define S_DOWNSHIFT_ENABLE 8
#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE)
#define S_DOWNSHIFT_CNT 9
#define M_DOWNSHIFT_CNT 0x7
#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT)
#define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT)
/* PHY specific status register fields */
#define S_PSSR_JABBER 0
#define V_PSSR_JABBER (1 << S_PSSR_JABBER)
#define S_PSSR_POLARITY 1
#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY)
#define S_PSSR_RX_PAUSE 2
#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE)
#define S_PSSR_TX_PAUSE 3
#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE)
#define S_PSSR_ENERGY_DETECT 4
#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT)
#define S_PSSR_DOWNSHIFT_STATUS 5
#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS)
#define S_PSSR_MDI 6
#define V_PSSR_MDI (1 << S_PSSR_MDI)
#define S_PSSR_CABLE_LEN 7
#define M_PSSR_CABLE_LEN 0x7
#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN)
#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN)
#define S_PSSR_LINK 10
#define V_PSSR_LINK (1 << S_PSSR_LINK)
#define S_PSSR_STATUS_RESOLVED 11
#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED)
#define S_PSSR_PAGE_RECEIVED 12
#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED)
#define S_PSSR_DUPLEX 13
#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX)
#define S_PSSR_SPEED 14
#define M_PSSR_SPEED 0x3
#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED)
#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED)
#endif
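/*
 * Usage note on the S_/M_/V_/G_ helpers above (register value is
 * illustrative): G_PSSR_SPEED(0x8c00) = (0x8c00 >> 14) & 0x3 = 2, which
 * mv88e1xxx_get_link_status() maps to SPEED_1000, and
 * V_PSCR_MDI_XOVER_MODE(CROSSOVER_AUTO) = 3 << 5 = 0x60, the value
 * mv88e1xxx_crossover_set() ORs into the PHY specific control register.
 */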

View File

@@ -0,0 +1,260 @@
/*****************************************************************************
* *
* File: mv88x201x.c *
* $Revision: 1.12 $ *
* $Date: 2005/04/15 19:27:14 $ *
* Description: *
* Marvell PHY (mv88x201x) functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#include "cphy.h"
#include "elmer0.h"
/*
* The 88x2010 Rev C. requires some link status registers to be read
* twice in order to get the right values. Future revisions will fix
* this problem and then this macro can disappear.
*/
#define MV88x2010_LINK_STATUS_BUGS 1
static int led_init(struct cphy *cphy)
{
/* Set up the LED registers so we can turn the LEDs on/off.
* Writing these bits maps control to another
* register. mmd(0x1) addr(0x7)
*/
cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8304, 0xdddd);
return 0;
}
static int led_link(struct cphy *cphy, u32 do_enable)
{
u32 led = 0;
#define LINK_ENABLE_BIT 0x1
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, &led);
if (do_enable & LINK_ENABLE_BIT) {
led |= LINK_ENABLE_BIT;
cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
} else {
led &= ~LINK_ENABLE_BIT;
cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
}
return 0;
}
/* Port Reset */
static int mv88x201x_reset(struct cphy *cphy, int wait)
{
/* This can be done through registers. It is not required since
* a full chip reset is used.
*/
return 0;
}
static int mv88x201x_interrupt_enable(struct cphy *cphy)
{
/* Enable PHY LASI interrupts. */
cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
MDIO_PMA_LASI_LSALARM);
/* Enable Marvell interrupts through Elmer0. */
if (t1_is_asic(cphy->adapter)) {
u32 elmer;
t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
elmer |= ELMER0_GP_BIT6;
t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
}
return 0;
}
static int mv88x201x_interrupt_disable(struct cphy *cphy)
{
/* Disable PHY LASI interrupts. */
cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0x0);
/* Disable Marvell interrupts through Elmer0. */
if (t1_is_asic(cphy->adapter)) {
u32 elmer;
t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
elmer &= ~ELMER0_GP_BIT6;
t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
}
return 0;
}
static int mv88x201x_interrupt_clear(struct cphy *cphy)
{
u32 elmer;
u32 val;
#ifdef MV88x2010_LINK_STATUS_BUGS
/* Required to read twice before the clear takes effect. */
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
/* Read this register after the others above it else
* the register doesn't clear correctly.
*/
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
#endif
/* Clear link status. */
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
/* Clear PHY LASI interrupts. */
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
#ifdef MV88x2010_LINK_STATUS_BUGS
/* Do it again. */
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
#endif
/* Clear Marvell interrupts through Elmer0. */
if (t1_is_asic(cphy->adapter)) {
t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
elmer |= ELMER0_GP_BIT6;
t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
}
return 0;
}
static int mv88x201x_interrupt_handler(struct cphy *cphy)
{
/* Clear interrupts */
mv88x201x_interrupt_clear(cphy);
/* We have only enabled link change interrupts and so
* cphy_cause must be a link change interrupt.
*/
return cphy_cause_link_change;
}
static int mv88x201x_set_loopback(struct cphy *cphy, int on)
{
return 0;
}
static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
int *speed, int *duplex, int *fc)
{
u32 val = 0;
if (link_ok) {
/* Read link status. */
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
val &= MDIO_STAT1_LSTATUS;
*link_ok = (val == MDIO_STAT1_LSTATUS);
/* Turn on/off Link LED */
led_link(cphy, *link_ok);
}
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
if (fc)
*fc = PAUSE_RX | PAUSE_TX;
return 0;
}
static void mv88x201x_destroy(struct cphy *cphy)
{
kfree(cphy);
}
static struct cphy_ops mv88x201x_ops = {
.destroy = mv88x201x_destroy,
.reset = mv88x201x_reset,
.interrupt_enable = mv88x201x_interrupt_enable,
.interrupt_disable = mv88x201x_interrupt_disable,
.interrupt_clear = mv88x201x_interrupt_clear,
.interrupt_handler = mv88x201x_interrupt_handler,
.get_link_status = mv88x201x_get_link_status,
.set_loopback = mv88x201x_set_loopback,
.mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
MDIO_DEVS_PHYXS | MDIO_DEVS_WIS),
};
static struct cphy *mv88x201x_phy_create(struct net_device *dev, int phy_addr,
const struct mdio_ops *mdio_ops)
{
u32 val;
struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
if (!cphy)
return NULL;
cphy_init(cphy, dev, phy_addr, &mv88x201x_ops, mdio_ops);
/* Commands the PHY to enable XFP's clock. */
cphy_mdio_read(cphy, MDIO_MMD_PCS, 0x8300, &val);
cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8300, val | 1);
/* Clear link status. Required because of a bug in the PHY. */
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT2, &val);
cphy_mdio_read(cphy, MDIO_MMD_PCS, MDIO_STAT2, &val);
/* Allows the Link/Ack LED to be turned on/off */
led_init(cphy);
return cphy;
}
/* Chip Reset */
static int mv88x201x_phy_reset(adapter_t *adapter)
{
u32 val;
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~4;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
msleep(100);
t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
msleep(1000);
/* Now let's enable the laser. Delay 100us. */
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val |= 0x8000;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(100);
return 0;
}
const struct gphy t1_mv88x201x_ops = {
.create = mv88x201x_phy_create,
.reset = mv88x201x_phy_reset
};

View File

@@ -0,0 +1,209 @@
/* $Date: 2005/11/12 02:13:49 $ $RCSfile: my3126.c,v $ $Revision: 1.15 $ */
#include "cphy.h"
#include "elmer0.h"
#include "suni1x10gexp_regs.h"
/* Port Reset */
static int my3126_reset(struct cphy *cphy, int wait)
{
/*
* This can be done through registers. It is not required since
* a full chip reset is used.
*/
return 0;
}
static int my3126_interrupt_enable(struct cphy *cphy)
{
schedule_delayed_work(&cphy->phy_update, HZ/30);
t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo);
return 0;
}
static int my3126_interrupt_disable(struct cphy *cphy)
{
cancel_delayed_work_sync(&cphy->phy_update);
return 0;
}
static int my3126_interrupt_clear(struct cphy *cphy)
{
return 0;
}
#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
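/*
 * TPI registers are word-addressed, so OFFSET() converts a register
 * address to a byte offset; e.g. OFFSET(0x2040) == 0x2040 << 2 == 0x8100.
 */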
static int my3126_interrupt_handler(struct cphy *cphy)
{
u32 val;
u16 val16;
u16 status;
u32 act_count;
adapter_t *adapter;
adapter = cphy->adapter;
if (cphy->count == 50) {
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
val16 = (u16) val;
status = cphy->bmsr ^ val16;
if (status & MDIO_STAT1_LSTATUS)
t1_link_changed(adapter, 0);
cphy->bmsr = val16;
/* We have only enabled link change interrupts, so this must be a link change. */
cphy->count = 0;
}
t1_tpi_write(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_CONTROL),
SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
t1_tpi_read(adapter,
OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW), &act_count);
t1_tpi_read(adapter,
OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW), &val);
act_count += val;
/* Populate elmer_gpo with the register value */
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
cphy->elmer_gpo = val;
if ((val & (1 << 8)) || (val & (1 << 19)) ||
    (cphy->act_count == act_count) || cphy->act_on) {
if (is_T2(adapter))
val |= (1 << 9);
else if (t1_is_T1B(adapter))
val |= (1 << 20);
cphy->act_on = 0;
} else {
if (is_T2(adapter))
val &= ~(1 << 9);
else if (t1_is_T1B(adapter))
val &= ~(1 << 20);
cphy->act_on = 1;
}
t1_tpi_write(adapter, A_ELMER0_GPO, val);
cphy->elmer_gpo = val;
cphy->act_count = act_count;
cphy->count++;
return cphy_cause_link_change;
}
static void my3126_poll(struct work_struct *work)
{
struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
my3126_interrupt_handler(cphy);
}
static int my3126_set_loopback(struct cphy *cphy, int on)
{
return 0;
}
/* To check the activity LED */
static int my3126_get_link_status(struct cphy *cphy,
int *link_ok, int *speed, int *duplex, int *fc)
{
u32 val;
u16 val16;
adapter_t *adapter;
adapter = cphy->adapter;
cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
val16 = (u16) val;
/* Populate elmer_gpo with the register value */
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
cphy->elmer_gpo = val;
*link_ok = (val16 & MDIO_STAT1_LSTATUS);
if (*link_ok) {
/* Turn on the LED. */
if (is_T2(adapter))
val &= ~(1 << 8);
else if (t1_is_T1B(adapter))
val &= ~(1 << 19);
} else {
/* Turn off the LED. */
if (is_T2(adapter))
val |= (1 << 8);
else if (t1_is_T1B(adapter))
val |= (1 << 19);
}
t1_tpi_write(adapter, A_ELMER0_GPO, val);
cphy->elmer_gpo = val;
*speed = SPEED_10000;
*duplex = DUPLEX_FULL;
/* need to add flow control */
if (fc)
*fc = PAUSE_RX | PAUSE_TX;
return 0;
}
static void my3126_destroy(struct cphy *cphy)
{
kfree(cphy);
}
static struct cphy_ops my3126_ops = {
.destroy = my3126_destroy,
.reset = my3126_reset,
.interrupt_enable = my3126_interrupt_enable,
.interrupt_disable = my3126_interrupt_disable,
.interrupt_clear = my3126_interrupt_clear,
.interrupt_handler = my3126_interrupt_handler,
.get_link_status = my3126_get_link_status,
.set_loopback = my3126_set_loopback,
.mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
MDIO_DEVS_PHYXS),
};
static struct cphy *my3126_phy_create(struct net_device *dev,
int phy_addr, const struct mdio_ops *mdio_ops)
{
struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
if (!cphy)
return NULL;
cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops);
INIT_DELAYED_WORK(&cphy->phy_update, my3126_poll);
cphy->bmsr = 0;
return cphy;
}
/* Chip Reset */
static int my3126_phy_reset(adapter_t *adapter)
{
u32 val;
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~4;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
msleep(100);
t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
msleep(1000);
/* Now let's enable the laser. Delay 100us. */
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val |= 0x8000;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(100);
return 0;
}
const struct gphy t1_my3126_ops = {
.create = my3126_phy_create,
.reset = my3126_phy_reset
};

View File

@@ -0,0 +1,796 @@
/*****************************************************************************
* *
* File: pm3393.c *
* $Revision: 1.16 $ *
* $Date: 2005/05/14 00:59:32 $ *
* Description: *
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#include "common.h"
#include "regs.h"
#include "gmac.h"
#include "elmer0.h"
#include "suni1x10gexp_regs.h"
#include <linux/crc32.h>
#include <linux/slab.h>
#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
#define MAX_FRAME_SIZE 9600
#define IPG 12
#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
SUNI1x10GEXP_BITMSK_TXXG_PADEN)
#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
/* Update statistics every 15 minutes */
#define STATS_TICK_SECS (15 * 60)
enum { /* RMON registers */
RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW,
RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW,
TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW,
TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW,
TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW
};
struct _cmac_instance {
u8 enabled;
u8 fc;
u8 mac_addr[6];
};
static int pmread(struct cmac *cmac, u32 reg, u32 *data32)
{
t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
return 0;
}
static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
{
t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
return 0;
}
/* Port reset. */
static int pm3393_reset(struct cmac *cmac)
{
return 0;
}
/*
* Enable interrupts for the PM3393
*
* 1. Enable PM3393 BLOCK interrupts.
* 2. Enable PM3393 Master Interrupt bit(INTE)
* 3. Enable ELMER's PM3393 bit.
* 4. Enable Terminator external interrupt.
*/
static int pm3393_interrupt_enable(struct cmac *cmac)
{
u32 pl_intr;
/* PM3393 - Enabling all hardware block interrupts.
*/
pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
/* Don't interrupt on statistics overflow, we are polling */
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
/* PM3393 - Global interrupt enable
*/
/* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
/* TERMINATOR - PL_INTERRUPTS_EXT */
pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
return 0;
}
static int pm3393_interrupt_disable(struct cmac *cmac)
{
u32 elmer;
/* PM3393 - Disabling HW interrupt blocks. */
pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
/* PM3393 - Global interrupt enable */
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
/* ELMER - External chip interrupts. */
t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
elmer &= ~ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
/* TERMINATOR - PL_INTERRUPTS_EXT */
/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
* COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
*/
return 0;
}
static int pm3393_interrupt_clear(struct cmac *cmac)
{
u32 elmer;
u32 pl_intr;
u32 val32;
/* PM3393 - Clearing HW interrupt blocks. Note, this assumes
* bit WCIMODE=0 for a clear-on-read.
*/
pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
&val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
/* PM3393 - Global interrupt status
*/
pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
/* ELMER - External chip interrupts.
*/
t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
elmer |= ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
/* TERMINATOR - PL_INTERRUPTS_EXT
*/
pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
return 0;
}
/* Interrupt handler */
static int pm3393_interrupt_handler(struct cmac *cmac)
{
u32 master_intr_status;
/* Read the master interrupt status register. */
pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
&master_intr_status);
if (netif_msg_intr(cmac->adapter))
dev_dbg(&cmac->adapter->pdev->dev, "PM3393 intr cause 0x%x\n",
master_intr_status);
/* TBD XXX Let's just clear everything for now */
pm3393_interrupt_clear(cmac);
return 0;
}
static int pm3393_enable(struct cmac *cmac, int which)
{
if (which & MAC_DIRECTION_RX)
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
(RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
if (which & MAC_DIRECTION_TX) {
u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
if (cmac->instance->fc & PAUSE_RX)
val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
if (cmac->instance->fc & PAUSE_TX)
val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
}
cmac->instance->enabled |= which;
return 0;
}
static int pm3393_enable_port(struct cmac *cmac, int which)
{
/* Clear port statistics */
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
udelay(2);
memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
pm3393_enable(cmac, which);
/*
* XXX This should be done by the PHY and preferably not at all.
* The PHY doesn't give us link status indication on its own so have
* the link management code query it instead.
*/
t1_link_changed(cmac->adapter, 0);
return 0;
}
static int pm3393_disable(struct cmac *cmac, int which)
{
if (which & MAC_DIRECTION_RX)
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
if (which & MAC_DIRECTION_TX)
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
/*
* The disable is graceful. Give the PM3393 time. Can't wait very
* long here, we may be holding locks.
*/
udelay(20);
cmac->instance->enabled &= ~which;
return 0;
}
static int pm3393_loopback_enable(struct cmac *cmac)
{
return 0;
}
static int pm3393_loopback_disable(struct cmac *cmac)
{
return 0;
}
static int pm3393_set_mtu(struct cmac *cmac, int mtu)
{
int enabled = cmac->instance->enabled;
/* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
mtu += 14 + 4;
if (mtu > MAX_FRAME_SIZE)
return -EINVAL;
/* Disable Rx/Tx MAC before configuring it. */
if (enabled)
pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
if (enabled)
pm3393_enable(cmac, enabled);
return 0;
}
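/*
 * Worked example of the bound above: mtu = 9582 gives a frame of
 * 9582 + 14 + 4 = 9600 == MAX_FRAME_SIZE, so 9582 is the largest
 * MTU this MAC accepts.
 */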
static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
{
int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
u32 rx_mode;
/* Disable MAC RX before reconfiguring it */
if (enabled)
pm3393_disable(cmac, MAC_DIRECTION_RX);
pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
(u16)rx_mode);
if (t1_rx_mode_promisc(rm)) {
/* Promiscuous mode. */
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
}
if (t1_rx_mode_allmulti(rm)) {
/* Accept all multicast. */
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
} else if (t1_rx_mode_mc_cnt(rm)) {
/* Accept one or more multicast(s). */
struct netdev_hw_addr *ha;
int bit;
u16 mc_filter[4] = { 0, };
netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) {
/* bit[23:28] */
bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
mc_filter[bit >> 4] |= 1 << (bit & 0xf);
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
if (enabled)
pm3393_enable(cmac, MAC_DIRECTION_RX);
return 0;
}
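/*
 * Hash example (the CRC value is hypothetical): if ether_crc() returns
 * 0x1a800000, then bit = (0x1a800000 >> 23) & 0x3f = 0x35, so the loop
 * sets bit (0x35 & 0xf) = 5 in mc_filter[0x35 >> 4] = mc_filter[3].
 */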
static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
int *duplex, int *fc)
{
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
if (fc)
*fc = cmac->instance->fc;
return 0;
}
static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
int fc)
{
if (speed >= 0 && speed != SPEED_10000)
return -1;
if (duplex >= 0 && duplex != DUPLEX_FULL)
return -1;
if (fc & ~(PAUSE_TX | PAUSE_RX))
return -1;
if (fc != cmac->instance->fc) {
cmac->instance->fc = (u8) fc;
if (cmac->instance->enabled & MAC_DIRECTION_TX)
pm3393_enable(cmac, MAC_DIRECTION_TX);
}
return 0;
}
#define RMON_UPDATE(mac, name, stat_name) \
{ \
t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \
t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \
(mac)->stats.stat_name = (u64)(val0 & 0xffff) | \
((u64)(val1 & 0xffff) << 16) | \
((u64)(val2 & 0xff) << 32) | \
((mac)->stats.stat_name & \
0xffffff0000000000ULL); \
if (ro & \
(1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
(mac)->stats.stat_name += 1ULL << 40; \
}
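/*
 * Editor's sketch of what RMON_UPDATE computes for a single counter --
 * illustrative only, not part of this patch: the low 40 bits come from
 * three 16/16/8-bit TPI reads, the top 24 bits are carried over from the
 * previous value, and a set rollover bit adds 2^40.
 */
static u64 rmon_counter_sketch(u32 val0, u32 val1, u32 val2,
			       u64 prev, int rolled)
{
	u64 v = (u64)(val0 & 0xffff) |
		((u64)(val1 & 0xffff) << 16) |
		((u64)(val2 & 0xff) << 32) |
		(prev & 0xffffff0000000000ULL);

	return rolled ? v + (1ULL << 40) : v;
}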
static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
int flag)
{
u64 ro;
u32 val0, val1, val2, val3;
/* Snap the counters */
pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
/* Counter rollover, clear on read */
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
(((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
/* Rx stats */
RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
RxInternalMACRcvError);
RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
RMON_UPDATE(mac, RxFragments, RxRuntErrors);
RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
/* Tx stats */
RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
TxInternalMACXmitError);
RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
return &mac->stats;
}
static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
{
memcpy(mac_addr, cmac->instance->mac_addr, 6);
return 0;
}
static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
{
u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
/*
* MAC addr: 00:07:43:00:13:09
*
* ma[5] = 0x09
* ma[4] = 0x13
* ma[3] = 0x00
* ma[2] = 0x43
* ma[1] = 0x07
* ma[0] = 0x00
*
* The PM3393 requires byte swapping and reverse order entry
* when programming MAC addresses:
*
* low_bits[15:0] = ma[1]:ma[0]
* mid_bits[31:16] = ma[3]:ma[2]
* high_bits[47:32] = ma[5]:ma[4]
*/
/* Store local copy */
memcpy(cmac->instance->mac_addr, ma, 6);
lo = ((u32) ma[1] << 8) | (u32) ma[0];
mid = ((u32) ma[3] << 8) | (u32) ma[2];
hi = ((u32) ma[5] << 8) | (u32) ma[4];
/* Disable Rx/Tx MAC before configuring it. */
if (enabled)
pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
/* Set RXXG Station Address */
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
/* Set TXXG Station Address */
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
/* Setup Exact Match Filter 1 with our MAC address
*
* Must disable exact match filter before configuring it.
*/
pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
val &= 0xff0f;
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
val |= 0x0090;
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
if (enabled)
pm3393_enable(cmac, enabled);
return 0;
}
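/*
 * Plugging the example address from the comment above through the code:
 * for 00:07:43:00:13:09, lo = (0x07 << 8) | 0x00 = 0x0700,
 * mid = (0x00 << 8) | 0x43 = 0x0043, hi = (0x09 << 8) | 0x13 = 0x0913.
 */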
static void pm3393_destroy(struct cmac *cmac)
{
kfree(cmac);
}
static struct cmac_ops pm3393_ops = {
.destroy = pm3393_destroy,
.reset = pm3393_reset,
.interrupt_enable = pm3393_interrupt_enable,
.interrupt_disable = pm3393_interrupt_disable,
.interrupt_clear = pm3393_interrupt_clear,
.interrupt_handler = pm3393_interrupt_handler,
.enable = pm3393_enable_port,
.disable = pm3393_disable,
.loopback_enable = pm3393_loopback_enable,
.loopback_disable = pm3393_loopback_disable,
.set_mtu = pm3393_set_mtu,
.set_rx_mode = pm3393_set_rx_mode,
.get_speed_duplex_fc = pm3393_get_speed_duplex_fc,
.set_speed_duplex_fc = pm3393_set_speed_duplex_fc,
.statistics_update = pm3393_update_statistics,
.macaddress_get = pm3393_macaddress_get,
.macaddress_set = pm3393_macaddress_set
};
static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
{
struct cmac *cmac;
cmac = kzalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
if (!cmac)
return NULL;
cmac->ops = &pm3393_ops;
cmac->instance = (cmac_instance *) (cmac + 1);
cmac->adapter = adapter;
cmac->instance->fc = PAUSE_TX | PAUSE_RX;
t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */
t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */
t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */
t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */
t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */
t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */
t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */
t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */
t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */
t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */
t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */
t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt. The other bits are undocumented */
t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */
t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */
t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */
t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */
t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */
t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */
t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */
t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */
t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bits are undocumented */
t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */
t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */
t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */
t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */
t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */
t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */
t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */
t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */
t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */
t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */
/* For T1 use timer-based MAC flow control. */
t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */
t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */
t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */
/* Setup Exact Match Filter 0 to allow broadcast packets.
*/
t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */
t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */
t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */
t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */
t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */
t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */
t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */
t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */
return cmac;
}
static int pm3393_mac_reset(adapter_t *adapter)
{
u32 val;
u32 x;
u32 is_pl4_reset_finished;
u32 is_pl4_outof_lock;
u32 is_xaui_mabc_pll_locked;
u32 successful_reset;
int i;
/* The following steps are required to properly reset
* the PM3393. This information is provided in the
* PM3393 datasheet (Issue 2: November 2002)
* section 13.1 -- Device Reset.
*
* The PM3393 has three types of components that are
* individually reset:
*
* DRESETB - Digital circuitry
* PL4_ARESETB - PL4 analog circuitry
* XAUI_ARESETB - XAUI bus analog circuitry
*
* Steps to reset PM3393 using RSTB pin:
*
* 1. Assert RSTB pin low ( write 0 )
* 2. Wait at least 1ms to initiate a complete initialization of the device.
* 3. Wait until all external clocks and REFSEL are stable.
* 4. Wait a minimum of 1ms (after the external clocks and REFSEL are stable).
* 5. De-assert RSTB ( write 1 )
* 6. Wait for the internal timers to expire (~14ms).
* - Allows analog clock synthesizer(PL4CSU) to stabilize to
* selected reference frequency before allowing the digital
* portion of the device to operate.
* 7. Wait at least 200us for the XAUI interface to stabilize.
* 8. Verify the PM3393 came out of reset successfully.
* Set the successful-reset flag if everything worked; otherwise try
* again a few more times.
*/
successful_reset = 0;
for (i = 0; i < 3 && !successful_reset; i++) {
/* 1 */
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~1;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
/* 2 */
msleep(1);
/* 3 */
msleep(1);
/* 4 */
msleep(2 /*1 extra ms for safety */ );
/* 5 */
val |= 1;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
/* 6 */
msleep(15 /*1 extra ms for safety */ );
/* 7 */
msleep(1);
/* 8 */
/* Has PL4 analog block come out of reset correctly? */
t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later in the
* init sequence; figure out why. */
/* Have all PL4 block clocks locked? */
x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
/*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ |
SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
is_pl4_outof_lock = (val & x);
/* ??? If this fails, we might be able to software-reset the XAUI part
* and try to recover, saving us from doing another HW reset. */
/* Has the XAUI MABC PLL circuitry stabilized? */
is_xaui_mabc_pll_locked =
(val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
&& is_xaui_mabc_pll_locked);
if (netif_msg_hw(adapter))
dev_dbg(&adapter->pdev->dev,
"PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
"is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
i, is_pl4_reset_finished, val,
is_pl4_outof_lock, is_xaui_mabc_pll_locked);
}
return successful_reset ? 0 : 1;
}
const struct gmac t1_pm3393_ops = {
.stats_update_period = STATS_TICK_SECS,
.create = pm3393_mac_create,
.reset = pm3393_mac_reset,
};

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,94 @@
/*****************************************************************************
* *
* File: sge.h *
* $Revision: 1.11 $ *
* $Date: 2005/06/21 22:10:55 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_SGE_H_
#define _CXGB_SGE_H_
#include <linux/types.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
struct sge_intr_counts {
unsigned int rx_drops; /* # of packets dropped due to no mem */
unsigned int pure_rsps; /* # of non-payload responses */
unsigned int unhandled_irqs; /* # of unhandled interrupts */
unsigned int respQ_empty; /* # times respQ empty */
unsigned int respQ_overflow; /* # respQ overflow (fatal) */
unsigned int freelistQ_empty; /* # times freelist empty */
unsigned int pkt_too_big; /* packet too large (fatal) */
unsigned int pkt_mismatch;
unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */
unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
};
struct sge_port_stats {
u64 rx_cso_good; /* # of successful RX csum offloads */
u64 tx_cso; /* # of TX checksum offloads */
u64 tx_tso; /* # of TSO requests */
u64 vlan_xtract; /* # of VLAN tag extractions */
u64 vlan_insert; /* # of VLAN tag insertions */
u64 tx_need_hdrroom; /* # of TX skbs in need of more header room */
};
struct sk_buff;
struct net_device;
struct adapter;
struct sge_params;
struct sge;
struct sge *t1_sge_create(struct adapter *, struct sge_params *);
int t1_sge_configure(struct sge *, struct sge_params *);
int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
void t1_sge_destroy(struct sge *);
irqreturn_t t1_interrupt(int irq, void *cookie);
int t1_poll(struct napi_struct *, int);
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
void t1_vlan_mode(struct adapter *adapter, u32 features);
void t1_sge_start(struct sge *);
void t1_sge_stop(struct sge *);
int t1_sge_intr_error_handler(struct sge *);
void t1_sge_intr_enable(struct sge *);
void t1_sge_intr_disable(struct sge *);
void t1_sge_intr_clear(struct sge *);
const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge);
void t1_sge_get_port_stats(const struct sge *sge, int port, struct sge_port_stats *);
unsigned int t1_sched_update_parms(struct sge *, unsigned int, unsigned int,
unsigned int);
#endif /* _CXGB_SGE_H_ */
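
Pieced together from the declarations above, the intended calling sequence looks roughly like this; the function name and error values are illustrative assumptions, not taken from this patch.

/* Illustrative bring-up of the SGE -- a sketch, not driver code. */
static int example_sge_up(struct adapter *adapter, struct sge_params *p)
{
	struct sge *sge = t1_sge_create(adapter, p);

	if (!sge)
		return -ENOMEM;
	if (t1_sge_configure(sge, p)) {
		t1_sge_destroy(sge);
		return -EIO;
	}
	t1_sge_intr_clear(sge);
	t1_sge_intr_enable(sge);
	t1_sge_start(sge);	/* after this, t1_interrupt()/t1_poll() are live */
	return 0;
}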

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,171 @@
/* $Date: 2006/02/07 04:21:54 $ $RCSfile: tp.c,v $ $Revision: 1.73 $ */
#include "common.h"
#include "regs.h"
#include "tp.h"
#ifdef CONFIG_CHELSIO_T1_1G
#include "fpga_defs.h"
#endif
struct petp {
adapter_t *adapter;
};
/* Pause deadlock avoidance parameters */
#define DROP_MSEC 16
#define DROP_PKTS_CNT 1
static void tp_init(adapter_t * ap, const struct tp_params *p,
unsigned int tp_clk)
{
u32 val;
if (!t1_is_asic(ap))
return;
val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
if (!p->pm_size)
val |= F_OFFLOAD_DISABLE;
else
val |= F_TP_IN_ESPI_CHECK_IP_CSUM | F_TP_IN_ESPI_CHECK_TCP_CSUM;
writel(val, ap->regs + A_TP_IN_CONFIG);
writel(F_TP_OUT_CSPI_CPL |
F_TP_OUT_ESPI_ETHERNET |
F_TP_OUT_ESPI_GENERATE_IP_CSUM |
F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG);
writel(V_IP_TTL(64) |
F_PATH_MTU /* IP DF bit */ |
V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG);
/*
* Enable pause frame deadlock prevention.
*/
if (is_T2(ap) && ap->params.nports > 1) {
u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
V_DROP_TICKS_CNT(drop_ticks) |
V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
ap->regs + A_TP_TX_DROP_CONFIG);
}
}
void t1_tp_destroy(struct petp *tp)
{
kfree(tp);
}
struct petp *__devinit t1_tp_create(adapter_t *adapter, struct tp_params *p)
{
struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL);
if (!tp)
return NULL;
tp->adapter = adapter;
return tp;
}
void t1_tp_intr_enable(struct petp *tp)
{
u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE);
#ifdef CONFIG_CHELSIO_T1_1G
if (!t1_is_asic(tp->adapter)) {
/* FPGA */
writel(0xffffffff,
tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE);
writel(tp_intr | FPGA_PCIX_INTERRUPT_TP,
tp->adapter->regs + A_PL_ENABLE);
} else
#endif
{
/* We don't use any TP interrupts */
writel(0, tp->adapter->regs + A_TP_INT_ENABLE);
writel(tp_intr | F_PL_INTR_TP,
tp->adapter->regs + A_PL_ENABLE);
}
}
void t1_tp_intr_disable(struct petp *tp)
{
u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE);
#ifdef CONFIG_CHELSIO_T1_1G
if (!t1_is_asic(tp->adapter)) {
/* FPGA */
writel(0, tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE);
writel(tp_intr & ~FPGA_PCIX_INTERRUPT_TP,
tp->adapter->regs + A_PL_ENABLE);
} else
#endif
{
writel(0, tp->adapter->regs + A_TP_INT_ENABLE);
writel(tp_intr & ~F_PL_INTR_TP,
tp->adapter->regs + A_PL_ENABLE);
}
}
void t1_tp_intr_clear(struct petp *tp)
{
#ifdef CONFIG_CHELSIO_T1_1G
if (!t1_is_asic(tp->adapter)) {
writel(0xffffffff,
tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
writel(FPGA_PCIX_INTERRUPT_TP, tp->adapter->regs + A_PL_CAUSE);
return;
}
#endif
writel(0xffffffff, tp->adapter->regs + A_TP_INT_CAUSE);
writel(F_PL_INTR_TP, tp->adapter->regs + A_PL_CAUSE);
}
int t1_tp_intr_handler(struct petp *tp)
{
u32 cause;
#ifdef CONFIG_CHELSIO_T1_1G
/* FPGA doesn't support TP interrupts. */
if (!t1_is_asic(tp->adapter))
return 1;
#endif
cause = readl(tp->adapter->regs + A_TP_INT_CAUSE);
writel(cause, tp->adapter->regs + A_TP_INT_CAUSE);
return 0;
}
static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable)
{
u32 val = readl(tp->adapter->regs + A_TP_GLOBAL_CONFIG);
if (enable)
val |= csum_bit;
else
val &= ~csum_bit;
writel(val, tp->adapter->regs + A_TP_GLOBAL_CONFIG);
}
void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable)
{
set_csum_offload(tp, F_IP_CSUM, enable);
}
void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable)
{
set_csum_offload(tp, F_TCP_CSUM, enable);
}
/*
* Initialize TP state. tp_params contains initial settings for some TP
* parameters, particularly the one-time PM and CM settings.
*/
int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk)
{
adapter_t *adapter = tp->adapter;
tp_init(adapter, p, tp_clk);
writel(F_TP_RESET, adapter->regs + A_TP_RESET);
return 0;
}


@@ -0,0 +1,72 @@
/* $Date: 2005/03/07 23:59:05 $ $RCSfile: tp.h,v $ $Revision: 1.20 $ */
#ifndef CHELSIO_TP_H
#define CHELSIO_TP_H
#include "common.h"
#define TP_MAX_RX_COALESCING_SIZE 16224U
struct tp_mib_statistics {
/* IP */
u32 ipInReceive_hi;
u32 ipInReceive_lo;
u32 ipInHdrErrors_hi;
u32 ipInHdrErrors_lo;
u32 ipInAddrErrors_hi;
u32 ipInAddrErrors_lo;
u32 ipInUnknownProtos_hi;
u32 ipInUnknownProtos_lo;
u32 ipInDiscards_hi;
u32 ipInDiscards_lo;
u32 ipInDelivers_hi;
u32 ipInDelivers_lo;
u32 ipOutRequests_hi;
u32 ipOutRequests_lo;
u32 ipOutDiscards_hi;
u32 ipOutDiscards_lo;
u32 ipOutNoRoutes_hi;
u32 ipOutNoRoutes_lo;
u32 ipReasmTimeout;
u32 ipReasmReqds;
u32 ipReasmOKs;
u32 ipReasmFails;
u32 reserved[8];
/* TCP */
u32 tcpActiveOpens;
u32 tcpPassiveOpens;
u32 tcpAttemptFails;
u32 tcpEstabResets;
u32 tcpOutRsts;
u32 tcpCurrEstab;
u32 tcpInSegs_hi;
u32 tcpInSegs_lo;
u32 tcpOutSegs_hi;
u32 tcpOutSegs_lo;
u32 tcpRetransSeg_hi;
u32 tcpRetransSeg_lo;
u32 tcpInErrs_hi;
u32 tcpInErrs_lo;
u32 tcpRtoMin;
u32 tcpRtoMax;
};
struct petp;
struct tp_params;
struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p);
void t1_tp_destroy(struct petp *tp);
void t1_tp_intr_disable(struct petp *tp);
void t1_tp_intr_enable(struct petp *tp);
void t1_tp_intr_clear(struct petp *tp);
int t1_tp_intr_handler(struct petp *tp);
void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
#endif


@@ -0,0 +1,730 @@
/* $Date: 2006/04/28 19:20:06 $ $RCSfile: vsc7326.c,v $ $Revision: 1.19 $ */
/* Driver for Vitesse VSC7326 (Schaumburg) MAC */
#include "gmac.h"
#include "elmer0.h"
#include "vsc7326_reg.h"
/* Update fast changing statistics every 15 seconds */
#define STATS_TICK_SECS 15
/* 30 minutes for full statistics update */
#define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS)
#define MAX_MTU 9600
/* The egress WM value 0x01a01fff should be used only when the
* interface is down (MAC port disabled). This is a workaround
* for disabling the T2/MAC flow-control. When the interface is
* enabled, the WM value should be set to 0x014a03F0.
*/
#define WM_DISABLE 0x01a01fff
#define WM_ENABLE 0x014a03F0
struct init_table {
u32 addr;
u32 data;
};
struct _cmac_instance {
u32 index;
u32 ticks;
};
#define INITBLOCK_SLEEP 0xffffffff
static void vsc_read(adapter_t *adapter, u32 addr, u32 *val)
{
u32 status, vlo, vhi;
int i;
spin_lock_bh(&adapter->mac_lock);
t1_tpi_read(adapter, (addr << 2) + 4, &vlo);
i = 0;
do {
t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo);
t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi);
status = (vhi << 16) | vlo;
i++;
} while (((status & 1) == 0) && (i < 50));
if (i == 50)
pr_err("Invalid tpi read from MAC, breaking loop.\n");
t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo);
t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi);
*val = (vhi << 16) | vlo;
/* pr_err("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
((addr&0xe000)>>13), ((addr&0x1e00)>>9),
((addr&0x01fe)>>1), *val); */
spin_unlock_bh(&adapter->mac_lock);
}
static void vsc_write(adapter_t *adapter, u32 addr, u32 data)
{
spin_lock_bh(&adapter->mac_lock);
t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF);
t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF);
/* pr_err("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
((addr&0xe000)>>13), ((addr&0x1e00)>>9),
((addr&0x01fe)>>1), data); */
spin_unlock_bh(&adapter->mac_lock);
}
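These two helpers reach the VSC7326 through 16-bit TPI transactions: vsc_write() splits each 32-bit value into a low half at (addr << 2) + 4 and a high half at addr << 2, and vsc_read() recombines them in the same order. A standalone user-space sketch of that split and recombine, for illustration only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t data = 0x80000001;	/* e.g. the REG_SW_RESET value used below */
	uint16_t lo = data & 0xffff;		/* vsc_write() sends this to (addr << 2) + 4 */
	uint16_t hi = (data >> 16) & 0xffff;	/* ...and this to addr << 2 */
	uint32_t back = ((uint32_t)hi << 16) | lo;	/* vsc_read() recombines the halves */

	printf("hi=0x%04x lo=0x%04x recombined=0x%08x\n", hi, lo, back);
	return 0;
}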
/* Hard reset the MAC. This wipes out *all* configuration. */
static void vsc7326_full_reset(adapter_t *adapter)
{
u32 val;
u32 result = 0xffff;
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~1;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(2);
val |= 0x1; /* Enable the MAC itself */
val |= 0x800; /* Turn off the red LED */
t1_tpi_write(adapter, A_ELMER0_GPO, val);
mdelay(1);
vsc_write(adapter, REG_SW_RESET, 0x80000001);
do {
mdelay(1);
vsc_read(adapter, REG_SW_RESET, &result);
} while (result != 0x0);
}
static struct init_table vsc7326_reset[] = {
{ REG_IFACE_MODE, 0x00000000 },
{ REG_CRC_CFG, 0x00000020 },
{ REG_PLL_CLK_SPEED, 0x00050c00 },
{ REG_PLL_CLK_SPEED, 0x00050c00 },
{ REG_MSCH, 0x00002f14 },
{ REG_SPI4_MISC, 0x00040409 },
{ REG_SPI4_DESKEW, 0x00080000 },
{ REG_SPI4_ING_SETUP2, 0x08080004 },
{ REG_SPI4_ING_SETUP0, 0x04111004 },
{ REG_SPI4_EGR_SETUP0, 0x80001a04 },
{ REG_SPI4_ING_SETUP1, 0x02010000 },
{ REG_AGE_INC(0), 0x00000000 },
{ REG_AGE_INC(1), 0x00000000 },
{ REG_ING_CONTROL, 0x0a200011 },
{ REG_EGR_CONTROL, 0xa0010091 },
};
static struct init_table vsc7326_portinit[4][22] = {
{ /* Port 0 */
/* FIFO setup */
{ REG_DBG(0), 0x000004f0 },
{ REG_HDX(0), 0x00073101 },
{ REG_TEST(0,0), 0x00000022 },
{ REG_TEST(1,0), 0x00000022 },
{ REG_TOP_BOTTOM(0,0), 0x003f0000 },
{ REG_TOP_BOTTOM(1,0), 0x00120000 },
{ REG_HIGH_LOW_WM(0,0), 0x07460757 },
{ REG_HIGH_LOW_WM(1,0), WM_DISABLE },
{ REG_CT_THRHLD(0,0), 0x00000000 },
{ REG_CT_THRHLD(1,0), 0x00000000 },
{ REG_BUCKE(0), 0x0002ffff },
{ REG_BUCKI(0), 0x0002ffff },
{ REG_TEST(0,0), 0x00000020 },
{ REG_TEST(1,0), 0x00000020 },
/* Port config */
{ REG_MAX_LEN(0), 0x00002710 },
{ REG_PORT_FAIL(0), 0x00000002 },
{ REG_NORMALIZER(0), 0x00000a64 },
{ REG_DENORM(0), 0x00000010 },
{ REG_STICK_BIT(0), 0x03baa370 },
{ REG_DEV_SETUP(0), 0x00000083 },
{ REG_DEV_SETUP(0), 0x00000082 },
{ REG_MODE_CFG(0), 0x0200259f },
},
{ /* Port 1 */
/* FIFO setup */
{ REG_DBG(1), 0x000004f0 },
{ REG_HDX(1), 0x00073101 },
{ REG_TEST(0,1), 0x00000022 },
{ REG_TEST(1,1), 0x00000022 },
{ REG_TOP_BOTTOM(0,1), 0x007e003f },
{ REG_TOP_BOTTOM(1,1), 0x00240012 },
{ REG_HIGH_LOW_WM(0,1), 0x07460757 },
{ REG_HIGH_LOW_WM(1,1), WM_DISABLE },
{ REG_CT_THRHLD(0,1), 0x00000000 },
{ REG_CT_THRHLD(1,1), 0x00000000 },
{ REG_BUCKE(1), 0x0002ffff },
{ REG_BUCKI(1), 0x0002ffff },
{ REG_TEST(0,1), 0x00000020 },
{ REG_TEST(1,1), 0x00000020 },
/* Port config */
{ REG_MAX_LEN(1), 0x00002710 },
{ REG_PORT_FAIL(1), 0x00000002 },
{ REG_NORMALIZER(1), 0x00000a64 },
{ REG_DENORM(1), 0x00000010 },
{ REG_STICK_BIT(1), 0x03baa370 },
{ REG_DEV_SETUP(1), 0x00000083 },
{ REG_DEV_SETUP(1), 0x00000082 },
{ REG_MODE_CFG(1), 0x0200259f },
},
{ /* Port 2 */
/* FIFO setup */
{ REG_DBG(2), 0x000004f0 },
{ REG_HDX(2), 0x00073101 },
{ REG_TEST(0,2), 0x00000022 },
{ REG_TEST(1,2), 0x00000022 },
{ REG_TOP_BOTTOM(0,2), 0x00bd007e },
{ REG_TOP_BOTTOM(1,2), 0x00360024 },
{ REG_HIGH_LOW_WM(0,2), 0x07460757 },
{ REG_HIGH_LOW_WM(1,2), WM_DISABLE },
{ REG_CT_THRHLD(0,2), 0x00000000 },
{ REG_CT_THRHLD(1,2), 0x00000000 },
{ REG_BUCKE(2), 0x0002ffff },
{ REG_BUCKI(2), 0x0002ffff },
{ REG_TEST(0,2), 0x00000020 },
{ REG_TEST(1,2), 0x00000020 },
/* Port config */
{ REG_MAX_LEN(2), 0x00002710 },
{ REG_PORT_FAIL(2), 0x00000002 },
{ REG_NORMALIZER(2), 0x00000a64 },
{ REG_DENORM(2), 0x00000010 },
{ REG_STICK_BIT(2), 0x03baa370 },
{ REG_DEV_SETUP(2), 0x00000083 },
{ REG_DEV_SETUP(2), 0x00000082 },
{ REG_MODE_CFG(2), 0x0200259f },
},
{ /* Port 3 */
/* FIFO setup */
{ REG_DBG(3), 0x000004f0 },
{ REG_HDX(3), 0x00073101 },
{ REG_TEST(0,3), 0x00000022 },
{ REG_TEST(1,3), 0x00000022 },
{ REG_TOP_BOTTOM(0,3), 0x00fc00bd },
{ REG_TOP_BOTTOM(1,3), 0x00480036 },
{ REG_HIGH_LOW_WM(0,3), 0x07460757 },
{ REG_HIGH_LOW_WM(1,3), WM_DISABLE },
{ REG_CT_THRHLD(0,3), 0x00000000 },
{ REG_CT_THRHLD(1,3), 0x00000000 },
{ REG_BUCKE(3), 0x0002ffff },
{ REG_BUCKI(3), 0x0002ffff },
{ REG_TEST(0,3), 0x00000020 },
{ REG_TEST(1,3), 0x00000020 },
/* Port config */
{ REG_MAX_LEN(3), 0x00002710 },
{ REG_PORT_FAIL(3), 0x00000002 },
{ REG_NORMALIZER(3), 0x00000a64 },
{ REG_DENORM(3), 0x00000010 },
{ REG_STICK_BIT(3), 0x03baa370 },
{ REG_DEV_SETUP(3), 0x00000083 },
{ REG_DEV_SETUP(3), 0x00000082 },
{ REG_MODE_CFG(3), 0x0200259f },
},
};
static void run_table(adapter_t *adapter, struct init_table *ib, int len)
{
int i;
for (i = 0; i < len; i++) {
if (ib[i].addr == INITBLOCK_SLEEP) {
udelay(ib[i].data);
pr_err("sleep %d us\n", ib[i].data);
} else
vsc_write(adapter, ib[i].addr, ib[i].data);
}
}
static int bist_rd(adapter_t *adapter, int moduleid, int address)
{
int data = 0;
u32 result = 0;
if ((address != 0x0) &&
(address != 0x1) &&
(address != 0x2) &&
(address != 0xd) &&
(address != 0xe))
pr_err("No bist address: 0x%x\n", address);
data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) |
((moduleid & 0xff) << 0));
vsc_write(adapter, REG_RAM_BIST_CMD, data);
udelay(10);
vsc_read(adapter, REG_RAM_BIST_RESULT, &result);
if ((result & (1 << 9)) != 0x0)
pr_err("Still in bist read: 0x%x\n", result);
else if ((result & (1 << 8)) != 0x0)
pr_err("bist read error: 0x%x\n", result);
return result & 0xff;
}
static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
{
int data = 0;
u32 result = 0;
if ((address != 0x0) &&
(address != 0x1) &&
(address != 0x2) &&
(address != 0xd) &&
(address != 0xe))
pr_err("No bist address: 0x%x\n", address);
if (value > 255)
pr_err("Suspicious write out of range value: 0x%x\n", value);
data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) |
((moduleid & 0xff) << 0));
vsc_write(adapter, REG_RAM_BIST_CMD, data);
udelay(5);
vsc_read(adapter, REG_RAM_BIST_CMD, &result);
if ((result & (1 << 27)) != 0x0)
pr_err("Still in bist write: 0x%x\n", result);
else if ((result & (1 << 26)) != 0x0)
pr_err("bist write error: 0x%x\n", result);
return 0;
}
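Both helpers pack a single 32-bit command word for REG_RAM_BIST_CMD: opcode in byte 3 (0 = read, 1 = write), BIST register address in byte 2, write data in byte 1 and module id in byte 0. A minimal user-space sketch of that packing (the helper name bist_cmd is hypothetical):

#include <stdio.h>

/* Pack a RAM BIST command word the way bist_rd()/bist_wr() above do. */
static unsigned int bist_cmd(int op, int addr, int value, int moduleid)
{
	return ((op & 0xff) << 24) | ((addr & 0xff) << 16) |
	       ((value & 0xff) << 8) | ((moduleid & 0xff) << 0);
}

int main(void)
{
	printf("read  cmd = 0x%08x\n", bist_cmd(0, 0x02, 0x00, 13));
	printf("write cmd = 0x%08x\n", bist_cmd(1, 0x00, 0x02, 13));
	return 0;
}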
static int run_bist(adapter_t *adapter, int moduleid)
{
/* run bist */
(void) bist_wr(adapter, moduleid, 0x00, 0x02);
(void) bist_wr(adapter, moduleid, 0x01, 0x01);
return 0;
}
static int check_bist(adapter_t *adapter, int moduleid)
{
int result = 0;
int column = 0;
/* check bist */
result = bist_rd(adapter, moduleid, 0x02);
column = ((bist_rd(adapter, moduleid, 0x0e) << 8) +
(bist_rd(adapter, moduleid, 0x0d)));
if ((result & 3) != 0x3)
pr_err("Result: 0x%x BIST error in ram %d, column: 0x%04x\n",
result, moduleid, column);
return 0;
}
static int enable_mem(adapter_t *adapter, int moduleid)
{
/* enable mem */
(void) bist_wr(adapter, moduleid, 0x00, 0x00);
return 0;
}
static int run_bist_all(adapter_t *adapter)
{
int port = 0;
u32 val = 0;
vsc_write(adapter, REG_MEM_BIST, 0x5);
vsc_read(adapter, REG_MEM_BIST, &val);
for (port = 0; port < 12; port++)
vsc_write(adapter, REG_DEV_SETUP(port), 0x0);
udelay(300);
vsc_write(adapter, REG_SPI4_MISC, 0x00040409);
udelay(300);
(void) run_bist(adapter, 13);
(void) run_bist(adapter, 14);
(void) run_bist(adapter, 20);
(void) run_bist(adapter, 21);
mdelay(200);
(void) check_bist(adapter, 13);
(void) check_bist(adapter, 14);
(void) check_bist(adapter, 20);
(void) check_bist(adapter, 21);
udelay(100);
(void) enable_mem(adapter, 13);
(void) enable_mem(adapter, 14);
(void) enable_mem(adapter, 20);
(void) enable_mem(adapter, 21);
udelay(300);
vsc_write(adapter, REG_SPI4_MISC, 0x60040400);
udelay(300);
for (port = 0; port < 12; port++)
vsc_write(adapter, REG_DEV_SETUP(port), 0x1);
udelay(300);
vsc_write(adapter, REG_MEM_BIST, 0x0);
mdelay(10);
return 0;
}
static int mac_intr_handler(struct cmac *mac)
{
return 0;
}
static int mac_intr_enable(struct cmac *mac)
{
return 0;
}
static int mac_intr_disable(struct cmac *mac)
{
return 0;
}
static int mac_intr_clear(struct cmac *mac)
{
return 0;
}
/* Expect MAC address to be in network byte order. */
static int mac_set_address(struct cmac *mac, u8 addr[6])
{
u32 val;
int port = mac->instance->index;
vsc_write(mac->adapter, REG_MAC_LOW_ADDR(port),
(addr[3] << 16) | (addr[4] << 8) | addr[5]);
vsc_write(mac->adapter, REG_MAC_HIGH_ADDR(port),
(addr[0] << 16) | (addr[1] << 8) | addr[2]);
vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &val);
val &= ~0xf0000000;
vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, val | (port << 28));
vsc_write(mac->adapter, REG_ING_FFILT_MASK0,
0xffff0000 | (addr[4] << 8) | addr[5]);
vsc_write(mac->adapter, REG_ING_FFILT_MASK1,
0xffff0000 | (addr[2] << 8) | addr[3]);
vsc_write(mac->adapter, REG_ING_FFILT_MASK2,
0xffff0000 | (addr[0] << 8) | addr[1]);
return 0;
}
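The station address is stored as two 24-bit halves: the first three octets go to REG_MAC_HIGH_ADDR and the last three to REG_MAC_LOW_ADDR. A user-space sketch of the packing (the example address is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned char addr[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };
	unsigned int hi = (addr[0] << 16) | (addr[1] << 8) | addr[2];
	unsigned int lo = (addr[3] << 16) | (addr[4] << 8) | addr[5];

	printf("REG_MAC_HIGH_ADDR=0x%06x REG_MAC_LOW_ADDR=0x%06x\n", hi, lo);
	return 0;
}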
static int mac_get_address(struct cmac *mac, u8 addr[6])
{
u32 addr_lo, addr_hi;
int port = mac->instance->index;
vsc_read(mac->adapter, REG_MAC_LOW_ADDR(port), &addr_lo);
vsc_read(mac->adapter, REG_MAC_HIGH_ADDR(port), &addr_hi);
addr[0] = (u8) (addr_hi >> 16);
addr[1] = (u8) (addr_hi >> 8);
addr[2] = (u8) addr_hi;
addr[3] = (u8) (addr_lo >> 16);
addr[4] = (u8) (addr_lo >> 8);
addr[5] = (u8) addr_lo;
return 0;
}
/* This is intended to reset a port, not the whole MAC */
static int mac_reset(struct cmac *mac)
{
int index = mac->instance->index;
run_table(mac->adapter, vsc7326_portinit[index],
ARRAY_SIZE(vsc7326_portinit[index]));
return 0;
}
static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
{
u32 v;
int port = mac->instance->index;
vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &v);
v |= 1 << 12;
if (t1_rx_mode_promisc(rm))
v &= ~(1 << (port + 16));
else
v |= 1 << (port + 16);
vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, v);
return 0;
}
static int mac_set_mtu(struct cmac *mac, int mtu)
{
int port = mac->instance->index;
if (mtu > MAX_MTU)
return -EINVAL;
/* max_len includes the 14-byte Ethernet header and 4-byte FCS, e.g. MTU 1500 -> 1518 */
vsc_write(mac->adapter, REG_MAX_LEN(port), mtu + 14 + 4);
return 0;
}
static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex,
int fc)
{
u32 v;
int enable, port = mac->instance->index;
if (speed >= 0 && speed != SPEED_10 && speed != SPEED_100 &&
speed != SPEED_1000)
return -1;
if (duplex > 0 && duplex != DUPLEX_FULL)
return -1;
if (speed >= 0) {
vsc_read(mac->adapter, REG_MODE_CFG(port), &v);
enable = v & 3; /* save tx/rx enables */
v &= ~0xf;
v |= 4; /* full duplex */
if (speed == SPEED_1000)
v |= 8; /* GigE */
enable |= v;
vsc_write(mac->adapter, REG_MODE_CFG(port), v);
if (speed == SPEED_1000)
v = 0x82;
else if (speed == SPEED_100)
v = 0x84;
else /* SPEED_10 */
v = 0x86;
vsc_write(mac->adapter, REG_DEV_SETUP(port), v | 1); /* reset */
vsc_write(mac->adapter, REG_DEV_SETUP(port), v);
vsc_read(mac->adapter, REG_DBG(port), &v);
v &= ~0xff00;
if (speed == SPEED_1000)
v |= 0x400;
else if (speed == SPEED_100)
v |= 0x2000;
else /* SPEED_10 */
v |= 0xff00;
vsc_write(mac->adapter, REG_DBG(port), v);
vsc_write(mac->adapter, REG_TX_IFG(port),
speed == SPEED_1000 ? 5 : 0x11);
if (duplex == DUPLEX_HALF)
enable = 0x0; /* 100 or 10 */
else if (speed == SPEED_1000)
enable = 0xc;
else /* SPEED_100 or 10 */
enable = 0x4;
enable |= 0x9 << 10; /* IFG1 */
enable |= 0x6 << 6; /* IFG2 */
enable |= 0x1 << 4; /* VLAN */
enable |= 0x3; /* RX/TX EN */
vsc_write(mac->adapter, REG_MODE_CFG(port), enable);
}
vsc_read(mac->adapter, REG_PAUSE_CFG(port), &v);
v &= 0xfff0ffff;
v |= 0x20000; /* xon/xoff */
if (fc & PAUSE_RX)
v |= 0x40000;
if (fc & PAUSE_TX)
v |= 0x80000;
if (fc == (PAUSE_RX | PAUSE_TX))
v |= 0x10000;
vsc_write(mac->adapter, REG_PAUSE_CFG(port), v);
return 0;
}
static int mac_enable(struct cmac *mac, int which)
{
u32 val;
int port = mac->instance->index;
/* Write the correct WM value when the port is enabled. */
vsc_write(mac->adapter, REG_HIGH_LOW_WM(1,port), WM_ENABLE);
vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
if (which & MAC_DIRECTION_RX)
val |= 0x2;
if (which & MAC_DIRECTION_TX)
val |= 1;
vsc_write(mac->adapter, REG_MODE_CFG(port), val);
return 0;
}
static int mac_disable(struct cmac *mac, int which)
{
u32 val;
int i, port = mac->instance->index;
/* Reset the port, this also writes the correct WM value */
mac_reset(mac);
vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
if (which & MAC_DIRECTION_RX)
val &= ~0x2;
if (which & MAC_DIRECTION_TX)
val &= ~0x1;
vsc_write(mac->adapter, REG_MODE_CFG(port), val);
vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
/* Clear stats */
for (i = 0; i <= 0x3a; ++i)
vsc_write(mac->adapter, CRA(4, port, i), 0);
/* Clear software counters */
memset(&mac->stats, 0, sizeof(struct cmac_statistics));
return 0;
}
static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat)
{
u32 v, lo;
vsc_read(mac->adapter, addr, &v);
lo = *stat;
*stat = *stat - lo + v;
if (v == 0)
return;
if (v < lo)
*stat += (1ULL << 32);
}
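rmon_update() folds a 32-bit hardware counter into a 64-bit software accumulator: the low 32 bits are replaced with the fresh reading, and a carry is added when the counter wrapped since the last visit (skipped for a reading of 0, matching the early return above). A standalone sketch of the same folding, minus the register read:

#include <stdio.h>
#include <stdint.h>

static void fold32(uint64_t *stat, uint32_t v)
{
	uint32_t lo = (uint32_t)*stat;

	*stat = *stat - lo + v;		/* replace the low 32 bits */
	if (v != 0 && v < lo)
		*stat += 1ULL << 32;	/* the HW counter wrapped */
}

int main(void)
{
	uint64_t stat = 0;

	fold32(&stat, 0xfffffff0u);	/* counter close to the 32-bit limit */
	fold32(&stat, 0x10u);		/* counter wrapped in between */
	printf("stat = 0x%llx\n", (unsigned long long)stat);	/* 0x100000010 */
	return 0;
}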
static void port_stats_update(struct cmac *mac)
{
struct {
unsigned int reg;
unsigned int offset;
} hw_stats[] = {
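/* HW_STAT maps a hardware RMON register to the u64 slot of stat_name
 * inside struct cmac_statistics (an offsetof-in-u64-units trick). */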
#define HW_STAT(reg, stat_name) \
{ reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
/* Rx stats */
HW_STAT(RxUnicast, RxUnicastFramesOK),
HW_STAT(RxMulticast, RxMulticastFramesOK),
HW_STAT(RxBroadcast, RxBroadcastFramesOK),
HW_STAT(Crc, RxFCSErrors),
HW_STAT(RxAlignment, RxAlignErrors),
HW_STAT(RxOversize, RxFrameTooLongErrors),
HW_STAT(RxPause, RxPauseFrames),
HW_STAT(RxJabbers, RxJabberErrors),
HW_STAT(RxFragments, RxRuntErrors),
HW_STAT(RxUndersize, RxRuntErrors),
HW_STAT(RxSymbolCarrier, RxSymbolErrors),
HW_STAT(RxSize1519ToMax, RxJumboFramesOK),
/* Tx stats (skip collision stats as we are full-duplex only) */
HW_STAT(TxUnicast, TxUnicastFramesOK),
HW_STAT(TxMulticast, TxMulticastFramesOK),
HW_STAT(TxBroadcast, TxBroadcastFramesOK),
HW_STAT(TxPause, TxPauseFrames),
HW_STAT(TxUnderrun, TxUnderrun),
HW_STAT(TxSize1519ToMax, TxJumboFramesOK),
}, *p = hw_stats;
unsigned int port = mac->instance->index;
u64 *stats = (u64 *)&mac->stats;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hw_stats); i++, p++)
rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset);
rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK);
rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad);
}
/*
* This function is called periodically to accumulate the current values of the
* RMON counters into the port statistics. Since the counters are only 32 bits
* some of them can overflow in less than a minute at GigE speeds, so this
* function should be called every 30 seconds or so.
*
* To cut down on reading costs we update only the octet counters at each tick
* and do a full update at major ticks, which can be every 30 minutes or more.
*/
static const struct cmac_statistics *mac_update_statistics(struct cmac *mac,
int flag)
{
if (flag == MAC_STATS_UPDATE_FULL ||
mac->instance->ticks >= MAJOR_UPDATE_TICKS) {
port_stats_update(mac);
mac->instance->ticks = 0;
} else {
int port = mac->instance->index;
rmon_update(mac, REG_RX_OK_BYTES(port),
&mac->stats.RxOctetsOK);
rmon_update(mac, REG_RX_BAD_BYTES(port),
&mac->stats.RxOctetsBad);
rmon_update(mac, REG_TX_OK_BYTES(port),
&mac->stats.TxOctetsOK);
mac->instance->ticks++;
}
return &mac->stats;
}
static void mac_destroy(struct cmac *mac)
{
kfree(mac);
}
static struct cmac_ops vsc7326_ops = {
.destroy = mac_destroy,
.reset = mac_reset,
.interrupt_handler = mac_intr_handler,
.interrupt_enable = mac_intr_enable,
.interrupt_disable = mac_intr_disable,
.interrupt_clear = mac_intr_clear,
.enable = mac_enable,
.disable = mac_disable,
.set_mtu = mac_set_mtu,
.set_rx_mode = mac_set_rx_mode,
.set_speed_duplex_fc = mac_set_speed_duplex_fc,
.statistics_update = mac_update_statistics,
.macaddress_get = mac_get_address,
.macaddress_set = mac_set_address,
};
static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index)
{
struct cmac *mac;
u32 val;
int i;
mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
if (!mac)
return NULL;
mac->ops = &vsc7326_ops;
mac->instance = (cmac_instance *)(mac + 1);
mac->adapter = adapter;
mac->instance->index = index;
mac->instance->ticks = 0;
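/* Poll REG_LOCAL_STATUS until it reads something other than all-ones,
 * i.e. until the chip is out of reset and responding. */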
i = 0;
do {
u32 vhi, vlo;
vhi = vlo = 0;
t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo);
udelay(1);
t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi);
udelay(5);
val = (vhi << 16) | vlo;
} while ((++i < 10000) && (val == 0xffffffff));
return mac;
}
static int vsc7326_mac_reset(adapter_t *adapter)
{
vsc7326_full_reset(adapter);
(void) run_bist_all(adapter);
run_table(adapter, vsc7326_reset, ARRAY_SIZE(vsc7326_reset));
return 0;
}
const struct gmac t1_vsc7326_ops = {
.stats_update_period = STATS_TICK_SECS,
.create = vsc7326_mac_create,
.reset = vsc7326_mac_reset,
};


@@ -0,0 +1,297 @@
/* $Date: 2006/04/28 19:20:17 $ $RCSfile: vsc7326_reg.h,v $ $Revision: 1.5 $ */
#ifndef _VSC7321_REG_H_
#define _VSC7321_REG_H_
/* Register definitions for Vitesse VSC7321 (Meigs II) MAC
*
* Straight off the data sheet, VMDS-10038 Rev 2.0 and
* PD0011-01-14-Meigs-II 2002-12-12
*/
/* Just 'cause it's in here doesn't mean it's used. */
#define CRA(blk,sub,adr) ((((blk) & 0x7) << 13) | (((sub) & 0xf) << 9) | (((adr) & 0xff) << 1))
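CRA() packs a 3-bit block, a 4-bit sub-block and an 8-bit register number into the word address used by vsc_read()/vsc_write(). A user-space sketch that packs REG_SW_RESET's coordinates and decodes them back (the decode masks mirror the commented-out debug prints in vsc7326.c):

#include <stdio.h>

#define CRA(blk,sub,adr) ((((blk) & 0x7) << 13) | (((sub) & 0xf) << 9) | (((adr) & 0xff) << 1))

int main(void)
{
	unsigned int r = CRA(0x7, 0xf, 0x02);	/* REG_SW_RESET */

	printf("addr=0x%04x block=0x%x sublock=0x%x reg=0x%02x\n",
	       r, (r & 0xe000) >> 13, (r & 0x1e00) >> 9, (r & 0x01fe) >> 1);
	return 0;
}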
/* System and CPU comm's registers */
#define REG_CHIP_ID CRA(0x7,0xf,0x00) /* Chip ID */
#define REG_BLADE_ID CRA(0x7,0xf,0x01) /* Blade ID */
#define REG_SW_RESET CRA(0x7,0xf,0x02) /* Global Soft Reset */
#define REG_MEM_BIST CRA(0x7,0xf,0x04) /* mem */
#define REG_IFACE_MODE CRA(0x7,0xf,0x07) /* Interface mode */
#define REG_MSCH CRA(0x7,0x2,0x06) /* CRC error count */
#define REG_CRC_CNT CRA(0x7,0x2,0x0a) /* CRC error count */
#define REG_CRC_CFG CRA(0x7,0x2,0x0b) /* CRC config */
#define REG_SI_TRANSFER_SEL CRA(0x7,0xf,0x18) /* SI Transfer Select */
#define REG_PLL_CLK_SPEED CRA(0x7,0xf,0x19) /* Clock Speed Selection */
#define REG_SYS_CLK_SELECT CRA(0x7,0xf,0x1c) /* System Clock Select */
#define REG_GPIO_CTRL CRA(0x7,0xf,0x1d) /* GPIO Control */
#define REG_GPIO_OUT CRA(0x7,0xf,0x1e) /* GPIO Out */
#define REG_GPIO_IN CRA(0x7,0xf,0x1f) /* GPIO In */
#define REG_CPU_TRANSFER_SEL CRA(0x7,0xf,0x20) /* CPU Transfer Select */
#define REG_LOCAL_DATA CRA(0x7,0xf,0xfe) /* Local CPU Data Register */
#define REG_LOCAL_STATUS CRA(0x7,0xf,0xff) /* Local CPU Status Register */
/* Aggregator registers */
#define REG_AGGR_SETUP CRA(0x7,0x1,0x00) /* Aggregator Setup */
#define REG_PMAP_TABLE CRA(0x7,0x1,0x01) /* Port map table */
#define REG_MPLS_BIT0 CRA(0x7,0x1,0x08) /* MPLS bit0 position */
#define REG_MPLS_BIT1 CRA(0x7,0x1,0x09) /* MPLS bit1 position */
#define REG_MPLS_BIT2 CRA(0x7,0x1,0x0a) /* MPLS bit2 position */
#define REG_MPLS_BIT3 CRA(0x7,0x1,0x0b) /* MPLS bit3 position */
#define REG_MPLS_BITMASK CRA(0x7,0x1,0x0c) /* MPLS bit mask */
#define REG_PRE_BIT0POS CRA(0x7,0x1,0x10) /* Preamble bit0 position */
#define REG_PRE_BIT1POS CRA(0x7,0x1,0x11) /* Preamble bit1 position */
#define REG_PRE_BIT2POS CRA(0x7,0x1,0x12) /* Preamble bit2 position */
#define REG_PRE_BIT3POS CRA(0x7,0x1,0x13) /* Preamble bit3 position */
#define REG_PRE_ERR_CNT CRA(0x7,0x1,0x14) /* Preamble parity error count */
/* BIST registers */
/*#define REG_RAM_BIST_CMD CRA(0x7,0x2,0x00)*/ /* RAM BIST Command Register */
/*#define REG_RAM_BIST_RESULT CRA(0x7,0x2,0x01)*/ /* RAM BIST Read Status/Result */
#define REG_RAM_BIST_CMD CRA(0x7,0x1,0x00) /* RAM BIST Command Register */
#define REG_RAM_BIST_RESULT CRA(0x7,0x1,0x01) /* RAM BIST Read Status/Result */
#define BIST_PORT_SELECT 0x00 /* BIST port select */
#define BIST_COMMAND 0x01 /* BIST enable/disable */
#define BIST_STATUS 0x02 /* BIST operation status */
#define BIST_ERR_CNT_LSB 0x03 /* BIST error count lo 8b */
#define BIST_ERR_CNT_MSB 0x04 /* BIST error count hi 8b */
#define BIST_ERR_SEL_LSB 0x05 /* BIST error select lo 8b */
#define BIST_ERR_SEL_MSB 0x06 /* BIST error select hi 8b */
#define BIST_ERROR_STATE 0x07 /* BIST engine internal state */
#define BIST_ERR_ADR0 0x08 /* BIST error address lo 8b */
#define BIST_ERR_ADR1 0x09 /* BIST error address lomid 8b */
#define BIST_ERR_ADR2 0x0a /* BIST error address himid 8b */
#define BIST_ERR_ADR3 0x0b /* BIST error address hi 8b */
/* FIFO registers
* ie = 0 for ingress, 1 for egress
* fn = FIFO number, 0-9
*/
#define REG_TEST(ie,fn) CRA(0x2,ie&1,0x00+fn) /* Mode & Test Register */
#define REG_TOP_BOTTOM(ie,fn) CRA(0x2,ie&1,0x10+fn) /* FIFO Buffer Top & Bottom */
#define REG_TAIL(ie,fn) CRA(0x2,ie&1,0x20+fn) /* FIFO Write Pointer */
#define REG_HEAD(ie,fn) CRA(0x2,ie&1,0x30+fn) /* FIFO Read Pointer */
#define REG_HIGH_LOW_WM(ie,fn) CRA(0x2,ie&1,0x40+fn) /* Flow Control Water Marks */
#define REG_CT_THRHLD(ie,fn) CRA(0x2,ie&1,0x50+fn) /* Cut Through Threshold */
#define REG_FIFO_DROP_CNT(ie,fn) CRA(0x2,ie&1,0x60+fn) /* Drop & CRC Error Counter */
#define REG_DEBUG_BUF_CNT(ie,fn) CRA(0x2,ie&1,0x70+fn) /* Input Side Debug Counter */
#define REG_BUCKI(fn) CRA(0x2,2,0x20+fn) /* Ingress bucket */
#define REG_BUCKE(fn) CRA(0x2,3,0x20+fn) /* Egress bucket */
/* Traffic shaper buckets
* ie = 0 for ingress, 1 for egress
* bn = bucket number 0-10 (yes, 11 buckets)
*/
/* OK, this one's kinda ugly. Some hardware designers are perverse. */
#define REG_TRAFFIC_SHAPER_BUCKET(ie,bn) CRA(0x2,ie&1,0x0a + (bn>7) | ((bn&7)<<4))
#define REG_TRAFFIC_SHAPER_CONTROL(ie) CRA(0x2,ie&1,0x3b)
#define REG_SRAM_ADR(ie) CRA(0x2,ie&1,0x0e) /* FIFO SRAM address */
#define REG_SRAM_WR_STRB(ie) CRA(0x2,ie&1,0x1e) /* FIFO SRAM write strobe */
#define REG_SRAM_RD_STRB(ie) CRA(0x2,ie&1,0x2e) /* FIFO SRAM read strobe */
#define REG_SRAM_DATA_0(ie) CRA(0x2,ie&1,0x3e) /* FIFO SRAM data lo 8b */
#define REG_SRAM_DATA_1(ie) CRA(0x2,ie&1,0x4e) /* FIFO SRAM data lomid 8b */
#define REG_SRAM_DATA_2(ie) CRA(0x2,ie&1,0x5e) /* FIFO SRAM data himid 8b */
#define REG_SRAM_DATA_3(ie) CRA(0x2,ie&1,0x6e) /* FIFO SRAM data hi 8b */
#define REG_SRAM_DATA_BLK_TYPE(ie) CRA(0x2,ie&1,0x7e) /* FIFO SRAM tag */
/* REG_ING_CONTROL equals REG_CONTROL with ie = 0, likewise REG_EGR_CONTROL is ie = 1 */
#define REG_CONTROL(ie) CRA(0x2,ie&1,0x0f) /* FIFO control */
#define REG_ING_CONTROL CRA(0x2,0x0,0x0f) /* Ingress control (alias) */
#define REG_EGR_CONTROL CRA(0x2,0x1,0x0f) /* Egress control (alias) */
#define REG_AGE_TIMER(ie) CRA(0x2,ie&1,0x1f) /* Aging timer */
#define REG_AGE_INC(ie) CRA(0x2,ie&1,0x2f) /* Aging increment */
#define DEBUG_OUT(ie) CRA(0x2,ie&1,0x3f) /* Output debug counter control */
#define DEBUG_CNT(ie) CRA(0x2,ie&1,0x4f) /* Output debug counter */
/* SPI4 interface */
#define REG_SPI4_MISC CRA(0x5,0x0,0x00) /* Misc Register */
#define REG_SPI4_STATUS CRA(0x5,0x0,0x01) /* CML Status */
#define REG_SPI4_ING_SETUP0 CRA(0x5,0x0,0x02) /* Ingress Status Channel Setup */
#define REG_SPI4_ING_SETUP1 CRA(0x5,0x0,0x03) /* Ingress Data Training Setup */
#define REG_SPI4_ING_SETUP2 CRA(0x5,0x0,0x04) /* Ingress Data Burst Size Setup */
#define REG_SPI4_EGR_SETUP0 CRA(0x5,0x0,0x05) /* Egress Status Channel Setup */
#define REG_SPI4_DBG_CNT(n) CRA(0x5,0x0,0x10+n) /* Debug counters 0-9 */
#define REG_SPI4_DBG_SETUP CRA(0x5,0x0,0x1A) /* Debug counters setup */
#define REG_SPI4_TEST CRA(0x5,0x0,0x20) /* Test Setup Register */
#define REG_TPGEN_UP0 CRA(0x5,0x0,0x21) /* Test Pattern generator user pattern 0 */
#define REG_TPGEN_UP1 CRA(0x5,0x0,0x22) /* Test Pattern generator user pattern 1 */
#define REG_TPCHK_UP0 CRA(0x5,0x0,0x23) /* Test Pattern checker user pattern 0 */
#define REG_TPCHK_UP1 CRA(0x5,0x0,0x24) /* Test Pattern checker user pattern 1 */
#define REG_TPSAM_P0 CRA(0x5,0x0,0x25) /* Sampled pattern 0 */
#define REG_TPSAM_P1 CRA(0x5,0x0,0x26) /* Sampled pattern 1 */
#define REG_TPERR_CNT CRA(0x5,0x0,0x27) /* Pattern checker error counter */
#define REG_SPI4_STICKY CRA(0x5,0x0,0x30) /* Sticky bits register */
#define REG_SPI4_DBG_INH CRA(0x5,0x0,0x31) /* Core egress & ingress inhibit */
#define REG_SPI4_DBG_STATUS CRA(0x5,0x0,0x32) /* Sampled ingress status */
#define REG_SPI4_DBG_GRANT CRA(0x5,0x0,0x33) /* Ingress granted credit value */
#define REG_SPI4_DESKEW CRA(0x5,0x0,0x43) /* Ingress data deskew */
/* 10GbE MAC Block Registers */
/* Note that those registers that are exactly the same for 10GbE as for
* tri-speed are only defined with the version that needs a port number.
* Pass 0xa in those cases.
*
* Also note that despite the presence of a MAC address register, this part
* does no ingress MAC address filtering. That register is used only for
* pause frame detection and generation.
*/
/* 10GbE specific, and different from tri-speed */
#define REG_MISC_10G CRA(0x1,0xa,0x00) /* Misc 10GbE setup */
#define REG_PAUSE_10G CRA(0x1,0xa,0x01) /* Pause register */
#define REG_NORMALIZER_10G CRA(0x1,0xa,0x05) /* 10G normalizer */
#define REG_STICKY_RX CRA(0x1,0xa,0x06) /* RX debug register */
#define REG_DENORM_10G CRA(0x1,0xa,0x07) /* Denormalizer */
#define REG_STICKY_TX CRA(0x1,0xa,0x08) /* TX sticky bits */
#define REG_MAX_RXHIGH CRA(0x1,0xa,0x0a) /* XGMII lane 0-3 debug */
#define REG_MAX_RXLOW CRA(0x1,0xa,0x0b) /* XGMII lane 4-7 debug */
#define REG_MAC_TX_STICKY CRA(0x1,0xa,0x0c) /* MAC Tx state sticky debug */
#define REG_MAC_TX_RUNNING CRA(0x1,0xa,0x0d) /* MAC Tx state running debug */
#define REG_TX_ABORT_AGE CRA(0x1,0xa,0x14) /* Aged Tx frames discarded */
#define REG_TX_ABORT_SHORT CRA(0x1,0xa,0x15) /* Short Tx frames discarded */
#define REG_TX_ABORT_TAXI CRA(0x1,0xa,0x16) /* Taxi error frames discarded */
#define REG_TX_ABORT_UNDERRUN CRA(0x1,0xa,0x17) /* Tx Underrun abort counter */
#define REG_TX_DENORM_DISCARD CRA(0x1,0xa,0x18) /* Tx denormalizer discards */
#define REG_XAUI_STAT_A CRA(0x1,0xa,0x20) /* XAUI status A */
#define REG_XAUI_STAT_B CRA(0x1,0xa,0x21) /* XAUI status B */
#define REG_XAUI_STAT_C CRA(0x1,0xa,0x22) /* XAUI status C */
#define REG_XAUI_CONF_A CRA(0x1,0xa,0x23) /* XAUI configuration A */
#define REG_XAUI_CONF_B CRA(0x1,0xa,0x24) /* XAUI configuration B */
#define REG_XAUI_CODE_GRP_CNT CRA(0x1,0xa,0x25) /* XAUI code group error count */
#define REG_XAUI_CONF_TEST_A CRA(0x1,0xa,0x26) /* XAUI test register A */
#define REG_PDERRCNT CRA(0x1,0xa,0x27) /* XAUI test register B */
/* pn = port number 0-9 for tri-speed, 10 for 10GbE */
/* Both tri-speed and 10GbE */
#define REG_MAX_LEN(pn) CRA(0x1,pn,0x02) /* Max length */
#define REG_MAC_HIGH_ADDR(pn) CRA(0x1,pn,0x03) /* Upper 24 bits of MAC addr */
#define REG_MAC_LOW_ADDR(pn) CRA(0x1,pn,0x04) /* Lower 24 bits of MAC addr */
/* tri-speed only
* pn = port number, 0-9
*/
#define REG_MODE_CFG(pn) CRA(0x1,pn,0x00) /* Mode configuration */
#define REG_PAUSE_CFG(pn) CRA(0x1,pn,0x01) /* Pause configuration */
#define REG_NORMALIZER(pn) CRA(0x1,pn,0x05) /* Normalizer */
#define REG_TBI_STATUS(pn) CRA(0x1,pn,0x06) /* TBI status */
#define REG_PCS_STATUS_DBG(pn) CRA(0x1,pn,0x07) /* PCS status debug */
#define REG_PCS_CTRL(pn) CRA(0x1,pn,0x08) /* PCS control */
#define REG_TBI_CONFIG(pn) CRA(0x1,pn,0x09) /* TBI configuration */
#define REG_STICK_BIT(pn) CRA(0x1,pn,0x0a) /* Sticky bits */
#define REG_DEV_SETUP(pn) CRA(0x1,pn,0x0b) /* MAC clock/reset setup */
#define REG_DROP_CNT(pn) CRA(0x1,pn,0x0c) /* Drop counter */
#define REG_PORT_POS(pn) CRA(0x1,pn,0x0d) /* Preamble port position */
#define REG_PORT_FAIL(pn) CRA(0x1,pn,0x0e) /* Port fail handling */
#define REG_SERDES_CONF(pn) CRA(0x1,pn,0x0f) /* SerDes configuration */
#define REG_SERDES_TEST(pn) CRA(0x1,pn,0x10) /* SerDes test */
#define REG_SERDES_STAT(pn) CRA(0x1,pn,0x11) /* SerDes status */
#define REG_SERDES_COM_CNT(pn) CRA(0x1,pn,0x12) /* SerDes comma counter */
#define REG_DENORM(pn) CRA(0x1,pn,0x15) /* Frame denormalization */
#define REG_DBG(pn) CRA(0x1,pn,0x16) /* Device 1G debug */
#define REG_TX_IFG(pn) CRA(0x1,pn,0x18) /* Tx IFG config */
#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */
/* Statistics */
/* CRA(0x4,pn,reg) */
/* reg below */
/* pn = port number, 0-a, a = 10GbE */
enum {
RxInBytes = 0x00, // # Rx in octets
RxSymbolCarrier = 0x01, // Frames w/ symbol errors
RxPause = 0x02, // # pause frames received
RxUnsupOpcode = 0x03, // # control frames with unsupported opcode
RxOkBytes = 0x04, // # octets in good frames
RxBadBytes = 0x05, // # octets in bad frames
RxUnicast = 0x06, // # good unicast frames
RxMulticast = 0x07, // # good multicast frames
RxBroadcast = 0x08, // # good broadcast frames
Crc = 0x09, // # frames w/ bad CRC only
RxAlignment = 0x0a, // # frames w/ alignment err
RxUndersize = 0x0b, // # frames undersize
RxFragments = 0x0c, // # frames undersize w/ crc err
RxInRangeLengthError = 0x0d, // # frames with length error
RxOutOfRangeError = 0x0e, // # frames with illegal length field
RxOversize = 0x0f, // # frames oversize
RxJabbers = 0x10, // # frames oversize w/ crc err
RxSize64 = 0x11, // # frames 64 octets long
RxSize65To127 = 0x12, // # frames 65-127 octets
RxSize128To255 = 0x13, // # frames 128-255
RxSize256To511 = 0x14, // # frames 256-511
RxSize512To1023 = 0x15, // # frames 512-1023
RxSize1024To1518 = 0x16, // # frames 1024-1518
RxSize1519ToMax = 0x17, // # frames 1519-max
TxOutBytes = 0x18, // # octets tx
TxPause = 0x19, // # pause frames sent
TxOkBytes = 0x1a, // # octets tx OK
TxUnicast = 0x1b, // # frames unicast
TxMulticast = 0x1c, // # frames multicast
TxBroadcast = 0x1d, // # frames broadcast
TxMultipleColl = 0x1e, // # frames tx after multiple collisions
TxLateColl = 0x1f, // # late collisions detected
TxXcoll = 0x20, // # frames lost, excessive collisions
TxDefer = 0x21, // # frames deferred on first tx attempt
TxXdefer = 0x22, // # frames excessively deferred
TxCsense = 0x23, // carrier sense errors at frame end
TxSize64 = 0x24, // # frames 64 octets long
TxSize65To127 = 0x25, // # frames 65-127 octets
TxSize128To255 = 0x26, // # frames 128-255
TxSize256To511 = 0x27, // # frames 256-511
TxSize512To1023 = 0x28, // # frames 512-1023
TxSize1024To1518 = 0x29, // # frames 1024-1518
TxSize1519ToMax = 0x2a, // # frames 1519-max
TxSingleColl = 0x2b, // # frames tx after single collision
TxBackoff2 = 0x2c, // # frames tx ok after 2 backoffs/collisions
TxBackoff3 = 0x2d, // after 3 backoffs/collisions
TxBackoff4 = 0x2e, // after 4
TxBackoff5 = 0x2f, // after 5
TxBackoff6 = 0x30, // after 6
TxBackoff7 = 0x31, // after 7
TxBackoff8 = 0x32, // after 8
TxBackoff9 = 0x33, // after 9
TxBackoff10 = 0x34, // after 10
TxBackoff11 = 0x35, // after 11
TxBackoff12 = 0x36, // after 12
TxBackoff13 = 0x37, // after 13
TxBackoff14 = 0x38, // after 14
TxBackoff15 = 0x39, // after 15
TxUnderrun = 0x3a, // # frames dropped from underrun
// Hole. See REG_RX_XGMII_PROT_ERR below.
RxIpgShrink = 0x3c, // # of IPG shrinks detected
// Duplicate. See REG_STAT_STICKY10G below.
StatSticky1G = 0x3e, // tri-speed sticky bits
StatInit = 0x3f // Clear all statistics
};
#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */
#define REG_STAT_STICKY10G CRA(0x4,0xa,StatSticky1G) /* 10GbE sticky bits */
#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,RxOkBytes)
#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,RxBadBytes)
#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,TxOkBytes)
/* MII-Management Block registers */
/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If
* we hooked up to the one with separate directions, the middle 0x0 needs to
* change to 0x1. And the current errata states that MII-M 1 doesn't work.
*/
#define REG_MIIM_STATUS CRA(0x3,0x0,0x00) /* MII-M Status */
#define REG_MIIM_CMD CRA(0x3,0x0,0x01) /* MII-M Command */
#define REG_MIIM_DATA CRA(0x3,0x0,0x02) /* MII-M Data */
#define REG_MIIM_PRESCALE CRA(0x3,0x0,0x03) /* MII-M MDC Prescale */
#define REG_ING_FFILT_UM_EN CRA(0x2, 0, 0xd)
#define REG_ING_FFILT_BE_EN CRA(0x2, 0, 0x1d)
#define REG_ING_FFILT_VAL0 CRA(0x2, 0, 0x2d)
#define REG_ING_FFILT_VAL1 CRA(0x2, 0, 0x3d)
#define REG_ING_FFILT_MASK0 CRA(0x2, 0, 0x4d)
#define REG_ING_FFILT_MASK1 CRA(0x2, 0, 0x5d)
#define REG_ING_FFILT_MASK2 CRA(0x2, 0, 0x6d)
#define REG_ING_FFILT_ETYPE CRA(0x2, 0, 0x7d)
/* Whew. */
#endif


@@ -0,0 +1,8 @@
#
# Chelsio T3 driver
#
obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
xgmac.o sge.o l2t.o cxgb3_offload.o aq100x.o


@@ -0,0 +1,334 @@
/*
* Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* This file should not be included directly. Include common.h instead. */
#ifndef __T3_ADAPTER_H__
#define __T3_ADAPTER_H__
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/mutex.h>
#include <linux/bitops.h>
#include "t3cdev.h"
#include <asm/io.h>
struct adapter;
struct sge_qset;
struct port_info;
enum mac_idx_types {
LAN_MAC_IDX = 0,
SAN_MAC_IDX,
MAX_MAC_IDX
};
struct iscsi_config {
__u8 mac_addr[ETH_ALEN];
__u32 flags;
int (*send)(struct port_info *pi, struct sk_buff **skb);
int (*recv)(struct port_info *pi, struct sk_buff *skb);
};
struct port_info {
struct adapter *adapter;
struct sge_qset *qs;
u8 port_id;
u8 nqsets;
u8 first_qset;
struct cphy phy;
struct cmac mac;
struct link_config link_config;
struct net_device_stats netstats;
int activity;
__be32 iscsi_ipv4addr;
struct iscsi_config iscsic;
int link_fault; /* link fault was detected */
};
enum { /* adapter flags */
FULL_INIT_DONE = (1 << 0),
USING_MSI = (1 << 1),
USING_MSIX = (1 << 2),
QUEUES_BOUND = (1 << 3),
TP_PARITY_INIT = (1 << 4),
NAPI_INIT = (1 << 5),
};
struct fl_pg_chunk {
struct page *page;
void *va;
unsigned int offset;
unsigned long *p_cnt;
dma_addr_t mapping;
};
struct rx_desc;
struct rx_sw_desc;
struct sge_fl { /* SGE per free-buffer list state */
unsigned int buf_size; /* size of each Rx buffer */
unsigned int credits; /* # of available Rx buffers */
unsigned int pend_cred; /* new buffers since last FL DB ring */
unsigned int size; /* capacity of free list */
unsigned int cidx; /* consumer index */
unsigned int pidx; /* producer index */
unsigned int gen; /* free list generation */
struct fl_pg_chunk pg_chunk;/* page chunk cache */
unsigned int use_pages; /* whether FL uses pages or sk_buffs */
unsigned int order; /* order of page allocations */
unsigned int alloc_size; /* size of allocated buffer */
struct rx_desc *desc; /* address of HW Rx descriptor ring */
struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
dma_addr_t phys_addr; /* physical address of HW ring start */
unsigned int cntxt_id; /* SGE context id for the free list */
unsigned long empty; /* # of times queue ran out of buffers */
unsigned long alloc_failed; /* # of times buffer allocation failed */
};
/*
* Bundle size for grouping offload RX packets for delivery to the stack.
* Don't make this too big as we do prefetch on each packet in a bundle.
*/
#define RX_BUNDLE_SIZE 8
struct rsp_desc;
struct sge_rspq { /* state for an SGE response queue */
unsigned int credits; /* # of pending response credits */
unsigned int size; /* capacity of response queue */
unsigned int cidx; /* consumer index */
unsigned int gen; /* current generation bit */
unsigned int polling; /* is the queue serviced through NAPI? */
unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
unsigned int next_holdoff; /* holdoff time for next interrupt */
unsigned int rx_recycle_buf; /* whether recycling occurred
within current sop-eop */
struct rsp_desc *desc; /* address of HW response ring */
dma_addr_t phys_addr; /* physical address of the ring */
unsigned int cntxt_id; /* SGE context id for the response q */
spinlock_t lock; /* guards response processing */
struct sk_buff_head rx_queue; /* offload packet receive queue */
struct sk_buff *pg_skb; /* used to build frag list in napi handler */
unsigned long offload_pkts;
unsigned long offload_bundles;
unsigned long eth_pkts; /* # of ethernet packets */
unsigned long pure_rsps; /* # of pure (non-data) responses */
unsigned long imm_data; /* responses with immediate data */
unsigned long rx_drops; /* # of packets dropped due to no mem */
unsigned long async_notif; /* # of asynchronous notification events */
unsigned long empty; /* # of times queue ran out of credits */
unsigned long nomem; /* # of responses deferred due to no mem */
unsigned long unhandled_irqs; /* # of spurious intrs */
unsigned long starved;
unsigned long restarted;
};
struct tx_desc;
struct tx_sw_desc;
struct sge_txq { /* state for an SGE Tx queue */
unsigned long flags; /* HW DMA fetch status */
unsigned int in_use; /* # of in-use Tx descriptors */
unsigned int size; /* # of descriptors */
unsigned int processed; /* total # of descs HW has processed */
unsigned int cleaned; /* total # of descs SW has reclaimed */
unsigned int stop_thres; /* SW TX queue suspend threshold */
unsigned int cidx; /* consumer index */
unsigned int pidx; /* producer index */
unsigned int gen; /* current value of generation bit */
unsigned int unacked; /* Tx descriptors used since last COMPL */
struct tx_desc *desc; /* address of HW Tx descriptor ring */
struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
spinlock_t lock; /* guards enqueueing of new packets */
unsigned int token; /* WR token */
dma_addr_t phys_addr; /* physical address of the ring */
struct sk_buff_head sendq; /* List of backpressured offload packets */
struct tasklet_struct qresume_tsk; /* restarts the queue */
unsigned int cntxt_id; /* SGE context id for the Tx q */
unsigned long stops; /* # of times q has been stopped */
unsigned long restarts; /* # of queue restarts */
};
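The cidx/pidx/gen triples above follow the usual SGE descriptor-ring convention: indices wrap at the ring size and the generation bit flips on each wrap, letting software distinguish freshly written descriptors from stale ones. A purely illustrative user-space sketch of that convention (RING_SIZE and the loop are made up for the demo):

#include <stdio.h>

#define RING_SIZE 4

int main(void)
{
	unsigned int pidx = 0, gen = 1, i;

	/* Advance the producer index 6 times; gen flips on each wrap. */
	for (i = 0; i < 6; i++) {
		printf("write slot %u with gen %u\n", pidx, gen);
		if (++pidx == RING_SIZE) {
			pidx = 0;
			gen ^= 1;
		}
	}
	return 0;
}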
enum { /* per port SGE statistics */
SGE_PSTAT_TSO, /* # of TSO requests */
SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
SGE_PSTAT_MAX /* must be last */
};
struct napi_gro_fraginfo;
struct sge_qset { /* an SGE queue set */
struct adapter *adap;
struct napi_struct napi;
struct sge_rspq rspq;
struct sge_fl fl[SGE_RXQ_PER_SET];
struct sge_txq txq[SGE_TXQ_PER_SET];
int nomem;
void *lro_va;
struct net_device *netdev;
struct netdev_queue *tx_q; /* associated netdev TX queue */
unsigned long txq_stopped; /* which Tx queues are stopped */
struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
struct timer_list rx_reclaim_timer; /* reclaims RX buffers */
unsigned long port_stats[SGE_PSTAT_MAX];
} ____cacheline_aligned;
struct sge {
struct sge_qset qs[SGE_QSETS];
spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
};
struct adapter {
struct t3cdev tdev;
struct list_head adapter_list;
void __iomem *regs;
struct pci_dev *pdev;
unsigned long registered_device_map;
unsigned long open_device_map;
unsigned long flags;
const char *name;
int msg_enable;
unsigned int mmio_len;
struct adapter_params params;
unsigned int slow_intr_mask;
unsigned long irq_stats[IRQ_NUM_STATS];
int msix_nvectors;
struct {
unsigned short vec;
char desc[22];
} msix_info[SGE_QSETS + 1];
/* T3 modules */
struct sge sge;
struct mc7 pmrx;
struct mc7 pmtx;
struct mc7 cm;
struct mc5 mc5;
struct net_device *port[MAX_NPORTS];
unsigned int check_task_cnt;
struct delayed_work adap_check_task;
struct work_struct ext_intr_handler_task;
struct work_struct fatal_error_handler_task;
struct work_struct link_fault_handler_task;
struct work_struct db_full_task;
struct work_struct db_empty_task;
struct work_struct db_drop_task;
struct dentry *debugfs_root;
struct mutex mdio_lock;
spinlock_t stats_lock;
spinlock_t work_lock;
struct sk_buff *nofail_skb;
};
static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
{
u32 val = readl(adapter->regs + reg_addr);
CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
return val;
}
static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
writel(val, adapter->regs + reg_addr);
}
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
return netdev_priv(adap->port[idx]);
}
static inline int phy2portid(struct cphy *phy)
{
struct adapter *adap = phy->adapter;
struct port_info *port0 = adap2pinfo(adap, 0);
return &port0->phy == phy ? 0 : 1;
}
#define OFFLOAD_DEVMAP_BIT 15
#define tdev2adap(d) container_of(d, struct adapter, tdev)
static inline int offload_running(struct adapter *adapter)
{
return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
void t3_os_ext_intr_handler(struct adapter *adapter);
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
int speed, int duplex, int fc);
void t3_os_phymod_changed(struct adapter *adap, int port_id);
void t3_os_link_fault(struct adapter *adapter, int port_id, int state);
void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
void t3_sge_start(struct adapter *adap);
void t3_sge_stop(struct adapter *adap);
void t3_start_sge_timers(struct adapter *adap);
void t3_stop_sge_timers(struct adapter *adap);
void t3_free_sge_resources(struct adapter *adap);
void t3_sge_err_intr_handler(struct adapter *adapter);
irq_handler_t t3_intr_handler(struct adapter *adap, int polling);
netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
int irq_vec_idx, const struct qset_params *p,
int ntxq, struct net_device *dev,
struct netdev_queue *netdevq);
extern struct workqueue_struct *cxgb3_wq;
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
#endif /* __T3_ADAPTER_H__ */


@@ -0,0 +1,941 @@
/*
* Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "common.h"
#include "regs.h"
enum {
AEL100X_TX_CONFIG1 = 0xc002,
AEL1002_PWR_DOWN_HI = 0xc011,
AEL1002_PWR_DOWN_LO = 0xc012,
AEL1002_XFI_EQL = 0xc015,
AEL1002_LB_EN = 0xc017,
AEL_OPT_SETTINGS = 0xc017,
AEL_I2C_CTRL = 0xc30a,
AEL_I2C_DATA = 0xc30b,
AEL_I2C_STAT = 0xc30c,
AEL2005_GPIO_CTRL = 0xc214,
AEL2005_GPIO_STAT = 0xc215,
AEL2020_GPIO_INTR = 0xc103, /* Latch High (LH) */
AEL2020_GPIO_CTRL = 0xc108, /* Store Clear (SC) */
AEL2020_GPIO_STAT = 0xc10c, /* Read Only (RO) */
AEL2020_GPIO_CFG = 0xc110, /* Read Write (RW) */
AEL2020_GPIO_SDA = 0, /* IN: i2c serial data */
AEL2020_GPIO_MODDET = 1, /* IN: Module Detect */
AEL2020_GPIO_0 = 3, /* IN: unassigned */
AEL2020_GPIO_1 = 2, /* OUT: unassigned */
AEL2020_GPIO_LSTAT = AEL2020_GPIO_1, /* wired to link status LED */
};
enum { edc_none, edc_sr, edc_twinax };
/* PHY module I2C device address */
enum {
MODULE_DEV_ADDR = 0xa0,
SFF_DEV_ADDR = 0xa2,
};
/* PHY transceiver type */
enum {
phy_transtype_unknown = 0,
phy_transtype_sfp = 3,
phy_transtype_xfp = 6,
};
#define AEL2005_MODDET_IRQ 4
struct reg_val {
unsigned short mmd_addr;
unsigned short reg_addr;
unsigned short clear_bits;
unsigned short set_bits;
};
static int set_phy_regs(struct cphy *phy, const struct reg_val *rv)
{
int err;
for (err = 0; rv->mmd_addr && !err; rv++) {
if (rv->clear_bits == 0xffff)
err = t3_mdio_write(phy, rv->mmd_addr, rv->reg_addr,
rv->set_bits);
else
err = t3_mdio_change_bits(phy, rv->mmd_addr,
rv->reg_addr, rv->clear_bits,
rv->set_bits);
}
return err;
}
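set_phy_regs() walks a table terminated by an all-zero entry; clear_bits == 0xffff selects a plain write, anything else a read-modify-write. A user-space sketch of the same walk against a shadow register (apply() is a hypothetical stand-in for the MDIO accessors):

#include <stdio.h>

struct reg_val {
	unsigned short mmd_addr;
	unsigned short reg_addr;
	unsigned short clear_bits;
	unsigned short set_bits;
};

static unsigned short apply(unsigned short cur, const struct reg_val *rv)
{
	if (rv->clear_bits == 0xffff)
		return rv->set_bits;			/* plain write */
	return (cur & ~rv->clear_bits) | rv->set_bits;	/* change bits */
}

int main(void)
{
	static const struct reg_val regs[] = {
		{ 1, 0xc003, 0xffff, 0x181 },	/* overwrite */
		{ 1, 0xc017, 0, 1 << 5 },	/* set bit 5, keep the rest */
		{ 0, 0, 0, 0 }			/* terminator, as in the tables below */
	};
	const struct reg_val *rv;
	unsigned short shadow = 0;

	for (rv = regs; rv->mmd_addr; rv++)
		shadow = apply(shadow, rv);
	printf("shadow = 0x%04x\n", shadow);	/* 0x01a1 */
	return 0;
}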
static void ael100x_txon(struct cphy *phy)
{
int tx_on_gpio =
phy->mdio.prtad == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
msleep(100);
t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
msleep(30);
}
/*
* Read an 8-bit word from a device attached to the PHY's i2c bus.
*/
static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr)
{
int i, err;
unsigned int stat, data;
err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL,
(dev_addr << 8) | (1 << 8) | word_addr);
if (err)
return err;
for (i = 0; i < 200; i++) {
msleep(1);
err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat);
if (err)
return err;
if ((stat & 3) == 1) {
err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA,
&data);
if (err)
return err;
return data >> 8;
}
}
CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %#x.%#x timed out\n",
phy->mdio.prtad, dev_addr, word_addr);
return -ETIMEDOUT;
}
static int ael1002_power_down(struct cphy *phy, int enable)
{
int err;
err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, !!enable);
if (!err)
err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_CTRL1_LPOWER, enable);
return err;
}
static int ael1002_reset(struct cphy *phy, int wait)
{
int err;
if ((err = ael1002_power_down(phy, 0)) ||
(err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL100X_TX_CONFIG1, 1)) ||
(err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_HI, 0)) ||
(err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_LO, 0)) ||
(err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_XFI_EQL, 0x18)) ||
(err = t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL1002_LB_EN,
0, 1 << 5)))
return err;
return 0;
}
static int ael1002_intr_noop(struct cphy *phy)
{
return 0;
}
/*
* Get link status for a 10GBASE-R device.
*/
static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed,
int *duplex, int *fc)
{
if (link_ok) {
unsigned int stat0, stat1, stat2;
int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
MDIO_PMA_RXDET, &stat0);
if (!err)
err = t3_mdio_read(phy, MDIO_MMD_PCS,
MDIO_PCS_10GBRT_STAT1, &stat1);
if (!err)
err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
MDIO_PHYXS_LNSTAT, &stat2);
if (err)
return err;
*link_ok = (stat0 & stat1 & (stat2 >> 12)) & 1;
}
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
return 0;
}
static struct cphy_ops ael1002_ops = {
.reset = ael1002_reset,
.intr_enable = ael1002_intr_noop,
.intr_disable = ael1002_intr_noop,
.intr_clear = ael1002_intr_noop,
.intr_handler = ael1002_intr_noop,
.get_link_status = get_link_status_r,
.power_down = ael1002_power_down,
.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
};
int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
"10GBASE-R");
ael100x_txon(phy);
return 0;
}
static int ael1006_reset(struct cphy *phy, int wait)
{
return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait);
}
static struct cphy_ops ael1006_ops = {
.reset = ael1006_reset,
.intr_enable = t3_phy_lasi_intr_enable,
.intr_disable = t3_phy_lasi_intr_disable,
.intr_clear = t3_phy_lasi_intr_clear,
.intr_handler = t3_phy_lasi_intr_handler,
.get_link_status = get_link_status_r,
.power_down = ael1002_power_down,
.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
};
int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
"10GBASE-SR");
ael100x_txon(phy);
return 0;
}
/*
* Decode our module type.
*/
static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms)
{
int v;
if (delay_ms)
msleep(delay_ms);
/* see SFF-8472 for below */
v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3);
if (v < 0)
return v;
if (v == 0x10)
return phy_modtype_sr;
if (v == 0x20)
return phy_modtype_lr;
if (v == 0x40)
return phy_modtype_lrm;
v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6);
if (v < 0)
return v;
if (v != 4)
goto unknown;
v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10);
if (v < 0)
return v;
if (v & 0x80) {
v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12);
if (v < 0)
return v;
return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax;
}
unknown:
return phy_modtype_unknown;
}
/*
* Code to support the Aeluros/NetLogic 2005 10Gb PHY.
*/
static int ael2005_setup_sr_edc(struct cphy *phy)
{
static const struct reg_val regs[] = {
{ MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 },
{ MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a },
{ MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 },
{ 0, 0, 0, 0 }
};
int i, err;
err = set_phy_regs(phy, regs);
if (err)
return err;
msleep(50);
if (phy->priv != edc_sr)
err = t3_get_edc_fw(phy, EDC_OPT_AEL2005,
EDC_OPT_AEL2005_SIZE);
if (err)
return err;
for (i = 0; i < EDC_OPT_AEL2005_SIZE / sizeof(u16) && !err; i += 2)
err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
phy->phy_cache[i],
phy->phy_cache[i + 1]);
if (!err)
phy->priv = edc_sr;
return err;
}
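/*
 * Twinax counterpart of the routine above; long twinax cables
 * additionally get transmitter pre-emphasis before the EDC load.
 */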
static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
{
static const struct reg_val regs[] = {
{ MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 },
{ 0, 0, 0, 0 }
};
static const struct reg_val preemphasis[] = {
{ MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 },
{ MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 },
{ 0, 0, 0, 0 }
};
int i, err;
err = set_phy_regs(phy, regs);
if (!err && modtype == phy_modtype_twinax_long)
err = set_phy_regs(phy, preemphasis);
if (err)
return err;
msleep(50);
if (phy->priv != edc_twinax)
err = t3_get_edc_fw(phy, EDC_TWX_AEL2005,
EDC_TWX_AEL2005_SIZE);
if (err)
return err;
for (i = 0; i < EDC_TWX_AEL2005_SIZE / sizeof(u16) && !err; i += 2)
err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
phy->phy_cache[i],
phy->phy_cache[i + 1]);
if (!err)
phy->priv = edc_twinax;
return err;
}
static int ael2005_get_module_type(struct cphy *phy, int delay_ms)
{
int v;
unsigned int stat;
v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, &stat);
if (v)
return v;
if (stat & (1 << 8)) /* module absent */
return phy_modtype_none;
return ael2xxx_get_module_type(phy, delay_ms);
}
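/*
 * Module-detect interrupt plumbing goes through AEL2005_GPIO_CTRL:
 * writing 0x200 arms the interrupt, 0x100 disarms it and 0xd00
 * acknowledges a pending module-detect event.
 */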
static int ael2005_intr_enable(struct cphy *phy)
{
int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x200);
return err ? err : t3_phy_lasi_intr_enable(phy);
}
static int ael2005_intr_disable(struct cphy *phy)
{
int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x100);
return err ? err : t3_phy_lasi_intr_disable(phy);
}
static int ael2005_intr_clear(struct cphy *phy)
{
int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0xd00);
return err ? err : t3_phy_lasi_intr_clear(phy);
}
static int ael2005_reset(struct cphy *phy, int wait)
{
static const struct reg_val regs0[] = {
{ MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 },
{ MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 },
{ MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 },
{ MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 },
{ MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8100 },
{ MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 },
{ MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 },
{ 0, 0, 0, 0 }
};
static const struct reg_val regs1[] = {
{ MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 },
{ MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 },
{ 0, 0, 0, 0 }
};
int err;
unsigned int lasi_ctrl;
err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
&lasi_ctrl);
if (err)
return err;
err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 0);
if (err)
return err;
msleep(125);
phy->priv = edc_none;
err = set_phy_regs(phy, regs0);
if (err)
return err;
msleep(50);
err = ael2005_get_module_type(phy, 0);
if (err < 0)
return err;
phy->modtype = err;
if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
err = ael2005_setup_twinax_edc(phy, err);
else
err = ael2005_setup_sr_edc(phy);
if (err)
return err;
err = set_phy_regs(phy, regs1);
if (err)
return err;
/* reset wipes out interrupts, reenable them if they were on */
if (lasi_ctrl & 1)
err = ael2005_intr_enable(phy);
return err;
}
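/*
 * On a module-detect interrupt, re-identify the module and, if it
 * needs a different EDC image than the one currently loaded, reset
 * the PHY so the right image is loaded.
 */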
static int ael2005_intr_handler(struct cphy *phy)
{
unsigned int stat;
int ret, edc_needed, cause = 0;
ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_STAT, &stat);
if (ret)
return ret;
if (stat & AEL2005_MODDET_IRQ) {
ret = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL,
0xd00);
if (ret)
return ret;
/* modules have max 300 ms init time after hot plug */
ret = ael2005_get_module_type(phy, 300);
if (ret < 0)
return ret;
phy->modtype = ret;
if (ret == phy_modtype_none)
edc_needed = phy->priv; /* on unplug retain EDC */
else if (ret == phy_modtype_twinax ||
ret == phy_modtype_twinax_long)
edc_needed = edc_twinax;
else
edc_needed = edc_sr;
if (edc_needed != phy->priv) {
ret = ael2005_reset(phy, 0);
return ret ? ret : cphy_cause_module_change;
}
cause = cphy_cause_module_change;
}
ret = t3_phy_lasi_intr_handler(phy);
if (ret < 0)
return ret;
ret |= cause;
return ret ? ret : cphy_cause_link_change;
}
static struct cphy_ops ael2005_ops = {
.reset = ael2005_reset,
.intr_enable = ael2005_intr_enable,
.intr_disable = ael2005_intr_disable,
.intr_clear = ael2005_intr_clear,
.intr_handler = ael2005_intr_handler,
.get_link_status = get_link_status_r,
.power_down = ael1002_power_down,
.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
};
int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &ael2005_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_IRQ, "10GBASE-R");
msleep(125);
return t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS, 0,
1 << 5);
}
/*
 * Set up EDC and other parameters for operation with an optical module.
 */
static int ael2020_setup_sr_edc(struct cphy *phy)
{
static const struct reg_val regs[] = {
/* set CDR offset to 10 */
{ MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a },
/* adjust 10G RX bias current */
{ MDIO_MMD_PMAPMD, 0xcb1b, 0xffff, 0x0200 },
{ MDIO_MMD_PMAPMD, 0xcb1c, 0xffff, 0x00f0 },
{ MDIO_MMD_PMAPMD, 0xcc06, 0xffff, 0x00e0 },
/* end */
{ 0, 0, 0, 0 }
};
int err;
err = set_phy_regs(phy, regs);
msleep(50);
if (err)
return err;
phy->priv = edc_sr;
return 0;
}
/*
 * Set up EDC and other parameters for operation with a TWINAX module.
 */
static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype)
{
/* set uC to 40MHz */
static const struct reg_val uCclock40MHz[] = {
{ MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 },
{ MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 },
{ 0, 0, 0, 0 }
};
/* activate uC clock */
static const struct reg_val uCclockActivate[] = {
{ MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 },
{ 0, 0, 0, 0 }
};
/* set PC to start of SRAM and activate uC */
static const struct reg_val uCactivate[] = {
{ MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 },
{ MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 },
{ 0, 0, 0, 0 }
};
int i, err;
/* set uC clock and activate it */
err = set_phy_regs(phy, uCclock40MHz);
msleep(500);
if (err)
return err;
err = set_phy_regs(phy, uCclockActivate);
msleep(500);
if (err)
return err;
if (phy->priv != edc_twinax)
err = t3_get_edc_fw(phy, EDC_TWX_AEL2020,
EDC_TWX_AEL2020_SIZE);
if (err)
return err;
for (i = 0; i < EDC_TWX_AEL2020_SIZE / sizeof(u16) && !err; i += 2)
err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
phy->phy_cache[i],
phy->phy_cache[i + 1]);
/* activate uC */
err = set_phy_regs(phy, uCactivate);
if (!err)
phy->priv = edc_twinax;
return err;
}
/*
* Return Module Type.
*/
static int ael2020_get_module_type(struct cphy *phy, int delay_ms)
{
int v;
unsigned int stat;
v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_STAT, &stat);
if (v)
return v;
if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) {
/* module absent */
return phy_modtype_none;
}
return ael2xxx_get_module_type(phy, delay_ms);
}
/*
 * Enable PHY interrupts.  We enable "Module Detection" interrupts (on any
 * state transition) and the generic Link Alarm Status Interrupt (LASI).
 */
static int ael2020_intr_enable(struct cphy *phy)
{
static const struct reg_val regs[] = {
/* output Module's Loss Of Signal (LOS) to LED */
{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT,
0xffff, 0x4 },
{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) },
/* enable module detect status change interrupts */
{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
0xffff, 0x2 << (AEL2020_GPIO_MODDET*4) },
/* end */
{ 0, 0, 0, 0 }
};
int err, link_ok = 0;
/* set up "link status" LED and enable module change interrupts */
err = set_phy_regs(phy, regs);
if (err)
return err;
err = get_link_status_r(phy, &link_ok, NULL, NULL, NULL);
if (err)
return err;
if (link_ok)
t3_link_changed(phy->adapter,
phy2portid(phy));
err = t3_phy_lasi_intr_enable(phy);
if (err)
return err;
return 0;
}
/*
* Disable PHY interrupts. The mirror of the above ...
*/
static int ael2020_intr_disable(struct cphy *phy)
{
static const struct reg_val regs[] = {
/* reset "link status" LED to "off" */
{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) },
/* disable module detect status change interrupts */
{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
0xffff, 0x1 << (AEL2020_GPIO_MODDET*4) },
/* end */
{ 0, 0, 0, 0 }
};
int err;
/* turn off "link status" LED and disable module change interrupts */
err = set_phy_regs(phy, regs);
if (err)
return err;
return t3_phy_lasi_intr_disable(phy);
}
/*
* Clear PHY interrupt state.
*/
static int ael2020_intr_clear(struct cphy *phy)
{
/*
* The GPIO Interrupt register on the AEL2020 is a "Latching High"
* (LH) register which is cleared to the current state when it's read.
* Thus, we simply read the register and discard the result.
*/
unsigned int stat;
int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
return err ? err : t3_phy_lasi_intr_clear(phy);
}
static const struct reg_val ael2020_reset_regs[] = {
/* Erratum #2: CDRLOL asserted, causing PMA link down status */
{ MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 },
/* force XAUI to send LF when RX_LOS is asserted */
{ MDIO_MMD_PMAPMD, 0xcd40, 0xffff, 0x0001 },
/* allow writes to transceiver module EEPROM on i2c bus */
{ MDIO_MMD_PMAPMD, 0xff02, 0xffff, 0x0023 },
{ MDIO_MMD_PMAPMD, 0xff03, 0xffff, 0x0000 },
{ MDIO_MMD_PMAPMD, 0xff04, 0xffff, 0x0000 },
/* end */
{ 0, 0, 0, 0 }
};
/*
* Reset the PHY and put it into a canonical operating state.
*/
static int ael2020_reset(struct cphy *phy, int wait)
{
int err;
unsigned int lasi_ctrl;
/* grab current interrupt state */
err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
&lasi_ctrl);
if (err)
return err;
err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 125);
if (err)
return err;
msleep(100);
/* basic initialization for all module types */
phy->priv = edc_none;
err = set_phy_regs(phy, ael2020_reset_regs);
if (err)
return err;
/* determine module type and perform appropriate initialization */
err = ael2020_get_module_type(phy, 0);
if (err < 0)
return err;
phy->modtype = (u8)err;
if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
err = ael2020_setup_twinax_edc(phy, err);
else
err = ael2020_setup_sr_edc(phy);
if (err)
return err;
/* reset wipes out interrupts, reenable them if they were on */
if (lasi_ctrl & 1)
err = ael2005_intr_enable(phy);
return err;
}
/*
* Handle a PHY interrupt.
*/
static int ael2020_intr_handler(struct cphy *phy)
{
unsigned int stat;
int ret, edc_needed, cause = 0;
ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
if (ret)
return ret;
if (stat & (0x1 << AEL2020_GPIO_MODDET)) {
/* modules have max 300 ms init time after hot plug */
ret = ael2020_get_module_type(phy, 300);
if (ret < 0)
return ret;
phy->modtype = (u8)ret;
if (ret == phy_modtype_none)
edc_needed = phy->priv; /* on unplug retain EDC */
else if (ret == phy_modtype_twinax ||
ret == phy_modtype_twinax_long)
edc_needed = edc_twinax;
else
edc_needed = edc_sr;
if (edc_needed != phy->priv) {
ret = ael2020_reset(phy, 0);
return ret ? ret : cphy_cause_module_change;
}
cause = cphy_cause_module_change;
}
ret = t3_phy_lasi_intr_handler(phy);
if (ret < 0)
return ret;
ret |= cause;
return ret ? ret : cphy_cause_link_change;
}
static struct cphy_ops ael2020_ops = {
.reset = ael2020_reset,
.intr_enable = ael2020_intr_enable,
.intr_disable = ael2020_intr_disable,
.intr_clear = ael2020_intr_clear,
.intr_handler = ael2020_intr_handler,
.get_link_status = get_link_status_r,
.power_down = ael1002_power_down,
.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
};
int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
const struct mdio_ops *mdio_ops)
{
int err;
cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_IRQ, "10GBASE-R");
msleep(125);
err = set_phy_regs(phy, ael2020_reset_regs);
if (err)
return err;
return 0;
}
/*
* Get link status for a 10GBASE-X device.
*/
static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed,
int *duplex, int *fc)
{
if (link_ok) {
unsigned int stat0, stat1, stat2;
int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
MDIO_PMA_RXDET, &stat0);
if (!err)
err = t3_mdio_read(phy, MDIO_MMD_PCS,
MDIO_PCS_10GBX_STAT1, &stat1);
if (!err)
err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
MDIO_PHYXS_LNSTAT, &stat2);
if (err)
return err;
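/*
 * Unlike 10GBASE-R, the 10GBASE-X PCS reports lane alignment in
 * STAT1 bit 12 (as does the PHY XS in LNSTAT), hence the shifts.
 */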
*link_ok = (stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1;
}
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
return 0;
}
static struct cphy_ops qt2045_ops = {
.reset = ael1006_reset,
.intr_enable = t3_phy_lasi_intr_enable,
.intr_disable = t3_phy_lasi_intr_disable,
.intr_clear = t3_phy_lasi_intr_clear,
.intr_handler = t3_phy_lasi_intr_handler,
.get_link_status = get_link_status_x,
.power_down = ael1002_power_down,
.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
};
int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
unsigned int stat;
cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
"10GBASE-CX4");
/*
* Some cards where the PHY is supposed to be at address 0 actually
* have it at 1.
*/
if (!phy_addr &&
!t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat) &&
stat == 0xffff)
phy->mdio.prtad = 1;
return 0;
}
static int xaui_direct_reset(struct cphy *phy, int wait)
{
return 0;
}
static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
int *speed, int *duplex, int *fc)
{
if (link_ok) {
unsigned int status;
int prtad = phy->mdio.prtad;
status = t3_read_reg(phy->adapter,
XGM_REG(A_XGM_SERDES_STAT0, prtad)) |
t3_read_reg(phy->adapter,
XGM_REG(A_XGM_SERDES_STAT1, prtad)) |
t3_read_reg(phy->adapter,
XGM_REG(A_XGM_SERDES_STAT2, prtad)) |
t3_read_reg(phy->adapter,
XGM_REG(A_XGM_SERDES_STAT3, prtad));
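/* the link is up only if no SerDes status register reports low signal */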
*link_ok = !(status & F_LOWSIG0);
}
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
return 0;
}
static int xaui_direct_power_down(struct cphy *phy, int enable)
{
return 0;
}
static struct cphy_ops xaui_direct_ops = {
.reset = xaui_direct_reset,
.intr_enable = ael1002_intr_noop,
.intr_disable = ael1002_intr_noop,
.intr_clear = ael1002_intr_noop,
.intr_handler = ael1002_intr_noop,
.get_link_status = xaui_direct_get_link_status,
.power_down = xaui_direct_power_down,
};
int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
"10GBASE-CX4");
return 0;
}


@@ -0,0 +1,354 @@
/*
* Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "common.h"
#include "regs.h"
enum {
/* MDIO_DEV_PMA_PMD registers */
AQ_LINK_STAT = 0xe800,
AQ_IMASK_PMA = 0xf000,
/* MDIO_DEV_XGXS registers */
AQ_XAUI_RX_CFG = 0xc400,
AQ_XAUI_TX_CFG = 0xe400,
/* MDIO_DEV_ANEG registers */
AQ_1G_CTRL = 0xc400,
AQ_ANEG_STAT = 0xc800,
/* MDIO_DEV_VEND1 registers */
AQ_FW_VERSION = 0x0020,
AQ_IFLAG_GLOBAL = 0xfc00,
AQ_IMASK_GLOBAL = 0xff00,
};
enum {
IMASK_PMA = 1 << 2,
IMASK_GLOBAL = 1 << 15,
ADV_1G_FULL = 1 << 15,
ADV_1G_HALF = 1 << 14,
ADV_10G_FULL = 1 << 12,
AQ_RESET = (1 << 14) | (1 << 15),
AQ_LOWPOWER = 1 << 12,
};
static int aq100x_reset(struct cphy *phy, int wait)
{
/*
 * Ignore the caller-specified wait time; always wait for the reset to
 * complete, which can take up to 3s.
 */
int err = t3_phy_reset(phy, MDIO_MMD_VEND1, 3000);
if (err)
CH_WARN(phy->adapter, "PHY%d: reset failed (0x%x).\n",
phy->mdio.prtad, err);
return err;
}
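/*
 * Interrupt sources are cascaded: the PMA link-status source is
 * unmasked in AQ_IMASK_PMA and the vendor-global mask AQ_IMASK_GLOBAL
 * gates what actually reaches the interrupt pin.
 */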
static int aq100x_intr_enable(struct cphy *phy)
{
int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AQ_IMASK_PMA, IMASK_PMA);
if (err)
return err;
err = t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, IMASK_GLOBAL);
return err;
}
static int aq100x_intr_disable(struct cphy *phy)
{
return t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, 0);
}
static int aq100x_intr_clear(struct cphy *phy)
{
unsigned int v;
t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &v);
t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
return 0;
}
static int aq100x_intr_handler(struct cphy *phy)
{
int err;
unsigned int cause, v;
err = t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &cause);
if (err)
return err;
/* Read (and reset) the latching version of the status */
t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
return cphy_cause_link_change;
}
static int aq100x_power_down(struct cphy *phy, int off)
{
return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_CTRL1_LPOWER, off);
}
static int aq100x_autoneg_enable(struct cphy *phy)
{
int err;
err = aq100x_power_down(phy, 0);
if (!err)
err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
MDIO_MMD_AN, MDIO_CTRL1,
BMCR_ANENABLE | BMCR_ANRESTART, 1);
return err;
}
static int aq100x_autoneg_restart(struct cphy *phy)
{
int err;
err = aq100x_power_down(phy, 0);
if (!err)
err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
MDIO_MMD_AN, MDIO_CTRL1,
BMCR_ANENABLE | BMCR_ANRESTART, 1);
return err;
}
static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map)
{
unsigned int adv;
int err;
/* 10G advertisement */
adv = 0;
if (advertise_map & ADVERTISED_10000baseT_Full)
adv |= ADV_10G_FULL;
err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
ADV_10G_FULL, adv);
if (err)
return err;
/* 1G advertisement */
adv = 0;
if (advertise_map & ADVERTISED_1000baseT_Full)
adv |= ADV_1G_FULL;
if (advertise_map & ADVERTISED_1000baseT_Half)
adv |= ADV_1G_HALF;
err = t3_mdio_change_bits(phy, MDIO_MMD_AN, AQ_1G_CTRL,
ADV_1G_FULL | ADV_1G_HALF, adv);
if (err)
return err;
/* 100M, pause advertisement */
adv = 0;
if (advertise_map & ADVERTISED_100baseT_Half)
adv |= ADVERTISE_100HALF;
if (advertise_map & ADVERTISED_100baseT_Full)
adv |= ADVERTISE_100FULL;
if (advertise_map & ADVERTISED_Pause)
adv |= ADVERTISE_PAUSE_CAP;
if (advertise_map & ADVERTISED_Asym_Pause)
adv |= ADVERTISE_PAUSE_ASYM;
err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
0xfe0, adv);
return err;
}
static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable)
{
return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
MDIO_MMD_PMAPMD, MDIO_CTRL1,
BMCR_LOOPBACK, enable);
}
static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex)
{
/* manual speed/duplex selection is not supported */
return -1;
}
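/*
 * AQ_LINK_STAT bit 0 reports link; AQ_ANEG_STAT encodes the negotiated
 * speed in bits 2:1 (10G/1G/100M/10M) and full duplex in bit 0.
 */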
static int aq100x_get_link_status(struct cphy *phy, int *link_ok,
int *speed, int *duplex, int *fc)
{
int err;
unsigned int v;
if (link_ok) {
err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AQ_LINK_STAT, &v);
if (err)
return err;
*link_ok = v & 1;
if (!*link_ok)
return 0;
}
err = t3_mdio_read(phy, MDIO_MMD_AN, AQ_ANEG_STAT, &v);
if (err)
return err;
if (speed) {
switch (v & 0x6) {
case 0x6:
*speed = SPEED_10000;
break;
case 0x4:
*speed = SPEED_1000;
break;
case 0x2:
*speed = SPEED_100;
break;
case 0x0:
*speed = SPEED_10;
break;
}
}
if (duplex)
*duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF;
return 0;
}
static struct cphy_ops aq100x_ops = {
.reset = aq100x_reset,
.intr_enable = aq100x_intr_enable,
.intr_disable = aq100x_intr_disable,
.intr_clear = aq100x_intr_clear,
.intr_handler = aq100x_intr_handler,
.autoneg_enable = aq100x_autoneg_enable,
.autoneg_restart = aq100x_autoneg_restart,
.advertise = aq100x_advertise,
.set_loopback = aq100x_set_loopback,
.set_speed_duplex = aq100x_set_speed_duplex,
.get_link_status = aq100x_get_link_status,
.power_down = aq100x_power_down,
.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
};
int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
const struct mdio_ops *mdio_ops)
{
unsigned int v, v2, gpio, wait;
int err;
cphy_init(phy, adapter, phy_addr, &aq100x_ops, mdio_ops,
SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full |
SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_AUI,
"1000/10GBASE-T");
/*
 * The PHY has been out of reset since the system powered up, so force
 * a hard reset here via the GPIO pin that drives its reset line.
 */
gpio = phy_addr ? F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL;
t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0);
msleep(1);
t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio);
/*
* Give it enough time to load the firmware and get ready for mdio.
*/
msleep(1000);
wait = 500; /* in 10ms increments */
do {
err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
if (err || v == 0xffff) {
/* Allow prep_adapter to succeed when ffff is read */
CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n",
phy_addr, err, v);
goto done;
}
v &= AQ_RESET;
if (v)
msleep(10);
} while (v && --wait);
if (v) {
CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n",
phy_addr, v);
goto done; /* let prep_adapter succeed */
}
/* The datasheet says 3s max, but longer resets have been observed */
wait = (500 - wait) * 10 + 1000;
if (wait > 3000)
CH_WARN(adapter, "PHY%d: reset took %ums\n", phy_addr, wait);
/* Firmware version check. */
t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_FW_VERSION, &v);
if (v != 101)
CH_WARN(adapter, "PHY%d: unsupported firmware %d\n",
phy_addr, v);
/*
 * The PHY should come up in low-power mode.  Prepare it for normal
 * operation.
 */
err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
if (err)
return err;
if (v & AQ_LOWPOWER) {
err = t3_mdio_change_bits(phy, MDIO_MMD_VEND1, MDIO_CTRL1,
AQ_LOWPOWER, 0);
if (err)
return err;
msleep(10);
} else
CH_WARN(adapter, "PHY%d does not start in low power mode.\n",
phy_addr);
/*
* Verify XAUI settings, but let prep succeed no matter what.
*/
v = v2 = 0;
t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_RX_CFG, &v);
t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_TX_CFG, &v2);
if (v != 0x1b || v2 != 0x1b)
CH_WARN(adapter,
"PHY%d: incorrect XAUI settings (0x%x, 0x%x).\n",
phy_addr, v, v2);
done:
return err;
}


@@ -0,0 +1,775 @@
/*
* Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CHELSIO_COMMON_H
#define __CHELSIO_COMMON_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include "version.h"
#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
#define CH_ALERT(adap, fmt, ...) \
dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
/*
* More powerful macro that selectively prints messages based on msg_enable.
* For info and debugging messages.
*/
#define CH_MSG(adapter, level, category, fmt, ...) do { \
if ((adapter)->msg_enable & NETIF_MSG_##category) \
dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
## __VA_ARGS__); \
} while (0)
#ifdef DEBUG
# define CH_DBG(adapter, category, fmt, ...) \
CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
#else
# define CH_DBG(adapter, category, fmt, ...)
#endif
/* Additional NETIF_MSG_* categories */
#define NETIF_MSG_MMIO 0x8000000
enum {
MAX_NPORTS = 2, /* max # of ports */
MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
EEPROMSIZE = 8192, /* Serial EEPROM size */
SERNUM_LEN = 16, /* Serial # length */
RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
PROTO_SRAM_LINES = 128, /* size of TP sram */
};
#define MAX_RX_COALESCING_LEN 12288U
enum {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
enum {
SUPPORTED_IRQ = 1 << 24 /* private capability bit: PHY raises interrupts */
};
enum { /* adapter interrupt-maintained statistics */
STAT_ULP_CH0_PBL_OOB,
STAT_ULP_CH1_PBL_OOB,
STAT_PCI_CORR_ECC,
IRQ_NUM_STATS /* keep last */
};
#define TP_VERSION_MAJOR 1
#define TP_VERSION_MINOR 1
#define TP_VERSION_MICRO 0
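/*
 * Register field helpers below follow the usual Chelsio convention:
 * S_* is the field's bit offset, M_* its mask, V_*(x) positions a
 * value into the field and G_*(x) extracts it.
 */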
#define S_TP_VERSION_MAJOR 16
#define M_TP_VERSION_MAJOR 0xFF
#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR)
#define G_TP_VERSION_MAJOR(x) \
(((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)
#define S_TP_VERSION_MINOR 8
#define M_TP_VERSION_MINOR 0xFF
#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR)
#define G_TP_VERSION_MINOR(x) \
(((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)
#define S_TP_VERSION_MICRO 0
#define M_TP_VERSION_MICRO 0xFF
#define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO)
#define G_TP_VERSION_MICRO(x) \
(((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
enum {
SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
};
enum sge_context_type { /* SGE egress context types */
SGE_CNTXT_RDMA = 0,
SGE_CNTXT_ETH = 2,
SGE_CNTXT_OFLD = 4,
SGE_CNTXT_CTRL = 5
};
enum {
AN_PKT_SIZE = 32, /* async notification packet size */
IMMED_PKT_SIZE = 48 /* packet size for immediate data */
};
struct sg_ent { /* SGE scatter/gather entry */
__be32 len[2];
__be64 addr[2];
};
#ifndef SGE_NUM_GENBITS
/* Must be 1 or 2 */
# define SGE_NUM_GENBITS 2
#endif
#define TX_DESC_FLITS 16U
#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
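/* a flit is an 8-byte unit; WR_FLITS is the maximum work request size
 * in flits once the generation bits are accounted for */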
struct cphy;
struct adapter;
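/* low-level MDIO accessors; each adapter type supplies its own set
 * (see struct adapter_info below) */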
struct mdio_ops {
int (*read)(struct net_device *dev, int phy_addr, int mmd_addr,
u16 reg_addr);
int (*write)(struct net_device *dev, int phy_addr, int mmd_addr,
u16 reg_addr, u16 val);
unsigned mode_support;
};
struct adapter_info {
unsigned char nports0; /* # of ports on channel 0 */
unsigned char nports1; /* # of ports on channel 1 */
unsigned char phy_base_addr; /* MDIO PHY base address */
unsigned int gpio_out; /* GPIO output settings */
unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
unsigned long caps; /* adapter capabilities */
const struct mdio_ops *mdio_ops; /* MDIO operations */
const char *desc; /* product description */
};
struct mc5_stats {
unsigned long parity_err;
unsigned long active_rgn_full;
unsigned long nfa_srch_err;
unsigned long unknown_cmd;
unsigned long reqq_parity_err;
unsigned long dispq_parity_err;
unsigned long del_act_empty;
};
struct mc7_stats {
unsigned long corr_err;
unsigned long uncorr_err;
unsigned long parity_err;
unsigned long addr_err;
};
struct mac_stats {
u64 tx_octets; /* total # of octets in good frames */
u64 tx_octets_bad; /* total # of octets in error frames */
u64 tx_frames; /* all good frames */
u64 tx_mcast_frames; /* good multicast frames */
u64 tx_bcast_frames; /* good broadcast frames */
u64 tx_pause; /* # of transmitted pause frames */
u64 tx_deferred; /* frames with deferred transmissions */
u64 tx_late_collisions; /* # of late collisions */
u64 tx_total_collisions; /* # of total collisions */
u64 tx_excess_collisions; /* frame errors from excessive collisions */
u64 tx_underrun; /* # of Tx FIFO underruns */
u64 tx_len_errs; /* # of Tx length errors */
u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
u64 tx_excess_deferral; /* # of frames with excessive deferral */
u64 tx_fcs_errs; /* # of frames with bad FCS */
u64 tx_frames_64; /* # of Tx frames in a particular range */
u64 tx_frames_65_127;
u64 tx_frames_128_255;
u64 tx_frames_256_511;
u64 tx_frames_512_1023;
u64 tx_frames_1024_1518;
u64 tx_frames_1519_max;
u64 rx_octets; /* total # of octets in good frames */
u64 rx_octets_bad; /* total # of octets in error frames */
u64 rx_frames; /* all good frames */
u64 rx_mcast_frames; /* good multicast frames */
u64 rx_bcast_frames; /* good broadcast frames */
u64 rx_pause; /* # of received pause frames */
u64 rx_fcs_errs; /* # of received frames with bad FCS */
u64 rx_align_errs; /* alignment errors */
u64 rx_symbol_errs; /* symbol errors */
u64 rx_data_errs; /* data errors */
u64 rx_sequence_errs; /* sequence errors */
u64 rx_runt; /* # of runt frames */
u64 rx_jabber; /* # of jabber frames */
u64 rx_short; /* # of short frames */
u64 rx_too_long; /* # of oversized frames */
u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
u64 rx_frames_64; /* # of Rx frames in a particular range */
u64 rx_frames_65_127;
u64 rx_frames_128_255;
u64 rx_frames_256_511;
u64 rx_frames_512_1023;
u64 rx_frames_1024_1518;
u64 rx_frames_1519_max;
u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
unsigned long tx_fifo_parity_err;
unsigned long rx_fifo_parity_err;
unsigned long tx_fifo_urun;
unsigned long rx_fifo_ovfl;
unsigned long serdes_signal_loss;
unsigned long xaui_pcs_ctc_err;
unsigned long xaui_pcs_align_change;
unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */
unsigned long num_resets; /* # times reset due to stuck TX */
unsigned long link_faults; /* # detected link faults */
};
struct tp_mib_stats {
u32 ipInReceive_hi;
u32 ipInReceive_lo;
u32 ipInHdrErrors_hi;
u32 ipInHdrErrors_lo;
u32 ipInAddrErrors_hi;
u32 ipInAddrErrors_lo;
u32 ipInUnknownProtos_hi;
u32 ipInUnknownProtos_lo;
u32 ipInDiscards_hi;
u32 ipInDiscards_lo;
u32 ipInDelivers_hi;
u32 ipInDelivers_lo;
u32 ipOutRequests_hi;
u32 ipOutRequests_lo;
u32 ipOutDiscards_hi;
u32 ipOutDiscards_lo;
u32 ipOutNoRoutes_hi;
u32 ipOutNoRoutes_lo;
u32 ipReasmTimeout;
u32 ipReasmReqds;
u32 ipReasmOKs;
u32 ipReasmFails;
u32 reserved[8];
u32 tcpActiveOpens;
u32 tcpPassiveOpens;
u32 tcpAttemptFails;
u32 tcpEstabResets;
u32 tcpOutRsts;
u32 tcpCurrEstab;
u32 tcpInSegs_hi;
u32 tcpInSegs_lo;
u32 tcpOutSegs_hi;
u32 tcpOutSegs_lo;
u32 tcpRetransSeg_hi;
u32 tcpRetransSeg_lo;
u32 tcpInErrs_hi;
u32 tcpInErrs_lo;
u32 tcpRtoMin;
u32 tcpRtoMax;
};
struct tp_params {
unsigned int nchan; /* # of channels */
unsigned int pmrx_size; /* total PMRX capacity */
unsigned int pmtx_size; /* total PMTX capacity */
unsigned int cm_size; /* total CM capacity */
unsigned int chan_rx_size; /* per channel Rx size */
unsigned int chan_tx_size; /* per channel Tx size */
unsigned int rx_pg_size; /* Rx page size */
unsigned int tx_pg_size; /* Tx page size */
unsigned int rx_num_pgs; /* # of Rx pages */
unsigned int tx_num_pgs; /* # of Tx pages */
unsigned int ntimer_qs; /* # of timer queues */
};
struct qset_params { /* SGE queue set parameters */
unsigned int polling; /* polling/interrupt service for rspq */
unsigned int coalesce_usecs; /* irq coalescing timer */
unsigned int rspq_size; /* # of entries in response queue */
unsigned int fl_size; /* # of entries in regular free list */
unsigned int jumbo_size; /* # of entries in jumbo free list */
unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
unsigned int cong_thres; /* FL congestion threshold */
unsigned int vector; /* Interrupt (line or vector) number */
};
struct sge_params {
unsigned int max_pkt_size; /* max offload pkt size */
struct qset_params qset[SGE_QSETS];
};
struct mc5_params {
unsigned int mode; /* selects MC5 width */
unsigned int nservers; /* size of server region */
unsigned int nfilters; /* size of filter region */
unsigned int nroutes; /* size of routing region */
};
/* Default MC5 region sizes */
enum {
DEFAULT_NSERVERS = 512,
DEFAULT_NFILTERS = 128
};
/* MC5 modes, these must be non-0 */
enum {
MC5_MODE_144_BIT = 1,
MC5_MODE_72_BIT = 2
};
/* MC5 min active region size */
enum { MC5_MIN_TIDS = 16 };
struct vpd_params {
unsigned int cclk;
unsigned int mclk;
unsigned int uclk;
unsigned int mdc;
unsigned int mem_timing;
u8 sn[SERNUM_LEN + 1];
u8 eth_base[6];
u8 port_type[MAX_NPORTS];
unsigned short xauicfg[2];
};
struct pci_params {
unsigned int vpd_cap_addr;
unsigned short speed;
unsigned char width;
unsigned char variant;
};
enum {
PCI_VARIANT_PCI,
PCI_VARIANT_PCIX_MODE1_PARITY,
PCI_VARIANT_PCIX_MODE1_ECC,
PCI_VARIANT_PCIX_266_MODE2,
PCI_VARIANT_PCIE
};
struct adapter_params {
struct sge_params sge;
struct mc5_params mc5;
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
const struct adapter_info *info;
unsigned short mtus[NMTUS];
unsigned short a_wnd[NCCTRL_WIN];
unsigned short b_wnd[NCCTRL_WIN];
unsigned int nports; /* # of ethernet ports */
unsigned int chan_map; /* bitmap of in-use Tx channels */
unsigned int stats_update_period; /* MAC stats accumulation period */
unsigned int linkpoll_period; /* link poll period in 0.1s */
unsigned int rev; /* chip revision */
unsigned int offload; /* nonzero if the adapter does offloading */
};
enum { /* chip revisions */
T3_REV_A = 0,
T3_REV_B = 2,
T3_REV_B2 = 3,
T3_REV_C = 4,
};
struct trace_params {
u32 sip;
u32 sip_mask;
u32 dip;
u32 dip_mask;
u16 sport;
u16 sport_mask;
u16 dport;
u16 dport_mask;
u32 vlan:12;
u32 vlan_mask:12;
u32 intf:4;
u32 intf_mask:4;
u8 proto;
u8 proto_mask;
};
struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
unsigned short requested_speed; /* speed user has requested */
unsigned short speed; /* actual link speed */
unsigned char requested_duplex; /* duplex user has requested */
unsigned char duplex; /* actual link duplex */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
unsigned int link_ok; /* link up? */
};
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
struct mc5 {
struct adapter *adapter;
unsigned int tcam_size;
unsigned char part_type;
unsigned char parity_enabled;
unsigned char mode;
struct mc5_stats stats;
};
static inline unsigned int t3_mc5_size(const struct mc5 *p)
{
return p->tcam_size;
}
struct mc7 {
struct adapter *adapter; /* backpointer to adapter */
unsigned int size; /* memory size in bytes */
unsigned int width; /* MC7 interface width */
unsigned int offset; /* register address offset for MC7 instance */
const char *name; /* name of MC7 instance */
struct mc7_stats stats; /* MC7 statistics */
};
static inline unsigned int t3_mc7_size(const struct mc7 *p)
{
return p->size;
}
struct cmac {
struct adapter *adapter;
unsigned int offset;
unsigned int nucast; /* # of address filters for unicast MACs */
unsigned int tx_tcnt;
unsigned int tx_xcnt;
u64 tx_mcnt;
unsigned int rx_xcnt;
unsigned int rx_ocnt;
u64 rx_mcnt;
unsigned int toggle_cnt;
unsigned int txen;
u64 rx_pause;
struct mac_stats stats;
};
enum {
MAC_DIRECTION_RX = 1,
MAC_DIRECTION_TX = 2,
MAC_RXFIFO_SIZE = 32768
};
/* PHY loopback direction */
enum {
PHY_LOOPBACK_TX = 1,
PHY_LOOPBACK_RX = 2
};
/* PHY interrupt types */
enum {
cphy_cause_link_change = 1,
cphy_cause_fifo_error = 2,
cphy_cause_module_change = 4,
};
/* PHY module types */
enum {
phy_modtype_none,
phy_modtype_sr,
phy_modtype_lr,
phy_modtype_lrm,
phy_modtype_twinax,
phy_modtype_twinax_long,
phy_modtype_unknown
};
/* PHY operations */
struct cphy_ops {
int (*reset)(struct cphy *phy, int wait);
int (*intr_enable)(struct cphy *phy);
int (*intr_disable)(struct cphy *phy);
int (*intr_clear)(struct cphy *phy);
int (*intr_handler)(struct cphy *phy);
int (*autoneg_enable)(struct cphy *phy);
int (*autoneg_restart)(struct cphy *phy);
int (*advertise)(struct cphy *phy, unsigned int advertise_map);
int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
int *duplex, int *fc);
int (*power_down)(struct cphy *phy, int enable);
u32 mmds;
};
enum {
EDC_OPT_AEL2005 = 0,
EDC_OPT_AEL2005_SIZE = 1084,
EDC_TWX_AEL2005 = 1,
EDC_TWX_AEL2005_SIZE = 1464,
EDC_TWX_AEL2020 = 2,
EDC_TWX_AEL2020_SIZE = 1628,
EDC_MAX_SIZE = EDC_TWX_AEL2020_SIZE, /* Max cache size */
};
/* A PHY instance */
struct cphy {
u8 modtype; /* PHY module type */
short priv; /* scratch pad */
unsigned int caps; /* PHY capabilities */
struct adapter *adapter; /* associated adapter */
const char *desc; /* PHY description */
unsigned long fifo_errors; /* FIFO over/under-flows */
const struct cphy_ops *ops; /* PHY operations */
struct mdio_if_info mdio;
u16 phy_cache[EDC_MAX_SIZE]; /* EDC cache */
};
/* Convenience MDIO read/write wrappers */
static inline int t3_mdio_read(struct cphy *phy, int mmd, int reg,
unsigned int *valp)
{
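/* mdio_read returns the register value on success or a negative errno */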
int rc = phy->mdio.mdio_read(phy->mdio.dev, phy->mdio.prtad, mmd, reg);
*valp = (rc >= 0) ? rc : -1;
return (rc >= 0) ? 0 : rc;
}
static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg,
unsigned int val)
{
return phy->mdio.mdio_write(phy->mdio.dev, phy->mdio.prtad, mmd,
reg, val);
}
/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
int phy_addr, struct cphy_ops *phy_ops,
const struct mdio_ops *mdio_ops,
unsigned int caps, const char *desc)
{
phy->caps = caps;
phy->adapter = adapter;
phy->desc = desc;
phy->ops = phy_ops;
if (mdio_ops) {
phy->mdio.prtad = phy_addr;
phy->mdio.mmds = phy_ops->mmds;
phy->mdio.mode_support = mdio_ops->mode_support;
phy->mdio.mdio_read = mdio_ops->read;
phy->mdio.mdio_write = mdio_ops->write;
}
}
/* Accumulate MAC statistics every 180 seconds; for 1G ports the period is multiplied by 10. */
#define MAC_STATS_ACCUM_SECS 180
#define XGM_REG(reg_addr, idx) \
((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
struct addr_val_pair {
unsigned int reg_addr;
unsigned int val;
};
#include "adapter.h"
#ifndef PCI_VENDOR_ID_CHELSIO
# define PCI_VENDOR_ID_CHELSIO 0x1425
#endif
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
#define adapter_info(adap) ((adap)->params.info)
static inline int uses_xaui(const struct adapter *adap)
{
return adapter_info(adap)->caps & SUPPORTED_AUI;
}
static inline int is_10G(const struct adapter *adap)
{
return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
}
static inline int is_offload(const struct adapter *adap)
{
return adap->params.offload;
}
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
return adap->params.vpd.cclk / 1000;
}
static inline unsigned int is_pcie(const struct adapter *adap)
{
return adap->params.pci.variant == PCI_VARIANT_PCIE;
}
void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
u32 val);
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
int n, unsigned int offset);
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
int polarity, int attempts, int delay, u32 *valp);
static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
int polarity, int attempts, int delay)
{
return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
delay, NULL);
}
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
unsigned int set);
int t3_phy_reset(struct cphy *phy, int mmd, int wait);
int t3_phy_advertise(struct cphy *phy, unsigned int advert);
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert);
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
int t3_phy_lasi_intr_enable(struct cphy *phy);
int t3_phy_lasi_intr_disable(struct cphy *phy);
int t3_phy_lasi_intr_clear(struct cphy *phy);
int t3_phy_lasi_intr_handler(struct cphy *phy);
void t3_intr_enable(struct adapter *adapter);
void t3_intr_disable(struct adapter *adapter);
void t3_intr_clear(struct adapter *adapter);
void t3_xgm_intr_enable(struct adapter *adapter, int idx);
void t3_xgm_intr_disable(struct adapter *adapter, int idx);
void t3_port_intr_enable(struct adapter *adapter, int idx);
void t3_port_intr_disable(struct adapter *adapter, int idx);
int t3_slow_intr_handler(struct adapter *adapter);
int t3_phy_intr_handler(struct adapter *adapter);
void t3_link_changed(struct adapter *adapter, int port_id);
void t3_link_fault(struct adapter *adapter, int port_id);
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data);
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data);
int t3_seeprom_wp(struct adapter *adapter, int enable);
int t3_get_tp_version(struct adapter *adapter, u32 *vers);
int t3_check_tpsram_version(struct adapter *adapter);
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram,
unsigned int size);
int t3_set_proto_sram(struct adapter *adap, const u8 *data);
int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
int t3_get_fw_version(struct adapter *adapter, u32 *vers);
int t3_check_fw_version(struct adapter *adapter);
int t3_init_hw(struct adapter *adapter, u32 fw_params);
int t3_reset_adapter(struct adapter *adapter);
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
int reset);
int t3_replay_prep_adapter(struct adapter *adapter);
void t3_led_ready(struct adapter *adapter);
void t3_fatal_err(struct adapter *adapter);
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
const u8 * cpus, const u16 *rspq);
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
unsigned int n, unsigned int *valp);
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
u64 *buf);
int t3_mac_reset(struct cmac *mac);
void t3b_pcs_reset(struct cmac *mac);
void t3_mac_disable_exact_filters(struct cmac *mac);
void t3_mac_enable_exact_filters(struct cmac *mac);
int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
int t3b2_mac_watchdog_task(struct cmac *mac);
void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int nroutes);
void t3_mc5_intr_handler(struct mc5 *mc5);
void t3_tp_set_offload_mode(struct adapter *adap, int enable);
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
unsigned short alpha[NCCTRL_WIN],
unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
void t3_config_trace_filter(struct adapter *adapter,
const struct trace_params *tp, int filter_index,
int invert, int enable);
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
void t3_sge_prep(struct adapter *adap, struct sge_params *p);
void t3_sge_init(struct adapter *adap, struct sge_params *p);
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
enum sge_context_type type, int respq, u64 base_addr,
unsigned int size, unsigned int token, int gen,
unsigned int cidx);
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
int gts_enable, u64 base_addr, unsigned int size,
unsigned int esize, unsigned int cong_thres, int gen,
unsigned int cidx);
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
int irq_vec_idx, u64 base_addr, unsigned int size,
unsigned int fl_thres, int gen, unsigned int cidx);
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
unsigned int size, int rspq, int ovfl_mode,
unsigned int credits, unsigned int credit_thres);
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
unsigned int credits);
int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
const struct mdio_ops *mdio_ops);
int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
#endif /* __CHELSIO_COMMON_H */


@@ -0,0 +1,189 @@
/*
* Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
#define _CXGB3_OFFLOAD_CTL_DEFS_H
enum {
GET_MAX_OUTSTANDING_WR = 0,
GET_TX_MAX_CHUNK = 1,
GET_TID_RANGE = 2,
GET_STID_RANGE = 3,
GET_RTBL_RANGE = 4,
GET_L2T_CAPACITY = 5,
GET_MTUS = 6,
GET_WR_LEN = 7,
GET_IFF_FROM_MAC = 8,
GET_DDP_PARAMS = 9,
GET_PORTS = 10,
ULP_ISCSI_GET_PARAMS = 11,
ULP_ISCSI_SET_PARAMS = 12,
RDMA_GET_PARAMS = 13,
RDMA_CQ_OP = 14,
RDMA_CQ_SETUP = 15,
RDMA_CQ_DISABLE = 16,
RDMA_CTRL_QP_SETUP = 17,
RDMA_GET_MEM = 18,
RDMA_GET_MIB = 19,
GET_RX_PAGE_INFO = 50,
GET_ISCSI_IPV4ADDR = 51,
GET_EMBEDDED_INFO = 70,
};
/*
* Structure used to describe a TID range. Valid TIDs are [base, base+num).
*/
struct tid_range {
unsigned int base; /* first TID */
unsigned int num; /* number of TIDs in range */
};
/*
* Structure used to request the size and contents of the MTU table.
*/
struct mtutab {
unsigned int size; /* # of entries in the MTU table */
const unsigned short *mtus; /* the MTU table values */
};
struct net_device;
/*
* Structure used to request the adapter net_device owning a given MAC address.
*/
struct iff_mac {
struct net_device *dev; /* the net_device */
const unsigned char *mac_addr; /* MAC address to lookup */
u16 vlan_tag;
};
/* Structure used to request a port's iSCSI IPv4 address */
struct iscsi_ipv4addr {
struct net_device *dev; /* the net_device */
__be32 ipv4addr; /* the return iSCSI IPv4 address */
};
struct pci_dev;
/*
* Structure used to request the TCP DDP parameters.
*/
struct ddp_params {
unsigned int llimit; /* TDDP region start address */
unsigned int ulimit; /* TDDP region end address */
unsigned int tag_mask; /* TDDP tag mask */
struct pci_dev *pdev;
};
struct adap_ports {
unsigned int nports; /* number of ports on this adapter */
struct net_device *lldevs[2];
};
/*
* Structure used to return information to the iscsi layer.
*/
struct ulp_iscsi_info {
unsigned int offset;
unsigned int llimit;
unsigned int ulimit;
unsigned int tagmask;
u8 pgsz_factor[4];
unsigned int max_rxsz;
unsigned int max_txsz;
struct pci_dev *pdev;
};
/*
* Structure used to return information to the RDMA layer.
*/
struct rdma_info {
unsigned int tpt_base; /* TPT base address */
unsigned int tpt_top; /* TPT last entry address */
unsigned int pbl_base; /* PBL base address */
unsigned int pbl_top; /* PBL last entry address */
unsigned int rqt_base; /* RQT base address */
unsigned int rqt_top; /* RQT last entry address */
unsigned int udbell_len; /* user doorbell region length */
unsigned long udbell_physbase; /* user doorbell physical start addr */
void __iomem *kdb_addr; /* kernel doorbell register address */
struct pci_dev *pdev; /* associated PCI device */
};
/*
* Structure used to request an operation on an RDMA completion queue.
*/
struct rdma_cq_op {
unsigned int id;
unsigned int op;
unsigned int credits;
};
/*
* Structure used to setup RDMA completion queues.
*/
struct rdma_cq_setup {
unsigned int id;
unsigned long long base_addr;
unsigned int size;
unsigned int credits;
unsigned int credit_thres;
unsigned int ovfl_mode;
};
/*
* Structure used to setup the RDMA control egress context.
*/
struct rdma_ctrlqp_setup {
unsigned long long base_addr;
unsigned int size;
};
/*
* Offload TX/RX page information.
*/
struct ofld_page_info {
unsigned int page_size; /* Page size, should be a power of 2 */
unsigned int num; /* Number of pages */
};
/*
* Structure used to get firmware and protocol engine versions.
*/
struct ch_embedded_info {
u32 fw_vers;
u32 tp_vers;
};
#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */


@@ -0,0 +1,114 @@
/*
* Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CHELSIO_DEFS_H
#define _CHELSIO_DEFS_H
#include <linux/skbuff.h>
#include <net/tcp.h>
#include "t3cdev.h"
#include "cxgb3_offload.h"
#define VALIDATE_TID 1
void *cxgb_alloc_mem(unsigned long size);
void cxgb_free_mem(void *addr);
/*
 * Map an ATID or STID to its entry in the corresponding TID table.
 */
static inline union active_open_entry *atid2entry(const struct tid_info *t,
unsigned int atid)
{
return &t->atid_tab[atid - t->atid_base];
}
static inline union listen_entry *stid2entry(const struct tid_info *t,
unsigned int stid)
{
return &t->stid_tab[stid - t->stid_base];
}
/*
* Find the connection corresponding to a TID.
*/
static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
unsigned int tid)
{
struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
&(t->tid_tab[tid]) : NULL;
return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
}
/*
* Find the connection corresponding to a server TID.
*/
static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
unsigned int tid)
{
union listen_entry *e;
if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
return NULL;
e = stid2entry(t, tid);
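/*
 * An entry still on the free list has its next pointer chained within
 * the TID table region and thus carries no connection.
 */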
if ((void *)e->next >= (void *)t->tid_tab &&
(void *)e->next < (void *)&t->atid_tab[t->natids])
return NULL;
return &e->t3c_tid;
}
/*
* Find the connection corresponding to an active-open TID.
*/
static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
unsigned int tid)
{
union active_open_entry *e;
if (tid < t->atid_base || tid >= t->atid_base + t->natids)
return NULL;
e = atid2entry(t, tid);
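/* as in lookup_stid(), skip entries that are still on the free list */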
if ((void *)e->next >= (void *)t->tid_tab &&
(void *)e->next < (void *)&t->atid_tab[t->natids])
return NULL;
return &e->t3c_tid;
}
int attach_t3cdev(struct t3cdev *dev);
void detach_t3cdev(struct t3cdev *dev);
#endif

View File

@@ -0,0 +1,177 @@
/*
* Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CHIOCTL_H__
#define __CHIOCTL_H__
/*
* Ioctl commands specific to this driver.
*/
enum {
CHELSIO_GETMTUTAB = 1029,
CHELSIO_SETMTUTAB = 1030,
CHELSIO_SET_PM = 1032,
CHELSIO_GET_PM = 1033,
CHELSIO_GET_MEM = 1038,
CHELSIO_LOAD_FW = 1041,
CHELSIO_SET_TRACE_FILTER = 1044,
CHELSIO_SET_QSET_PARAMS = 1045,
CHELSIO_GET_QSET_PARAMS = 1046,
CHELSIO_SET_QSET_NUM = 1047,
CHELSIO_GET_QSET_NUM = 1048,
};
struct ch_reg {
uint32_t cmd;
uint32_t addr;
uint32_t val;
};
struct ch_cntxt {
uint32_t cmd;
uint32_t cntxt_type;
uint32_t cntxt_id;
uint32_t data[4];
};
/* context types */
enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
struct ch_desc {
uint32_t cmd;
uint32_t queue_num;
uint32_t idx;
uint32_t size;
uint8_t data[128];
};
struct ch_mem_range {
uint32_t cmd;
uint32_t mem_id;
uint32_t addr;
uint32_t len;
uint32_t version;
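/* variable-length payload of 'len' bytes follows */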
uint8_t buf[0];
};
struct ch_qset_params {
uint32_t cmd;
uint32_t qset_idx;
int32_t txq_size[3];
int32_t rspq_size;
int32_t fl_size[2];
int32_t intr_lat;
int32_t polling;
int32_t lro;
int32_t cong_thres;
int32_t vector;
int32_t qnum;
};
struct ch_pktsched_params {
uint32_t cmd;
uint8_t sched;
uint8_t idx;
uint8_t min;
uint8_t max;
uint8_t binding;
};
#ifndef TCB_SIZE
# define TCB_SIZE 128
#endif
/* TCB size in 32-bit words */
#define TCB_WORDS (TCB_SIZE / 4)
enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
struct ch_mtus {
uint32_t cmd;
uint32_t nmtus;
uint16_t mtus[NMTUS];
};
struct ch_pm {
uint32_t cmd;
uint32_t tx_pg_sz;
uint32_t tx_num_pg;
uint32_t rx_pg_sz;
uint32_t rx_num_pg;
uint32_t pm_total;
};
struct ch_tcam {
uint32_t cmd;
uint32_t tcam_size;
uint32_t nservers;
uint32_t nroutes;
uint32_t nfilters;
};
struct ch_tcb {
uint32_t cmd;
uint32_t tcb_index;
uint32_t tcb_data[TCB_WORDS];
};
struct ch_tcam_word {
uint32_t cmd;
uint32_t addr;
uint32_t buf[3];
};
struct ch_trace {
uint32_t cmd;
uint32_t sip;
uint32_t sip_mask;
uint32_t dip;
uint32_t dip_mask;
uint16_t sport;
uint16_t sport_mask;
uint16_t dport;
uint16_t dport_mask;
uint32_t vlan:12;
uint32_t vlan_mask:12;
uint32_t intf:4;
uint32_t intf_mask:4;
uint8_t proto;
uint8_t proto_mask;
uint8_t invert_match:1;
uint8_t config_tx:1;
uint8_t config_rx:1;
uint8_t trace_tx:1;
uint8_t trace_rx:1;
};
#define SIOCCHIOCTL SIOCDEVPRIVATE
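/*
 * Illustrative sketch (not part of this commit): all of the commands above
 * travel over the single SIOCCHIOCTL ioctl, with the command word first in
 * the payload pointed to by ifr_data.  The user-space fragment below is an
 * example only (the interface name, socket fd and error handling are
 * assumptions of the example) and is guarded out so it never compiles here.
 */
#if 0	/* user-space usage example */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int get_qset_num(int fd, const char *ifname)
{
	struct ifreq ifr;
	struct ch_reg edata;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	edata.cmd = CHELSIO_GET_QSET_NUM;
	ifr.ifr_data = (char *)&edata;
	if (ioctl(fd, SIOCCHIOCTL, &ifr) < 0)
		return -1;
	return edata.val;	/* number of queue sets in use */
}
#endif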
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,209 @@
/*
* Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CXGB3_OFFLOAD_H
#define _CXGB3_OFFLOAD_H
#include <linux/list.h>
#include <linux/skbuff.h>
#include "l2t.h"
#include "t3cdev.h"
#include "t3_cpl.h"
struct adapter;
void cxgb3_offload_init(void);
void cxgb3_adapter_ofld(struct adapter *adapter);
void cxgb3_adapter_unofld(struct adapter *adapter);
int cxgb3_offload_activate(struct adapter *adapter);
void cxgb3_offload_deactivate(struct adapter *adapter);
void cxgb3_set_dummy_ops(struct t3cdev *dev);
struct t3cdev *dev2t3cdev(struct net_device *dev);
/*
 * Client registration. Users of the T3 driver must register themselves.
* The T3 driver will call the add function of every client for each T3
* adapter activated, passing up the t3cdev ptr. Each client fills out an
* array of callback functions to process CPL messages.
*/
void cxgb3_register_client(struct cxgb3_client *client);
void cxgb3_unregister_client(struct cxgb3_client *client);
void cxgb3_add_clients(struct t3cdev *tdev);
void cxgb3_remove_clients(struct t3cdev *tdev);
void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port);
typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
struct sk_buff *skb, void *ctx);
enum {
OFFLOAD_STATUS_UP,
OFFLOAD_STATUS_DOWN,
OFFLOAD_PORT_DOWN,
OFFLOAD_PORT_UP,
OFFLOAD_DB_FULL,
OFFLOAD_DB_EMPTY,
OFFLOAD_DB_DROP
};
struct cxgb3_client {
char *name;
void (*add) (struct t3cdev *);
void (*remove) (struct t3cdev *);
cxgb3_cpl_handler_func *handlers;
int (*redirect)(void *ctx, struct dst_entry *old,
struct dst_entry *new, struct l2t_entry *l2t);
struct list_head client_list;
void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port);
};
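/*
 * Illustrative sketch (not part of this commit): the skeleton of a client.
 * The 256-entry handler table mirrors the CPL opcode space and is an
 * assumption of this example; real clients fill in handlers only for the
 * opcodes they consume.  Guarded out so it never compiles here.
 */
#if 0
static int demo_cpl_handler(struct t3cdev *dev, struct sk_buff *skb,
			    void *ctx)
{
	/* consume the message; tell the core the buffer may be freed */
	return CPL_RET_BUF_DONE;
}

static cxgb3_cpl_handler_func demo_handlers[256]; /* indexed by CPL opcode */

static void demo_add(struct t3cdev *tdev)
{
	/* called once for every activated T3 adapter */
}

static void demo_remove(struct t3cdev *tdev)
{
}

static struct cxgb3_client demo_client = {
	.name		= "demo",
	.add		= demo_add,
	.remove		= demo_remove,
	.handlers	= demo_handlers,
};

/* cxgb3_register_client(&demo_client) at module init,
 * cxgb3_unregister_client(&demo_client) at module exit.
 */
#endif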
/*
* TID allocation services.
*/
int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx);
int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx);
void *cxgb3_free_atid(struct t3cdev *dev, int atid);
void cxgb3_free_stid(struct t3cdev *dev, int stid);
void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx, unsigned int tid);
void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
struct t3c_tid_entry {
struct cxgb3_client *client;
void *ctx;
};
/* CPL message priority levels */
enum {
CPL_PRIORITY_DATA = 0, /* data messages */
CPL_PRIORITY_SETUP = 1, /* connection setup messages */
CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
CPL_PRIORITY_ACK = 1, /* RX ACK messages */
CPL_PRIORITY_CONTROL = 1 /* offload control messages */
};
/* Flags for return value of CPL message handlers */
enum {
CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
};
typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
/*
* Returns a pointer to the first byte of the CPL header in an sk_buff that
* contains a CPL message.
*/
static inline void *cplhdr(struct sk_buff *skb)
{
return skb->data;
}
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
union listen_entry {
struct t3c_tid_entry t3c_tid;
union listen_entry *next;
};
union active_open_entry {
struct t3c_tid_entry t3c_tid;
union active_open_entry *next;
};
/*
 * Holds the size, base address, free-list start, etc. of the TID, server TID,
 * and active-open TID tables for an offload device.
* The tables themselves are allocated dynamically.
*/
struct tid_info {
struct t3c_tid_entry *tid_tab;
unsigned int ntids;
atomic_t tids_in_use;
union listen_entry *stid_tab;
unsigned int nstids;
unsigned int stid_base;
union active_open_entry *atid_tab;
unsigned int natids;
unsigned int atid_base;
/*
* The following members are accessed R/W so we put them in their own
* cache lines.
*
* XXX We could combine the atid fields above with the lock here since
 * atids are used once (unlike other tids). OTOH the above fields are
* usually in cache due to tid_tab.
*/
spinlock_t atid_lock ____cacheline_aligned_in_smp;
union active_open_entry *afree;
unsigned int atids_in_use;
spinlock_t stid_lock ____cacheline_aligned;
union listen_entry *sfree;
unsigned int stids_in_use;
};
struct t3c_data {
struct list_head list_node;
struct t3cdev *dev;
unsigned int tx_max_chunk; /* max payload for TX_DATA */
unsigned int max_wrs; /* max in-flight WRs per connection */
unsigned int nmtus;
const unsigned short *mtus;
struct tid_info tid_maps;
struct t3c_tid_entry *tid_release_list;
spinlock_t tid_release_lock;
struct work_struct tid_release_task;
struct sk_buff *nofail_skb;
unsigned int release_list_incomplete;
};
/*
* t3cdev -> t3c_data accessor
*/
#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
#endif


@@ -0,0 +1,177 @@
/*
* Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _FIRMWARE_EXPORTS_H_
#define _FIRMWARE_EXPORTS_H_
/* WR OPCODES supported by the firmware.
*/
#define FW_WROPCODE_FORWARD 0x01
#define FW_WROPCODE_BYPASS 0x05
#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00
#define FW_WROPCODE_ULPTX_MEM_READ 0x02
#define FW_WROPCODE_ULPTX_PKT 0x04
#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
#define FW_WROPCODE_OFLD_TX_DATA 0x0D
#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
#define FW_WROPCODE_RI_RDMA_INIT 0x10
#define FW_WROPCODE_RI_RDMA_WRITE 0x11
#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
#define FW_WROPCODE_RI_SEND 0x14
#define FW_WROPCODE_RI_TERMINATE 0x15
#define FW_WROPCODE_RI_RDMA_READ 0x16
#define FW_WROPCODE_RI_RECEIVE 0x17
#define FW_WROPCODE_RI_BIND_MW 0x18
#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
#define FW_WROPCODE_RI_LOCAL_INV 0x1A
#define FW_WROPCODE_RI_MODIFY_QP 0x1B
#define FW_WROPCODE_RI_BYPASS 0x1C
#define FW_WROPOCDE_RSVD 0x1E
#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
#define FW_WROPCODE_MNGT 0x1D
#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
/* Maximum size of a WR sent from the host, limited by the SGE.
*
 * Note: WRs coming from the ULP or TP are limited only by the CIM.
*/
#define FW_WR_SIZE 128
/* Maximum number of outstanding WRs sent from the host. Value must be
* programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
* offload modules to limit the number of WRs per connection.
*/
#define FW_T3_WR_NUM 16
#define FW_N3_WR_NUM 7
#ifndef N3
# define FW_WR_NUM FW_T3_WR_NUM
#else
# define FW_WR_NUM FW_N3_WR_NUM
#endif
/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
* queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
* start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
*
* Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
* to RESP Queue[i].
*/
#define FW_TUNNEL_NUM 8
#define FW_TUNNEL_SGEEC_START 8
#define FW_TUNNEL_TID_START 65544
/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
* must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
* (or 'uP Token') FW_CTRL_TID_START.
*
* Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
*/
#define FW_CTRL_NUM 8
#define FW_CTRL_SGEEC_START 65528
#define FW_CTRL_TID_START 65536
/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
* queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
*
* Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
* OFFLOAD Queues, as the host is responsible for providing the correct TID in
* every WR.
*
 * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
*/
#define FW_OFLD_NUM 8
#define FW_OFLD_SGEEC_START 0
/* FW_RI_NUM corresponds to the number of supported RI (RDMA Interface)
 * Queues.
 */
#define FW_RI_NUM 1
#define FW_RI_SGEEC_START 65527
#define FW_RI_TID_START 65552
/* FW_RX_PKT_NUM corresponds to the number of supported RX_PKT Queues; their
 * TIDs start at FW_RX_PKT_TID_START.
 */
#define FW_RX_PKT_NUM 1
#define FW_RX_PKT_TID_START 65553
/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
 * by the firmware.
*/
#define FW_WRC_NUM \
(65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
/*
* FW type and version.
*/
#define S_FW_VERSION_TYPE 28
#define M_FW_VERSION_TYPE 0xF
#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE)
#define G_FW_VERSION_TYPE(x) \
(((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE)
#define S_FW_VERSION_MAJOR 16
#define M_FW_VERSION_MAJOR 0xFFF
#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR)
#define G_FW_VERSION_MAJOR(x) \
(((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR)
#define S_FW_VERSION_MINOR 8
#define M_FW_VERSION_MINOR 0xFF
#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR)
#define G_FW_VERSION_MINOR(x) \
(((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR)
#define S_FW_VERSION_MICRO 0
#define M_FW_VERSION_MICRO 0xFF
#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO)
#define G_FW_VERSION_MICRO(x) \
(((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
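/*
 * Example: a version word for type 0, firmware 7.10.0 packs as
 *	V_FW_VERSION_TYPE(0) | V_FW_VERSION_MAJOR(7) |
 *	V_FW_VERSION_MINOR(10) | V_FW_VERSION_MICRO(0)
 * and G_FW_VERSION_MAJOR() recovers the 7 from the packed word.
 */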
#endif /* _FIRMWARE_EXPORTS_H_ */


@@ -0,0 +1,445 @@
/*
* Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#define VLAN_NONE 0xfff
/*
* Module locking notes: There is a RW lock protecting the L2 table as a
* whole plus a spinlock per L2T entry. Entry lookups and allocations happen
* under the protection of the table lock, individual entry changes happen
* while holding that entry's spinlock. The table lock nests outside the
* entry locks. Allocations of new entries take the table lock as writers so
* no other lookups can happen while allocating new entries. Entry updates
* take the table lock as readers so multiple entries can be updated in
* parallel. An L2T entry can be dropped by decrementing its reference count
* and therefore can happen in parallel with entry allocation but no entry
* can change state or increment its ref count during allocation as both of
* these perform lookups.
*/
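/*
 * Lock ordering summary of the above (illustrative):
 *	write_lock(&d->lock)			entry allocation
 *	read_lock(&d->lock) -> e->lock		entry update
 *	e->lock alone				state change / last ref drop
 */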
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
return e->vlan >> 13;
}
static inline unsigned int arp_hash(u32 key, int ifindex,
const struct l2t_data *d)
{
return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}
static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
neigh_hold(n);
if (e->neigh)
neigh_release(e->neigh);
e->neigh = n;
}
/*
* Set up an L2T entry and send any packets waiting in the arp queue. The
* supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
* entry locked.
*/
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e)
{
struct cpl_l2t_write_req *req;
struct sk_buff *tmp;
if (!skb) {
skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
}
req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
V_L2T_W_PRIO(vlan_prio(e)));
memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
skb->priority = CPL_PRIORITY_CONTROL;
cxgb3_ofld_send(dev, skb);
skb_queue_walk_safe(&e->arpq, skb, tmp) {
__skb_unlink(skb, &e->arpq);
cxgb3_ofld_send(dev, skb);
}
e->state = L2T_STATE_VALID;
return 0;
}
/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
* Must be called with the entry's lock held.
*/
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
__skb_queue_tail(&e->arpq, skb);
}
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e)
{
again:
switch (e->state) {
case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
neigh_event_send(e->neigh, NULL);
spin_lock_bh(&e->lock);
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
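/* fall through */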
case L2T_STATE_VALID: /* fast-path, send the packet on */
return cxgb3_ofld_send(dev, skb);
case L2T_STATE_RESOLVING:
spin_lock_bh(&e->lock);
if (e->state != L2T_STATE_RESOLVING) {
/* ARP already completed */
spin_unlock_bh(&e->lock);
goto again;
}
arpq_enqueue(e, skb);
spin_unlock_bh(&e->lock);
/*
* Only the first packet added to the arpq should kick off
* resolution. However, because the alloc_skb below can fail,
* we allow each packet added to the arpq to retry resolution
* as a way of recovering from transient memory exhaustion.
* A better way would be to use a work request to retry L2T
* entries when there's no memory.
*/
if (!neigh_event_send(e->neigh, NULL)) {
skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
GFP_ATOMIC);
if (!skb)
break;
spin_lock_bh(&e->lock);
if (!skb_queue_empty(&e->arpq))
setup_l2e_send_pending(dev, skb, e);
else /* we lost the race */
__kfree_skb(skb);
spin_unlock_bh(&e->lock);
}
}
return 0;
}
EXPORT_SYMBOL(t3_l2t_send_slow);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
switch (e->state) {
case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
neigh_event_send(e->neigh, NULL);
spin_lock_bh(&e->lock);
if (e->state == L2T_STATE_STALE) {
e->state = L2T_STATE_VALID;
}
spin_unlock_bh(&e->lock);
return;
case L2T_STATE_VALID: /* fast-path, send the packet on */
return;
case L2T_STATE_RESOLVING:
spin_lock_bh(&e->lock);
if (e->state != L2T_STATE_RESOLVING) {
/* ARP already completed */
spin_unlock_bh(&e->lock);
goto again;
}
spin_unlock_bh(&e->lock);
/*
* Only the first packet added to the arpq should kick off
 * resolution. However, because a resolution kick-off can fail
 * (e.g. under transient memory exhaustion in the send path), we
 * allow each packet added to the arpq to retry resolution.
* A better way would be to use a work request to retry L2T
* entries when there's no memory.
*/
neigh_event_send(e->neigh, NULL);
}
}
EXPORT_SYMBOL(t3_l2t_send_event);
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
struct l2t_entry *end, *e, **p;
if (!atomic_read(&d->nfree))
return NULL;
/* there's definitely a free entry */
for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
if (atomic_read(&e->refcnt) == 0)
goto found;
for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
found:
d->rover = e + 1;
atomic_dec(&d->nfree);
/*
* The entry we found may be an inactive entry that is
* presently in the hash table. We need to remove it.
*/
if (e->state != L2T_STATE_UNUSED) {
int hash = arp_hash(e->addr, e->ifindex, d);
for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
if (*p == e) {
*p = e->next;
break;
}
e->state = L2T_STATE_UNUSED;
}
return e;
}
/*
* Called when an L2T entry has no more users. The entry is left in the hash
* table since it is likely to be reused but we also bump nfree to indicate
* that the entry can be reallocated for a different neighbor. We also drop
* the existing neighbor reference in case the neighbor is going away and is
* waiting on our reference.
*
* Because entries can be reallocated to other neighbors once their ref count
* drops to 0 we need to take the entry's lock to avoid races with a new
* incarnation.
*/
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
spin_lock_bh(&e->lock);
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
if (e->neigh) {
neigh_release(e->neigh);
e->neigh = NULL;
}
}
spin_unlock_bh(&e->lock);
atomic_inc(&d->nfree);
}
EXPORT_SYMBOL(t3_l2e_free);
/*
* Update an L2T entry that was previously used for the same next hop as neigh.
* Must be called with softirqs disabled.
*/
static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
unsigned int nud_state;
spin_lock(&e->lock); /* avoid race with t3_l2t_free */
if (neigh != e->neigh)
neigh_replace(e, neigh);
nud_state = neigh->nud_state;
if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
!(nud_state & NUD_VALID))
e->state = L2T_STATE_RESOLVING;
else if (nud_state & NUD_CONNECTED)
e->state = L2T_STATE_VALID;
else
e->state = L2T_STATE_STALE;
spin_unlock(&e->lock);
}
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
struct net_device *dev)
{
struct l2t_entry *e;
struct l2t_data *d = L2DATA(cdev);
u32 addr = *(u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
int hash = arp_hash(addr, ifidx, d);
struct port_info *p = netdev_priv(dev);
int smt_idx = p->port_id;
write_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (e->addr == addr && e->ifindex == ifidx &&
e->smt_idx == smt_idx) {
l2t_hold(d, e);
if (atomic_read(&e->refcnt) == 1)
reuse_entry(e, neigh);
goto done;
}
/* Need to allocate a new entry */
e = alloc_l2e(d);
if (e) {
spin_lock(&e->lock); /* avoid race with t3_l2t_free */
e->next = d->l2tab[hash].first;
d->l2tab[hash].first = e;
e->state = L2T_STATE_RESOLVING;
e->addr = addr;
e->ifindex = ifidx;
e->smt_idx = smt_idx;
atomic_set(&e->refcnt, 1);
neigh_replace(e, neigh);
if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
e->vlan = vlan_dev_vlan_id(neigh->dev);
else
e->vlan = VLAN_NONE;
spin_unlock(&e->lock);
}
done:
write_unlock_bh(&d->lock);
return e;
}
EXPORT_SYMBOL(t3_l2t_get);
/*
* Called when address resolution fails for an L2T entry to handle packets
 * on the entry's arpq. If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the offload device.
*
* XXX: maybe we should abandon the latter behavior and just require a failure
* handler.
*/
static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
{
struct sk_buff *skb, *tmp;
skb_queue_walk_safe(arpq, skb, tmp) {
struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
__skb_unlink(skb, arpq);
if (cb->arp_failure_handler)
cb->arp_failure_handler(dev, skb);
else
cxgb3_ofld_send(dev, skb);
}
}
/*
* Called when the host's ARP layer makes a change to some entry that is
* loaded into the HW L2 table.
*/
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
struct sk_buff_head arpq;
struct l2t_entry *e;
struct l2t_data *d = L2DATA(dev);
u32 addr = *(u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
int hash = arp_hash(addr, ifidx, d);
read_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (e->addr == addr && e->ifindex == ifidx) {
spin_lock(&e->lock);
goto found;
}
read_unlock_bh(&d->lock);
return;
found:
__skb_queue_head_init(&arpq);
read_unlock(&d->lock);
if (atomic_read(&e->refcnt)) {
if (neigh != e->neigh)
neigh_replace(e, neigh);
if (e->state == L2T_STATE_RESOLVING) {
if (neigh->nud_state & NUD_FAILED) {
skb_queue_splice_init(&e->arpq, &arpq);
} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
setup_l2e_send_pending(dev, NULL, e);
} else {
e->state = neigh->nud_state & NUD_CONNECTED ?
L2T_STATE_VALID : L2T_STATE_STALE;
if (memcmp(e->dmac, neigh->ha, 6))
setup_l2e_send_pending(dev, NULL, e);
}
}
spin_unlock_bh(&e->lock);
if (!skb_queue_empty(&arpq))
handle_failed_resolution(dev, &arpq);
}
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
struct l2t_data *d;
int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
d = cxgb_alloc_mem(size);
if (!d)
return NULL;
d->nentries = l2t_capacity;
d->rover = &d->l2tab[1]; /* entry 0 is not used */
atomic_set(&d->nfree, l2t_capacity - 1);
rwlock_init(&d->lock);
for (i = 0; i < l2t_capacity; ++i) {
d->l2tab[i].idx = i;
d->l2tab[i].state = L2T_STATE_UNUSED;
__skb_queue_head_init(&d->l2tab[i].arpq);
spin_lock_init(&d->l2tab[i].lock);
atomic_set(&d->l2tab[i].refcnt, 0);
}
return d;
}
void t3_free_l2t(struct l2t_data *d)
{
cxgb_free_mem(d);
}


@@ -0,0 +1,141 @@
/*
* Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CHELSIO_L2T_H
#define _CHELSIO_L2T_H
#include <linux/spinlock.h>
#include "t3cdev.h"
#include <linux/atomic.h>
enum {
L2T_STATE_VALID, /* entry is up to date */
L2T_STATE_STALE, /* entry may be used but needs revalidation */
L2T_STATE_RESOLVING, /* entry needs address resolution */
L2T_STATE_UNUSED /* entry not in use */
};
struct neighbour;
struct sk_buff;
/*
* Each L2T entry plays multiple roles. First of all, it keeps state for the
* corresponding entry of the HW L2 table and maintains a queue of offload
* packets awaiting address resolution. Second, it is a node of a hash table
* chain, where the nodes of the chain are linked together through their next
* pointer. Finally, each node is a bucket of a hash table, pointing to the
* first element in its chain through its first pointer.
*/
struct l2t_entry {
u16 state; /* entry state */
u16 idx; /* entry index */
u32 addr; /* dest IP address */
int ifindex; /* neighbor's net_device's ifindex */
u16 smt_idx; /* SMT index */
u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
struct neighbour *neigh; /* associated neighbour */
struct l2t_entry *first; /* start of hash chain */
struct l2t_entry *next; /* next l2t_entry on chain */
struct sk_buff_head arpq; /* queue of packets awaiting resolution */
spinlock_t lock;
atomic_t refcnt; /* entry reference count */
u8 dmac[6]; /* neighbour's MAC address */
};
struct l2t_data {
unsigned int nentries; /* number of entries */
struct l2t_entry *rover; /* starting point for next allocation */
atomic_t nfree; /* number of free entries */
rwlock_t lock;
struct l2t_entry l2tab[0];
};
typedef void (*arp_failure_handler_func)(struct t3cdev *dev,
struct sk_buff *skb);
/*
* Callback stored in an skb to handle address resolution failure.
*/
struct l2t_skb_cb {
arp_failure_handler_func arp_failure_handler;
};
#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
static inline void set_arp_failure_handler(struct sk_buff *skb,
arp_failure_handler_func hnd)
{
L2T_SKB_CB(skb)->arp_failure_handler = hnd;
}
/*
* Getting to the L2 data from an offload device.
*/
#define L2DATA(dev) ((dev)->l2opt)
#define W_TCB_L2T_IX 0
#define S_TCB_L2T_IX 7
#define M_TCB_L2T_IX 0x7ffULL
#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
struct net_device *dev);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
void t3_free_l2t(struct l2t_data *d);
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e)
{
if (likely(e->state == L2T_STATE_VALID))
return cxgb3_ofld_send(dev, skb);
return t3_l2t_send_slow(dev, skb, e);
}
static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
{
if (atomic_dec_and_test(&e->refcnt))
t3_l2e_free(d, e);
}
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
atomic_dec(&d->nfree);
}
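/*
 * Illustrative usage sketch (not part of this commit).  tdev is the offload
 * device, port_dev the egress port's net_device and skb an offload packet;
 * all three names are assumptions of the example.
 *
 *	e = t3_l2t_get(tdev, neigh, port_dev);
 *	if (!e)
 *		return -ENOMEM;
 *	err = l2t_send(tdev, skb, e);     (queues the skb while resolving)
 *	...
 *	l2t_release(L2DATA(tdev), e);     (drop the reference when done)
 */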
#endif


@@ -0,0 +1,438 @@
/*
* Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "common.h"
#include "regs.h"
enum {
IDT75P52100 = 4,
IDT75N43102 = 5
};
/* DBGI command mode */
enum {
DBGI_MODE_MBUS = 0,
DBGI_MODE_IDT52100 = 5
};
/* IDT 75P52100 commands */
#define IDT_CMD_READ 0
#define IDT_CMD_WRITE 1
#define IDT_CMD_SEARCH 2
#define IDT_CMD_LEARN 3
/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
#define IDT_LAR_ADR0 0x180006
#define IDT_LAR_MODE144 0xffff0000
/* IDT SCR and SSR addresses (low 32 bits) */
#define IDT_SCR_ADR0 0x180000
#define IDT_SSR0_ADR0 0x180002
#define IDT_SSR1_ADR0 0x180004
/* IDT GMR base address (low 32 bits) */
#define IDT_GMR_BASE_ADR0 0x180020
/* IDT data and mask array base addresses (low 32 bits) */
#define IDT_DATARY_BASE_ADR0 0
#define IDT_MSKARY_BASE_ADR0 0x80000
/* IDT 75N43102 commands */
#define IDT4_CMD_SEARCH144 3
#define IDT4_CMD_WRITE 4
#define IDT4_CMD_READ 5
/* IDT 75N43102 SCR address (low 32 bits) */
#define IDT4_SCR_ADR0 0x3
/* IDT 75N43102 GMR base addresses (low 32 bits) */
#define IDT4_GMR_BASE0 0x10
#define IDT4_GMR_BASE1 0x20
#define IDT4_GMR_BASE2 0x30
/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
#define IDT4_DATARY_BASE_ADR0 0x1000000
#define IDT4_MSKARY_BASE_ADR0 0x2000000
#define MAX_WRITE_ATTEMPTS 5
#define MAX_ROUTES 2048
/*
* Issue a command to the TCAM and wait for its completion. The address and
* any data required by the command must have been setup by the caller.
*/
static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
}
static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
u32 v3)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
}
static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
u32 v3)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
}
static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
u32 *v3)
{
*v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
*v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
*v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
}
/*
* Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
* command cmd. The data to be written must have been set up by the caller.
* Returns -1 on failure, 0 on success.
*/
static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
if (mc5_cmd_write(adapter, cmd) == 0)
return 0;
CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
addr_lo);
return -1;
}
static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
u32 data_array_base, u32 write_cmd,
int addr_shift)
{
unsigned int i;
struct adapter *adap = mc5->adapter;
/*
* We need the size of the TCAM data and mask arrays in terms of
* 72-bit entries.
*/
unsigned int size72 = mc5->tcam_size;
unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
if (mc5->mode == MC5_MODE_144_BIT) {
size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
server_base *= 2;
}
/* Clear the data array */
dbgi_wr_data3(adap, 0, 0, 0);
for (i = 0; i < size72; i++)
if (mc5_write(adap, data_array_base + (i << addr_shift),
write_cmd))
return -1;
/* Initialize the mask array. */
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
for (i = 0; i < size72; i++) {
if (i == server_base) /* entering server or routing region */
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
mc5->mode == MC5_MODE_144_BIT ?
0xfffffff9 : 0xfffffffd);
if (mc5_write(adap, mask_array_base + (i << addr_shift),
write_cmd))
return -1;
}
return 0;
}
static int init_idt52100(struct mc5 *mc5)
{
int i;
struct adapter *adap = mc5->adapter;
t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
/*
* Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
* GMRs 8-9 for ACK- and AOPEN searches.
*/
t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
/* Set DBGI command mode for IDT TCAM. */
t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
/* Set up LAR */
dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
goto err;
/* Set up SSRs */
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
goto err;
/* Set up GMRs */
for (i = 0; i < 32; ++i) {
if (i >= 12 && i < 15)
dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
else if (i == 15)
dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
else
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
goto err;
}
/* Set up SCR */
dbgi_wr_data3(adap, 1, 0, 0);
if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
goto err;
return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
err:
return -EIO;
}
static int init_idt43102(struct mc5 *mc5)
{
int i;
struct adapter *adap = mc5->adapter;
t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
V_RDLAT(0xd) | V_SRCHLAT(0x12));
/*
* Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
* for ACK- and AOPEN searches.
*/
t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
IDT4_CMD_SEARCH144 | 0x3800);
t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
/* Set DBGI command mode for IDT TCAM. */
t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
/* Set up GMRs */
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
for (i = 0; i < 7; ++i)
if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
goto err;
for (i = 0; i < 4; ++i)
if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
goto err;
dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
goto err;
dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
goto err;
/* Set up SCR */
dbgi_wr_data3(adap, 0xf0000000, 0, 0);
if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
goto err;
return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
err:
return -EIO;
}
/* Put MC5 in DBGI mode. */
static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
{
t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
}
/* Put MC5 in M-Bus mode. */
static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
{
t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
}
/*
* Initialization that requires the OS and protocol layers to already
* be initialized goes here.
*/
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int nroutes)
{
u32 cfg;
int err;
unsigned int tcam_size = mc5->tcam_size;
struct adapter *adap = mc5->adapter;
if (!tcam_size)
return 0;
if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
return -EINVAL;
/* Reset the TCAM */
cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
CH_ERR(adap, "TCAM reset timed out\n");
return -1;
}
t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
tcam_size - nroutes - nfilters);
t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
tcam_size - nroutes - nfilters - nservers);
mc5->parity_enabled = 1;
/* All the TCAM addresses we access have only the low 32 bits non-zero */
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
mc5_dbgi_mode_enable(mc5);
switch (mc5->part_type) {
case IDT75P52100:
err = init_idt52100(mc5);
break;
case IDT75N43102:
err = init_idt43102(mc5);
break;
default:
CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
err = -EINVAL;
break;
}
mc5_dbgi_mode_disable(mc5);
return err;
}
#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
/*
* MC5 interrupt handler
*/
void t3_mc5_intr_handler(struct mc5 *mc5)
{
struct adapter *adap = mc5->adapter;
u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
if ((cause & F_PARITYERR) && mc5->parity_enabled) {
CH_ALERT(adap, "MC5 parity error\n");
mc5->stats.parity_err++;
}
if (cause & F_REQQPARERR) {
CH_ALERT(adap, "MC5 request queue parity error\n");
mc5->stats.reqq_parity_err++;
}
if (cause & F_DISPQPARERR) {
CH_ALERT(adap, "MC5 dispatch queue parity error\n");
mc5->stats.dispq_parity_err++;
}
if (cause & F_ACTRGNFULL)
mc5->stats.active_rgn_full++;
if (cause & F_NFASRCHFAIL)
mc5->stats.nfa_srch_err++;
if (cause & F_UNKNOWNCMD)
mc5->stats.unknown_cmd++;
if (cause & F_DELACTEMPTY)
mc5->stats.del_act_empty++;
if (cause & MC5_INT_FATAL)
t3_fatal_err(adap);
t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
}
void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
{
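/* note: "64 K" below expands to "64 * 1024" via the temporary K macro */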
#define K * 1024
static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
64 K, 128 K, 256 K, 32 K
};
#undef K
u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
mc5->adapter = adapter;
mc5->mode = (unsigned char)mode;
mc5->part_type = (unsigned char)G_TMTYPE(cfg);
if (cfg & F_TMTYPEHI)
mc5->part_type |= 4;
mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
if (mode == MC5_MODE_144_BIT)
mc5->tcam_size /= 2;
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,255 @@
/*
* This file is automatically generated --- any changes will be lost.
*/
#ifndef _SGE_DEFS_H
#define _SGE_DEFS_H
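/*
 * Field accessor convention used throughout this generated file:
 *	S_<FIELD>	bit position of the field
 *	M_<FIELD>	field mask, unshifted
 *	V_<FIELD>(x)	x shifted into field position
 *	F_<FIELD>	single-bit field set to 1
 *	G_<FIELD>(w)	field value extracted from word w
 */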
#define S_EC_CREDITS 0
#define M_EC_CREDITS 0x7FFF
#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
#define S_EC_GTS 15
#define V_EC_GTS(x) ((x) << S_EC_GTS)
#define F_EC_GTS V_EC_GTS(1U)
#define S_EC_INDEX 16
#define M_EC_INDEX 0xFFFF
#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
#define S_EC_SIZE 0
#define M_EC_SIZE 0xFFFF
#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
#define S_EC_BASE_LO 16
#define M_EC_BASE_LO 0xFFFF
#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
#define S_EC_BASE_HI 0
#define M_EC_BASE_HI 0xF
#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
#define S_EC_RESPQ 4
#define M_EC_RESPQ 0x7
#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
#define S_EC_TYPE 7
#define M_EC_TYPE 0x7
#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
#define S_EC_GEN 10
#define V_EC_GEN(x) ((x) << S_EC_GEN)
#define F_EC_GEN V_EC_GEN(1U)
#define S_EC_UP_TOKEN 11
#define M_EC_UP_TOKEN 0xFFFFF
#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
#define S_EC_VALID 31
#define V_EC_VALID(x) ((x) << S_EC_VALID)
#define F_EC_VALID V_EC_VALID(1U)
#define S_RQ_MSI_VEC 20
#define M_RQ_MSI_VEC 0x3F
#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
#define S_RQ_INTR_EN 26
#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)
#define S_RQ_GEN 28
#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
#define F_RQ_GEN V_RQ_GEN(1U)
#define S_CQ_INDEX 0
#define M_CQ_INDEX 0xFFFF
#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
#define S_CQ_SIZE 16
#define M_CQ_SIZE 0xFFFF
#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
#define S_CQ_BASE_HI 0
#define M_CQ_BASE_HI 0xFFFFF
#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
#define S_CQ_RSPQ 20
#define M_CQ_RSPQ 0x3F
#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
#define S_CQ_ASYNC_NOTIF 26
#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)
#define S_CQ_ARMED 27
#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
#define F_CQ_ARMED V_CQ_ARMED(1U)
#define S_CQ_ASYNC_NOTIF_SOL 28
#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)
#define S_CQ_GEN 29
#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
#define F_CQ_GEN V_CQ_GEN(1U)
#define S_CQ_ERR 30
#define V_CQ_ERR(x) ((x) << S_CQ_ERR)
#define F_CQ_ERR V_CQ_ERR(1U)
#define S_CQ_OVERFLOW_MODE 31
#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
#define S_CQ_CREDITS 0
#define M_CQ_CREDITS 0xFFFF
#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
#define S_CQ_CREDIT_THRES 16
#define M_CQ_CREDIT_THRES 0x1FFF
#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
#define S_FL_BASE_HI 0
#define M_FL_BASE_HI 0xFFFFF
#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
#define S_FL_INDEX_LO 20
#define M_FL_INDEX_LO 0xFFF
#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
#define S_FL_INDEX_HI 0
#define M_FL_INDEX_HI 0xF
#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
#define S_FL_SIZE 4
#define M_FL_SIZE 0xFFFF
#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
#define S_FL_GEN 20
#define V_FL_GEN(x) ((x) << S_FL_GEN)
#define F_FL_GEN V_FL_GEN(1U)
#define S_FL_ENTRY_SIZE_LO 21
#define M_FL_ENTRY_SIZE_LO 0x7FF
#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
#define S_FL_ENTRY_SIZE_HI 0
#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
#define S_FL_CONG_THRES 21
#define M_FL_CONG_THRES 0x3FF
#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
#define S_FL_GTS 31
#define V_FL_GTS(x) ((x) << S_FL_GTS)
#define F_FL_GTS V_FL_GTS(1U)
#define S_FLD_GEN1 31
#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
#define F_FLD_GEN1 V_FLD_GEN1(1U)
#define S_FLD_GEN2 0
#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
#define F_FLD_GEN2 V_FLD_GEN2(1U)
#define S_RSPD_TXQ1_CR 0
#define M_RSPD_TXQ1_CR 0x7F
#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
#define S_RSPD_TXQ1_GTS 7
#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)
#define S_RSPD_TXQ2_CR 8
#define M_RSPD_TXQ2_CR 0x7F
#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
#define S_RSPD_TXQ2_GTS 15
#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)
#define S_RSPD_TXQ0_CR 16
#define M_RSPD_TXQ0_CR 0x7F
#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
#define S_RSPD_TXQ0_GTS 23
#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)
#define S_RSPD_EOP 24
#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
#define F_RSPD_EOP V_RSPD_EOP(1U)
#define S_RSPD_SOP 25
#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
#define F_RSPD_SOP V_RSPD_SOP(1U)
#define S_RSPD_ASYNC_NOTIF 26
#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)
#define S_RSPD_FL0_GTS 27
#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)
#define S_RSPD_FL1_GTS 28
#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)
#define S_RSPD_IMM_DATA_VALID 29
#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)
#define S_RSPD_OFFLOAD 30
#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)
#define S_RSPD_GEN1 31
#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
#define F_RSPD_GEN1 V_RSPD_GEN1(1U)
#define S_RSPD_LEN 0
#define M_RSPD_LEN 0x7FFFFFFF
#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
#define S_RSPD_FLQ 31
#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
#define F_RSPD_FLQ V_RSPD_FLQ(1U)
#define S_RSPD_GEN2 0
#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
#define F_RSPD_GEN2 V_RSPD_GEN2(1U)
#define S_RSPD_INR_VEC 1
#define M_RSPD_INR_VEC 0x7F
#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
#endif /* _SGE_DEFS_H */

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,70 @@
/*
* Copyright (C) 2006-2008 Chelsio Communications. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _T3CDEV_H_
#define _T3CDEV_H_
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <net/neighbour.h>
#define T3CNAMSIZ 16
struct cxgb3_client;
enum t3ctype {
T3A = 0,
T3B,
T3C,
};
struct t3cdev {
char name[T3CNAMSIZ]; /* T3C device name */
enum t3ctype type;
struct list_head ofld_dev_list; /* for list linking */
struct net_device *lldev; /* LL dev associated with T3C messages */
struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
int (*send)(struct t3cdev *dev, struct sk_buff *skb);
int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
void *priv; /* driver private data */
void *l2opt; /* optional layer 2 data */
void *l3opt; /* optional layer 3 data */
void *l4opt; /* optional layer 4 data */
void *ulp; /* ulp stuff */
void *ulp_iscsi; /* ulp iscsi */
};
#endif /* _T3CDEV_H_ */


@@ -0,0 +1,44 @@
/*
* Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
#ifndef __CHELSIO_VERSION_H
#define __CHELSIO_VERSION_H
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
/* Driver version */
#define DRV_VERSION "1.1.4-ko"
/* Firmware version */
#define FW_VERSION_MAJOR 7
#define FW_VERSION_MINOR 10
#define FW_VERSION_MICRO 0
#endif /* __CHELSIO_VERSION_H */


@@ -0,0 +1,416 @@
/*
* Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "common.h"
/* VSC8211 PHY specific registers. */
enum {
VSC8211_SIGDET_CTRL = 19,
VSC8211_EXT_CTRL = 23,
VSC8211_INTR_ENABLE = 25,
VSC8211_INTR_STATUS = 26,
VSC8211_LED_CTRL = 27,
VSC8211_AUX_CTRL_STAT = 28,
VSC8211_EXT_PAGE_AXS = 31,
};
enum {
VSC_INTR_RX_ERR = 1 << 0,
VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
VSC_INTR_CABLE = 1 << 2, /* cable impairment */
VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
VSC_INTR_DPLX_CHG = 1 << 12, /* duplex change */
VSC_INTR_LINK_CHG = 1 << 13, /* link change */
VSC_INTR_SPD_CHG = 1 << 14, /* speed change */
VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
};
enum {
VSC_CTRL_CLAUSE37_VIEW = 1 << 4, /* Switch to Clause 37 view */
VSC_CTRL_MEDIA_MODE_HI = 0xf000 /* High part of media mode select */
};
#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
VSC_INTR_DPLX_CHG | VSC_INTR_SPD_CHG | \
VSC_INTR_NEG_DONE)
#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
VSC_INTR_ENABLE)
/* PHY specific auxiliary control & status register fields */
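/*
 * Naming convention for the field helpers below (used throughout cxgb3):
 * S_<FIELD> is the field's bit offset, M_<FIELD> its unshifted mask,
 * V_<FIELD>(x) shifts a value into position, G_<FIELD>(x) extracts it,
 * and F_<FIELD> is the mask of a single-bit field.
 */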
#define S_ACSR_ACTIPHY_TMR 0
#define M_ACSR_ACTIPHY_TMR 0x3
#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
#define S_ACSR_SPEED 3
#define M_ACSR_SPEED 0x3
#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
#define S_ACSR_DUPLEX 5
#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
#define S_ACSR_ACTIPHY 6
#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
/*
* Reset the PHY. This PHY completes reset immediately so we never wait.
*/
static int vsc8211_reset(struct cphy *cphy, int wait)
{
return t3_phy_reset(cphy, MDIO_DEVAD_NONE, 0);
}
static int vsc8211_intr_enable(struct cphy *cphy)
{
return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE,
INTR_MASK);
}
static int vsc8211_intr_disable(struct cphy *cphy)
{
return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE, 0);
}
static int vsc8211_intr_clear(struct cphy *cphy)
{
u32 val;
/* Clear PHY interrupts by reading the register. */
return t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &val);
}
static int vsc8211_autoneg_enable(struct cphy *cphy)
{
return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR,
BMCR_PDOWN | BMCR_ISOLATE,
BMCR_ANENABLE | BMCR_ANRESTART);
}
static int vsc8211_autoneg_restart(struct cphy *cphy)
{
return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR,
BMCR_PDOWN | BMCR_ISOLATE,
BMCR_ANRESTART);
}
static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
int *speed, int *duplex, int *fc)
{
unsigned int bmcr, status, lpa, adv;
int err, sp = -1, dplx = -1, pause = 0;
err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr);
if (!err)
err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status);
if (err)
return err;
if (link_ok) {
/*
* BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
* once more to get the current link state.
*/
if (!(status & BMSR_LSTATUS))
err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR,
&status);
if (err)
return err;
*link_ok = (status & BMSR_LSTATUS) != 0;
}
if (!(bmcr & BMCR_ANENABLE)) {
dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
if (bmcr & BMCR_SPEED1000)
sp = SPEED_1000;
else if (bmcr & BMCR_SPEED100)
sp = SPEED_100;
else
sp = SPEED_10;
} else if (status & BMSR_ANEGCOMPLETE) {
err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_AUX_CTRL_STAT,
&status);
if (err)
return err;
dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
sp = G_ACSR_SPEED(status);
if (sp == 0)
sp = SPEED_10;
else if (sp == 1)
sp = SPEED_100;
else
sp = SPEED_1000;
if (fc && dplx == DUPLEX_FULL) {
err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA,
&lpa);
if (!err)
err = t3_mdio_read(cphy, MDIO_DEVAD_NONE,
MII_ADVERTISE, &adv);
if (err)
return err;
if (lpa & adv & ADVERTISE_PAUSE_CAP)
pause = PAUSE_RX | PAUSE_TX;
else if ((lpa & ADVERTISE_PAUSE_CAP) &&
(lpa & ADVERTISE_PAUSE_ASYM) &&
(adv & ADVERTISE_PAUSE_ASYM))
pause = PAUSE_TX;
else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
(adv & ADVERTISE_PAUSE_CAP))
pause = PAUSE_RX;
}
}
if (speed)
*speed = sp;
if (duplex)
*duplex = dplx;
if (fc)
*fc = pause;
return 0;
}
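/*
 * Note on the pause resolution above: it follows the standard 802.3
 * autonegotiation rules (Annex 28B), i.e. symmetric pause when both sides
 * advertise PAUSE_CAP, and a single direction only when the asymmetric
 * pause bits on both sides permit it.
 */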
static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok,
int *speed, int *duplex, int *fc)
{
unsigned int bmcr, status, lpa, adv;
int err, sp = -1, dplx = -1, pause = 0;
err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr);
if (!err)
err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status);
if (err)
return err;
if (link_ok) {
/*
* BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
* once more to get the current link state.
*/
if (!(status & BMSR_LSTATUS))
err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR,
&status);
if (err)
return err;
*link_ok = (status & BMSR_LSTATUS) != 0;
}
if (!(bmcr & BMCR_ANENABLE)) {
dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
if (bmcr & BMCR_SPEED1000)
sp = SPEED_1000;
else if (bmcr & BMCR_SPEED100)
sp = SPEED_100;
else
sp = SPEED_10;
} else if (status & BMSR_ANEGCOMPLETE) {
err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA, &lpa);
if (!err)
err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_ADVERTISE,
&adv);
if (err)
return err;
if (adv & lpa & ADVERTISE_1000XFULL) {
dplx = DUPLEX_FULL;
sp = SPEED_1000;
} else if (adv & lpa & ADVERTISE_1000XHALF) {
dplx = DUPLEX_HALF;
sp = SPEED_1000;
}
if (fc && dplx == DUPLEX_FULL) {
if (lpa & adv & ADVERTISE_1000XPAUSE)
pause = PAUSE_RX | PAUSE_TX;
else if ((lpa & ADVERTISE_1000XPAUSE) &&
(adv & lpa & ADVERTISE_1000XPSE_ASYM))
pause = PAUSE_TX;
else if ((lpa & ADVERTISE_1000XPSE_ASYM) &&
(adv & ADVERTISE_1000XPAUSE))
pause = PAUSE_RX;
}
}
if (speed)
*speed = sp;
if (duplex)
*duplex = dplx;
if (fc)
*fc = pause;
return 0;
}
#ifdef UNUSED
/*
* Enable/disable auto MDI/MDI-X in forced link speed mode.
*/
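/*
 * The write sequence below goes through a Vitesse extended page selected
 * by 0x52b5; the remaining values are vendor-provided magic numbers (not
 * in the public datasheet), so treat them as opaque.
 */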
static int vsc8211_set_automdi(struct cphy *phy, int enable)
{
int err;
err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0x52b5);
if (err)
return err;
err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 18, 0x12);
if (err)
return err;
err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 17, enable ? 0x2803 : 0x3003);
if (err)
return err;
err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 16, 0x87fa);
if (err)
return err;
err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0);
if (err)
return err;
return 0;
}
int vsc8211_set_speed_duplex(struct cphy *phy, int speed, int duplex)
{
int err;
err = t3_set_phy_speed_duplex(phy, speed, duplex);
if (!err)
err = vsc8211_set_automdi(phy, 1);
return err;
}
#endif /* UNUSED */
static int vsc8211_power_down(struct cphy *cphy, int enable)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
enable ? BMCR_PDOWN : 0);
}
static int vsc8211_intr_handler(struct cphy *cphy)
{
unsigned int cause;
int err, cphy_cause = 0;
err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &cause);
if (err)
return err;
cause &= INTR_MASK;
if (cause & CFG_CHG_INTR_MASK)
cphy_cause |= cphy_cause_link_change;
if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
cphy_cause |= cphy_cause_fifo_error;
return cphy_cause;
}
static struct cphy_ops vsc8211_ops = {
.reset = vsc8211_reset,
.intr_enable = vsc8211_intr_enable,
.intr_disable = vsc8211_intr_disable,
.intr_clear = vsc8211_intr_clear,
.intr_handler = vsc8211_intr_handler,
.autoneg_enable = vsc8211_autoneg_enable,
.autoneg_restart = vsc8211_autoneg_restart,
.advertise = t3_phy_advertise,
.set_speed_duplex = t3_set_phy_speed_duplex,
.get_link_status = vsc8211_get_link_status,
.power_down = vsc8211_power_down,
};
static struct cphy_ops vsc8211_fiber_ops = {
.reset = vsc8211_reset,
.intr_enable = vsc8211_intr_enable,
.intr_disable = vsc8211_intr_disable,
.intr_clear = vsc8211_intr_clear,
.intr_handler = vsc8211_intr_handler,
.autoneg_enable = vsc8211_autoneg_enable,
.autoneg_restart = vsc8211_autoneg_restart,
.advertise = t3_phy_advertise_fiber,
.set_speed_duplex = t3_set_phy_speed_duplex,
.get_link_status = vsc8211_get_link_status_fiber,
.power_down = vsc8211_power_down,
};
int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
int err;
unsigned int val;
cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops,
SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII |
SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T");
msleep(20); /* PHY needs ~10ms to start responding to MDIO */
err = t3_mdio_read(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL, &val);
if (err)
return err;
if (val & VSC_CTRL_MEDIA_MODE_HI) {
/* copper interface, just need to configure the LEDs */
return t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_LED_CTRL,
0x100);
}
phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
SUPPORTED_MII | SUPPORTED_FIBRE | SUPPORTED_IRQ;
phy->desc = "1000BASE-X";
phy->ops = &vsc8211_fiber_ops;
err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 1);
if (err)
return err;
err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_SIGDET_CTRL, 1);
if (err)
return err;
err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0);
if (err)
return err;
err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL,
val | VSC_CTRL_CLAUSE37_VIEW);
if (err)
return err;
err = vsc8211_reset(phy, 0);
if (err)
return err;
udelay(5); /* delay after reset before next SMI */
return 0;
}

View File

@@ -0,0 +1,657 @@
/*
* Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "common.h"
#include "regs.h"
/*
* # of exact address filters. The first one is used for the station address,
* the rest are available for multicast addresses.
*/
#define EXACT_ADDR_FILTERS 8
static inline int macidx(const struct cmac *mac)
{
return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
}
static void xaui_serdes_reset(struct cmac *mac)
{
static const unsigned int clear[] = {
F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
};
int i;
struct adapter *adap = mac->adapter;
u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
F_RESETPLL23 | F_RESETPLL01);
t3_read_reg(adap, ctrl);
udelay(15);
for (i = 0; i < ARRAY_SIZE(clear); i++) {
t3_set_reg_field(adap, ctrl, clear[i], 0);
udelay(15);
}
}
void t3b_pcs_reset(struct cmac *mac)
{
t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
F_PCS_RESET_, 0);
udelay(20);
t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
F_PCS_RESET_);
}
int t3_mac_reset(struct cmac *mac)
{
static const struct addr_val_pair mac_reset_avp[] = {
{A_XGM_TX_CTRL, 0},
{A_XGM_RX_CTRL, 0},
{A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
{A_XGM_RX_HASH_LOW, 0},
{A_XGM_RX_HASH_HIGH, 0},
{A_XGM_RX_EXACT_MATCH_LOW_1, 0},
{A_XGM_RX_EXACT_MATCH_LOW_2, 0},
{A_XGM_RX_EXACT_MATCH_LOW_3, 0},
{A_XGM_RX_EXACT_MATCH_LOW_4, 0},
{A_XGM_RX_EXACT_MATCH_LOW_5, 0},
{A_XGM_RX_EXACT_MATCH_LOW_6, 0},
{A_XGM_RX_EXACT_MATCH_LOW_7, 0},
{A_XGM_RX_EXACT_MATCH_LOW_8, 0},
{A_XGM_STAT_CTRL, F_CLRSTATS}
};
u32 val;
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
F_RXSTRFRWRD | F_DISERRFRAMES,
uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);
if (uses_xaui(adap)) {
if (adap->params.rev == 0) {
t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
F_RXENABLE | F_TXENABLE);
if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
F_CMULOCK, 1, 5, 2)) {
CH_ERR(adap,
"MAC %d XAUI SERDES CMU lock failed\n",
macidx(mac));
return -1;
}
t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
F_SERDESRESET_);
} else
xaui_serdes_reset(mac);
}
t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
if (is_10G(adap))
val |= F_PCS_RESET_;
else if (uses_xaui(adap))
val |= F_PCS_RESET_ | F_XG2G_RESET_;
else
val |= F_RGMII_RESET_ | F_XG2G_RESET_;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
if ((val & F_PCS_RESET_) && adap->params.rev) {
msleep(1);
t3b_pcs_reset(mac);
}
memset(&mac->stats, 0, sizeof(mac->stats));
return 0;
}
static int t3b2_mac_reset(struct cmac *mac)
{
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset, store;
int idx = macidx(mac);
u32 val;
if (!macidx(mac))
t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
else
t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
/* Stop NIC traffic to reduce the number of TXTOGGLES */
t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);
/* Ensure TX drains */
t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
/* Store A_TP_TX_DROP_CFG_CH0 */
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
store = t3_read_reg(adap, A_TP_TX_DROP_CFG_CH0 + idx);
msleep(10);
/* Change DROP_CFG to 0xc0000011 */
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);
/* Check for xgm Rx fifo empty */
/* Loop count increased from 5 to 1000 to cover the 1G and 100Mbps cases */
if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
0x80000000, 1, 1000, 2)) {
CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
macidx(mac));
return -1;
}
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
val = F_MAC_RESET_;
if (is_10G(adap))
val |= F_PCS_RESET_;
else if (uses_xaui(adap))
val |= F_PCS_RESET_ | F_XG2G_RESET_;
else
val |= F_RGMII_RESET_ | F_XG2G_RESET_;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
if ((val & F_PCS_RESET_) && adap->params.rev) {
msleep(1);
t3b_pcs_reset(mac);
}
t3_write_reg(adap, A_XGM_RX_CFG + oft,
F_DISPAUSEFRAMES | F_EN1536BFRAMES |
F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
/* Restore the DROP_CFG */
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
t3_write_reg(adap, A_TP_PIO_DATA, store);
if (!idx)
t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
else
t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
/* Re-enable NIC traffic */
t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);
return 0;
}
/*
* Set the exact match register 'idx' to recognize the given Ethernet address.
*/
static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
{
u32 addr_lo, addr_hi;
unsigned int oft = mac->offset + idx * 8;
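/*
 * Each exact-match entry is a LOW/HIGH register pair spaced 8 bytes apart;
 * the 6-byte address is packed little-endian, octet 0 in the low byte of
 * the LOW register.
 */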
addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
addr_hi = (addr[5] << 8) | addr[4];
t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
}
/* Set one of the station's unicast MAC addresses. */
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
{
if (idx >= mac->nucast)
return -EINVAL;
set_addr_filter(mac, idx, addr);
return 0;
}
/*
* Specify the number of exact address filters that should be reserved for
* unicast addresses. Caller should reload the unicast and multicast addresses
* after calling this.
*/
int t3_mac_set_num_ucast(struct cmac *mac, int n)
{
if (n > EXACT_ADDR_FILTERS)
return -EINVAL;
mac->nucast = n;
return 0;
}
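/*
 * Writing the LOW half of an exact-match pair disarms the filter and
 * writing the HIGH half re-arms it, which is what the disable/enable
 * helpers below rely on when they rewrite each register with its current
 * value.
 */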
void t3_mac_disable_exact_filters(struct cmac *mac)
{
unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;
for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
u32 v = t3_read_reg(mac->adapter, reg);
t3_write_reg(mac->adapter, reg, v);
}
t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
}
void t3_mac_enable_exact_filters(struct cmac *mac)
{
unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;
for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
u32 v = t3_read_reg(mac->adapter, reg);
t3_write_reg(mac->adapter, reg, v);
}
t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
}
/* Calculate the RX hash filter index of an Ethernet address */
static int hash_hw_addr(const u8 * addr)
{
int hash = 0, octet, bit, i = 0, c;
for (octet = 0; octet < 6; ++octet)
for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
hash ^= (c & 1) << i;
if (++i == 6)
i = 0;
}
return hash;
}
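/*
 * The XOR fold above produces a 6-bit index (0..63) selecting one bit of
 * the 64-bit hash filter held in the RX_HASH_LOW/RX_HASH_HIGH register
 * pair; see t3_mac_set_rx_mode() below.
 */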
int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
{
u32 val, hash_lo, hash_hi;
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
if (dev->flags & IFF_PROMISC)
val |= F_COPYALLFRAMES;
t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
if (dev->flags & IFF_ALLMULTI)
hash_lo = hash_hi = 0xffffffff;
else {
struct netdev_hw_addr *ha;
int exact_addr_idx = mac->nucast;
hash_lo = hash_hi = 0;
netdev_for_each_mc_addr(ha, dev)
if (exact_addr_idx < EXACT_ADDR_FILTERS)
set_addr_filter(mac, exact_addr_idx++,
ha->addr);
else {
int hash = hash_hw_addr(ha->addr);
if (hash < 32)
hash_lo |= (1 << hash);
else
hash_hi |= (1 << (hash - 32));
}
}
t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
return 0;
}
static int rx_fifo_hwm(int mtu)
{
int hwm;
hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
return min(hwm, MAC_RXFIFO_SIZE - 8192);
}
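/*
 * Worked example (assuming the T3's 32KB MAC Rx FIFO, MAC_RXFIFO_SIZE =
 * 32768): for a 1518-byte frame, max(32768 - 3*1518, 32768*38/100) =
 * max(28214, 12451) = 28214, which the min() then caps at
 * 32768 - 8192 = 24576 bytes.
 */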
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
int hwm, lwm, divisor;
int ipg;
unsigned int thres, v, reg;
struct adapter *adap = mac->adapter;
/*
* MAX_FRAME_SIZE includes header + FCS, mtu doesn't. The HW max
* packet size register includes header, but not FCS.
*/
mtu += 14;
if (mtu > 1536)
mtu += 4;
if (mtu > MAX_FRAME_SIZE - 4)
return -EINVAL;
t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
if (adap->params.rev >= T3_REV_B2 &&
(t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
t3_mac_disable_exact_filters(mac);
v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);
reg = adap->params.rev == T3_REV_B2 ?
A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;
/* drain RX FIFO */
if (t3_wait_op_done(adap, reg + mac->offset,
F_RXFIFO_EMPTY, 1, 20, 5)) {
t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
t3_mac_enable_exact_filters(mac);
return -EIO;
}
t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
V_RXMAXPKTSIZE(mtu));
t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
t3_mac_enable_exact_filters(mac);
} else
t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
V_RXMAXPKTSIZE(mtu));
/*
* Adjust the PAUSE frame watermarks. We always set the LWM, and the
* HWM only if flow-control is enabled.
*/
hwm = rx_fifo_hwm(mtu);
lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
v |= V_RXFIFOPAUSELWM(lwm / 8);
if (G_RXFIFOPAUSEHWM(v))
v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
V_RXFIFOPAUSEHWM(hwm / 8);
t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
/* Adjust the TX FIFO threshold based on the MTU */
thres = (adap->params.vpd.cclk * 1000) / 15625;
thres = (thres * mtu) / 1000;
if (is_10G(adap))
thres /= 10;
thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
thres = max(thres, 8U); /* need at least 8 */
ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));
if (adap->params.rev > 0) {
divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
(hwm - lwm) * 4 / divisor);
}
t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
MAC_RXFIFO_SIZE * 4 * 8 / 512);
return 0;
}
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
{
u32 val;
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
if (duplex >= 0 && duplex != DUPLEX_FULL)
return -EINVAL;
if (speed >= 0) {
if (speed == SPEED_10)
val = V_PORTSPEED(0);
else if (speed == SPEED_100)
val = V_PORTSPEED(1);
else if (speed == SPEED_1000)
val = V_PORTSPEED(2);
else if (speed == SPEED_10000)
val = V_PORTSPEED(3);
else
return -EINVAL;
t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
V_PORTSPEED(M_PORTSPEED), val);
}
val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
if (fc & PAUSE_TX) {
u32 rx_max_pkt_size =
G_RXMAXPKTSIZE(t3_read_reg(adap,
A_XGM_RX_MAX_PKT_SIZE + oft));
val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(rx_max_pkt_size) / 8);
}
t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
(fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
return 0;
}
int t3_mac_enable(struct cmac *mac, int which)
{
int idx = macidx(mac);
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
struct mac_stats *s = &mac->stats;
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
t3_write_reg(adap, A_TP_PIO_DATA,
adap->params.rev == T3_REV_C ?
0xc4ffff01 : 0xc0ede401);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx,
adap->params.rev == T3_REV_C ? 0 : 1 << idx);
t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
mac->tx_mcnt = s->tx_frames;
mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
A_TP_PIO_DATA)));
mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_TX_SPI4_SOP_EOP_CNT +
oft)));
mac->rx_mcnt = s->rx_frames;
mac->rx_pause = s->rx_pause;
mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_RX_SPI4_SOP_EOP_CNT +
oft)));
mac->rx_ocnt = s->rx_fifo_ovfl;
mac->txen = F_TXEN;
mac->toggle_cnt = 0;
}
if (which & MAC_DIRECTION_RX)
t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
return 0;
}
int t3_mac_disable(struct cmac *mac, int which)
{
struct adapter *adap = mac->adapter;
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
mac->txen = 0;
}
if (which & MAC_DIRECTION_RX) {
int val = F_MAC_RESET_;
t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
F_PCS_RESET_, 0);
msleep(100);
t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
if (is_10G(adap))
val |= F_PCS_RESET_;
else if (uses_xaui(adap))
val |= F_PCS_RESET_ | F_XG2G_RESET_;
else
val |= F_RGMII_RESET_ | F_XG2G_RESET_;
t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
}
return 0;
}
int t3b2_mac_watchdog_task(struct cmac *mac)
{
struct adapter *adap = mac->adapter;
struct mac_stats *s = &mac->stats;
unsigned int tx_tcnt, tx_xcnt;
u64 tx_mcnt = s->tx_frames;
int status;
status = 0;
tx_xcnt = 1; /* By default tx_xcnt is making progress */
tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */
if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_TX_SPI4_SOP_EOP_CNT +
mac->offset)));
if (tx_xcnt == 0) {
t3_write_reg(adap, A_TP_PIO_ADDR,
A_TP_TX_DROP_CNT_CH0 + macidx(mac));
tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
A_TP_PIO_DATA)));
} else {
goto out;
}
} else {
mac->toggle_cnt = 0;
goto out;
}
if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
if (mac->toggle_cnt > 4) {
status = 2;
goto out;
} else {
status = 1;
goto out;
}
} else {
mac->toggle_cnt = 0;
goto out;
}
out:
mac->tx_tcnt = tx_tcnt;
mac->tx_xcnt = tx_xcnt;
mac->tx_mcnt = s->tx_frames;
mac->rx_pause = s->rx_pause;
if (status == 1) {
t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
mac->toggle_cnt++;
} else if (status == 2) {
t3b2_mac_reset(mac);
mac->toggle_cnt = 0;
}
return status;
}
/*
* This function is called periodically to accumulate the current values of the
* RMON counters into the port statistics. Since the packet counters are only
* 32 bits wide, they can overflow in ~286 secs at 10G, so the function should
* be called more frequently than that. The byte counters are 45 bits wide and
* would overflow in ~7.8 hours.
*/
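/*
 * Illustrative arithmetic: at 10G line rate with minimum 64-byte frames
 * (84 bytes on the wire including preamble and IFG) the MAC sees about
 * 14.88M frames/sec, so a 32-bit frame counter wraps after roughly
 * 2^32 / 14.88e6 ~= 289 seconds.  A 45-bit byte counter at 1.25e9
 * bytes/sec wraps after 2^45 / 1.25e9 ~= 28147 seconds, about 7.8 hours.
 */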
const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
{
#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
#define RMON_UPDATE(mac, name, reg) \
(mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
(mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
u32 v, lo;
RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
if (mac->adapter->params.rev == T3_REV_B2)
v &= 0x7fffffff;
mac->stats.rx_too_long += v;
RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
RMON_UPDATE(mac, tx_pause, TX_PAUSE);
/* This counts error frames in general (bad FCS, underrun, etc.). */
RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
/* The next stat isn't clear-on-read. */
t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
lo = (u32) mac->stats.rx_cong_drops;
mac->stats.rx_cong_drops += (u64) (v - lo);
return &mac->stats;
}

View File

@@ -0,0 +1,7 @@
#
# Chelsio T4 driver
#
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o

View File

@@ -0,0 +1,722 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CXGB4_H__
#define __CXGB4_H__
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
#include "t4_hw.h"
#define FW_VERSION_MAJOR 1
#define FW_VERSION_MINOR 1
#define FW_VERSION_MICRO 0
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
};
enum {
MEM_EDC0,
MEM_EDC1,
MEM_MC
};
enum dev_master {
MASTER_CANT,
MASTER_MAY,
MASTER_MUST
};
enum dev_state {
DEV_STATE_UNINIT,
DEV_STATE_INIT,
DEV_STATE_ERR
};
enum {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
struct port_stats {
u64 tx_octets; /* total # of octets in good frames */
u64 tx_frames; /* all good frames */
u64 tx_bcast_frames; /* all broadcast frames */
u64 tx_mcast_frames; /* all multicast frames */
u64 tx_ucast_frames; /* all unicast frames */
u64 tx_error_frames; /* all error frames */
u64 tx_frames_64; /* # of Tx frames in a particular range */
u64 tx_frames_65_127;
u64 tx_frames_128_255;
u64 tx_frames_256_511;
u64 tx_frames_512_1023;
u64 tx_frames_1024_1518;
u64 tx_frames_1519_max;
u64 tx_drop; /* # of dropped Tx frames */
u64 tx_pause; /* # of transmitted pause frames */
u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */
u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */
u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */
u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */
u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */
u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */
u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */
u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */
u64 rx_octets; /* total # of octets in good frames */
u64 rx_frames; /* all good frames */
u64 rx_bcast_frames; /* all broadcast frames */
u64 rx_mcast_frames; /* all multicast frames */
u64 rx_ucast_frames; /* all unicast frames */
u64 rx_too_long; /* # of frames exceeding MTU */
u64 rx_jabber; /* # of jabber frames */
u64 rx_fcs_err; /* # of received frames with bad FCS */
u64 rx_len_err; /* # of received frames with length error */
u64 rx_symbol_err; /* symbol errors */
u64 rx_runt; /* # of short frames */
u64 rx_frames_64; /* # of Rx frames in a particular range */
u64 rx_frames_65_127;
u64 rx_frames_128_255;
u64 rx_frames_256_511;
u64 rx_frames_512_1023;
u64 rx_frames_1024_1518;
u64 rx_frames_1519_max;
u64 rx_pause; /* # of received pause frames */
u64 rx_ppp0; /* # of received PPP prio 0 frames */
u64 rx_ppp1; /* # of received PPP prio 1 frames */
u64 rx_ppp2; /* # of received PPP prio 2 frames */
u64 rx_ppp3; /* # of received PPP prio 3 frames */
u64 rx_ppp4; /* # of received PPP prio 4 frames */
u64 rx_ppp5; /* # of received PPP prio 5 frames */
u64 rx_ppp6; /* # of received PPP prio 6 frames */
u64 rx_ppp7; /* # of received PPP prio 7 frames */
u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */
u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */
u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */
u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */
u64 rx_trunc0; /* buffer-group 0 truncated packets */
u64 rx_trunc1; /* buffer-group 1 truncated packets */
u64 rx_trunc2; /* buffer-group 2 truncated packets */
u64 rx_trunc3; /* buffer-group 3 truncated packets */
};
struct lb_port_stats {
u64 octets;
u64 frames;
u64 bcast_frames;
u64 mcast_frames;
u64 ucast_frames;
u64 error_frames;
u64 frames_64;
u64 frames_65_127;
u64 frames_128_255;
u64 frames_256_511;
u64 frames_512_1023;
u64 frames_1024_1518;
u64 frames_1519_max;
u64 drop;
u64 ovflow0;
u64 ovflow1;
u64 ovflow2;
u64 ovflow3;
u64 trunc0;
u64 trunc1;
u64 trunc2;
u64 trunc3;
};
struct tp_tcp_stats {
u32 tcpOutRsts;
u64 tcpInSegs;
u64 tcpOutSegs;
u64 tcpRetransSegs;
};
struct tp_err_stats {
u32 macInErrs[4];
u32 hdrInErrs[4];
u32 tcpInErrs[4];
u32 tnlCongDrops[4];
u32 ofldChanDrops[4];
u32 tnlTxDrops[4];
u32 ofldVlanDrops[4];
u32 tcp6InErrs[4];
u32 ofldNoNeigh;
u32 ofldCongDefer;
};
struct tp_params {
unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
};
struct vpd_params {
unsigned int cclk;
u8 ec[EC_LEN + 1];
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
};
struct pci_params {
unsigned char speed;
unsigned char width;
};
struct adapter_params {
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
unsigned int sf_size; /* serial flash size in bytes */
unsigned int sf_nsec; /* # of flash sectors */
unsigned int sf_fw_start; /* start of FW image in flash */
unsigned int fw_vers;
unsigned int tp_vers;
u8 api_vers[7];
unsigned short mtus[NMTUS];
unsigned short a_wnd[NCCTRL_WIN];
unsigned short b_wnd[NCCTRL_WIN];
unsigned char nports; /* # of ethernet ports */
unsigned char portvec;
unsigned char rev; /* chip revision */
unsigned char offload;
unsigned int ofldq_wr_cred;
};
struct trace_params {
u32 data[TRACE_LEN / 4];
u32 mask[TRACE_LEN / 4];
unsigned short snap_len;
unsigned short min_len;
unsigned char skip_ofst;
unsigned char skip_len;
unsigned char invert;
unsigned char port;
};
struct link_config {
unsigned short supported; /* link capabilities */
unsigned short advertising; /* advertised capabilities */
unsigned short requested_speed; /* speed user has requested */
unsigned short speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
unsigned char link_ok; /* link up? */
};
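/*
 * FW_LEN16() converts a firmware command structure's size into the 16-byte
 * units that the firmware's LEN16 length fields expect; all FW commands
 * are sized in multiples of 16 bytes.
 */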
#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
enum {
MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
};
enum {
MAX_EGRQ = 128, /* max # of egress queues, including FLs */
MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */
};
struct adapter;
struct sge_rspq;
struct port_info {
struct adapter *adapter;
u16 viid;
s16 xact_addr_filt; /* index of exact MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
s8 mdio_addr;
u8 port_type;
u8 mod_type;
u8 port_id;
u8 tx_chan;
u8 lport; /* associated offload logical port */
u8 nqsets; /* # of qsets */
u8 first_qset; /* index of first qset */
u8 rss_mode;
struct link_config link_cfg;
u16 *rss;
};
struct dentry;
struct work_struct;
enum { /* adapter flags */
FULL_INIT_DONE = (1 << 0),
USING_MSI = (1 << 1),
USING_MSIX = (1 << 2),
FW_OK = (1 << 4),
};
struct rx_sw_desc;
struct sge_fl { /* SGE free-buffer queue state */
unsigned int avail; /* # of available Rx buffers */
unsigned int pend_cred; /* new buffers since last FL DB ring */
unsigned int cidx; /* consumer index */
unsigned int pidx; /* producer index */
unsigned long alloc_failed; /* # of times buffer allocation failed */
unsigned long large_alloc_failed;
unsigned long starving;
/* RO fields */
unsigned int cntxt_id; /* SGE context id for the free list */
unsigned int size; /* capacity of free list */
struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
__be64 *desc; /* address of HW Rx descriptor ring */
dma_addr_t addr; /* bus address of HW ring start */
};
/* A packet gather list */
struct pkt_gl {
skb_frag_t frags[MAX_SKB_FRAGS];
void *va; /* virtual address of first byte */
unsigned int nfrags; /* # of fragments */
unsigned int tot_len; /* total length of fragments */
};
typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
struct sge_rspq { /* state for an SGE response queue */
struct napi_struct napi;
const __be64 *cur_desc; /* current descriptor in queue */
unsigned int cidx; /* consumer index */
u8 gen; /* current generation bit */
u8 intr_params; /* interrupt holdoff parameters */
u8 next_intr_params; /* holdoff params for next interrupt */
u8 pktcnt_idx; /* interrupt packet threshold */
u8 uld; /* ULD handling this queue */
u8 idx; /* queue index within its group */
int offset; /* offset into current Rx buffer */
u16 cntxt_id; /* SGE context id for the response q */
u16 abs_id; /* absolute SGE id for the response q */
__be64 *desc; /* address of HW response ring */
dma_addr_t phys_addr; /* physical address of the ring */
unsigned int iqe_len; /* entry size */
unsigned int size; /* capacity of response queue */
struct adapter *adap;
struct net_device *netdev; /* associated net device */
rspq_handler_t handler;
};
struct sge_eth_stats { /* Ethernet queue statistics */
unsigned long pkts; /* # of ethernet packets */
unsigned long lro_pkts; /* # of LRO super packets */
unsigned long lro_merged; /* # of wire packets merged by LRO */
unsigned long rx_cso; /* # of Rx checksum offloads */
unsigned long vlan_ex; /* # of Rx VLAN extractions */
unsigned long rx_drops; /* # of packets dropped due to no mem */
};
struct sge_eth_rxq { /* SW Ethernet Rx queue */
struct sge_rspq rspq;
struct sge_fl fl;
struct sge_eth_stats stats;
} ____cacheline_aligned_in_smp;
struct sge_ofld_stats { /* offload queue statistics */
unsigned long pkts; /* # of packets */
unsigned long imm; /* # of immediate-data packets */
unsigned long an; /* # of asynchronous notifications */
unsigned long nomem; /* # of responses deferred due to no mem */
};
struct sge_ofld_rxq { /* SW offload Rx queue */
struct sge_rspq rspq;
struct sge_fl fl;
struct sge_ofld_stats stats;
} ____cacheline_aligned_in_smp;
struct tx_desc {
__be64 flit[8];
};
struct tx_sw_desc;
struct sge_txq {
unsigned int in_use; /* # of in-use Tx descriptors */
unsigned int size; /* # of descriptors */
unsigned int cidx; /* SW consumer index */
unsigned int pidx; /* producer index */
unsigned long stops; /* # of times q has been stopped */
unsigned long restarts; /* # of queue restarts */
unsigned int cntxt_id; /* SGE context id for the Tx q */
struct tx_desc *desc; /* address of HW Tx descriptor ring */
struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
struct sge_qstat *stat; /* queue status entry */
dma_addr_t phys_addr; /* physical address of the ring */
};
struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
struct sge_txq q;
struct netdev_queue *txq; /* associated netdev TX queue */
unsigned long tso; /* # of TSO requests */
unsigned long tx_cso; /* # of Tx checksum offloads */
unsigned long vlan_ins; /* # of Tx VLAN insertions */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;
struct sge_ofld_txq { /* state for an SGE offload Tx queue */
struct sge_txq q;
struct adapter *adap;
struct sk_buff_head sendq; /* list of backpressured packets */
struct tasklet_struct qresume_tsk; /* restarts the queue */
u8 full; /* the Tx ring is full */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;
struct sge_ctrl_txq { /* state for an SGE control Tx queue */
struct sge_txq q;
struct adapter *adap;
struct sk_buff_head sendq; /* list of backpressured packets */
struct tasklet_struct qresume_tsk; /* restarts the queue */
u8 full; /* the Tx ring is full */
} ____cacheline_aligned_in_smp;
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock;
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 ofldqsets; /* # of active offload queue sets */
u16 rdmaqs; /* # of available RDMA Rx queues */
u16 ofld_rxq[MAX_OFLD_QSETS];
u16 rdma_rxq[NCHAN];
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
unsigned int starve_thres;
u8 idma_state[2];
unsigned int egr_start;
unsigned int ingr_start;
void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
DECLARE_BITMAP(starving_fl, MAX_EGRQ);
DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */
};
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
struct l2t_data;
struct adapter {
void __iomem *regs;
struct pci_dev *pdev;
struct device *pdev_dev;
unsigned int fn;
unsigned int flags;
int msg_enable;
struct adapter_params params;
struct cxgb4_virt_res vres;
unsigned int swintr;
unsigned int wol;
struct {
unsigned short vec;
char desc[IFNAMSIZ + 10];
} msix_info[MAX_INGQ + 1];
struct sge sge;
struct net_device *port[MAX_NPORTS];
u8 chan_map[NCHAN]; /* channel -> port map */
struct l2t_data *l2t;
void *uld_handle[CXGB4_ULD_MAX];
struct list_head list_node;
struct tid_info tids;
void **tid_release_head;
spinlock_t tid_release_lock;
struct work_struct tid_release_task;
bool tid_release_task_busy;
struct dentry *debugfs_root;
spinlock_t stats_lock;
};
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
}
static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
writel(val, adap->regs + reg_addr);
}
#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
return readl(addr) + ((u64)readl(addr + 4) << 32);
}
static inline void writeq(u64 val, volatile void __iomem *addr)
{
writel(val, addr);
writel(val >> 32, addr + 4);
}
#endif
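/*
 * Note: the 32-bit fallback above issues two separate MMIO accesses
 * (low word first), so 64-bit register accesses are not atomic on such
 * platforms; callers of t4_read_reg64() on registers that can change
 * between the two accesses must tolerate this.
 */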
static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
return readq(adap->regs + reg_addr);
}
static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
writeq(val, adap->regs + reg_addr);
}
/**
* netdev2pinfo - return the port_info structure associated with a net_device
* @dev: the netdev
*
* Return the struct port_info associated with a net_device
*/
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
return netdev_priv(dev);
}
/**
* adap2pinfo - return the port_info of a port
* @adap: the adapter
* @idx: the port index
*
* Return the port_info structure for the port of the given index.
*/
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
return netdev_priv(adap->port[idx]);
}
/**
* netdev2adap - return the adapter structure associated with a net_device
* @dev: the netdev
*
* Return the struct adapter associated with a net_device
*/
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
return netdev2pinfo(dev)->adapter;
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
void *t4_alloc_mem(size_t size);
void t4_free_sge_resources(struct adapter *adap);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
struct sge_fl *fl, rspq_handler_t hnd);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *netdevq,
unsigned int iqid);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
void t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
return adap->params.vpd.cclk / 1000;
}
static inline unsigned int us_to_core_ticks(const struct adapter *adap,
unsigned int us)
{
return (us * adap->params.vpd.cclk) / 1000;
}
void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
u32 val);
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
void *rpl, bool sleep_ok);
static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
int size, void *rpl)
{
return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}
static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
int size, void *rpl)
{
return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}
void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
int t4_wait_dev_ready(struct adapter *adap);
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t4_check_fw_version(struct adapter *adapter);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
unsigned int flags);
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
u64 mask0, u64 mask1, unsigned int crc, bool enable);
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
const u32 *val);
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
unsigned int rxqi, unsigned int rxq, unsigned int tc,
unsigned int vi, unsigned int cmask, unsigned int pmask,
unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
unsigned int *rss_size);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int idx, const u8 *addr, bool persist, bool add_smt);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
unsigned int nblinks);
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 val);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int iqtype, unsigned int iqid,
unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
#endif /* __CXGB4_H__ */

File diff suppressed because it is too large

View File

@@ -0,0 +1,239 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CXGB4_OFLD_H
#define __CXGB4_OFLD_H
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/atomic.h>
/* CPL message priority levels */
enum {
CPL_PRIORITY_DATA = 0, /* data messages */
CPL_PRIORITY_SETUP = 1, /* connection setup messages */
CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
CPL_PRIORITY_ACK = 1, /* RX ACK messages */
CPL_PRIORITY_CONTROL = 1 /* control messages */
};
#define INIT_TP_WR(w, tid) do { \
(w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \
FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
(w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
FW_WR_FLOWID(tid)); \
(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)
#define INIT_TP_WR_CPL(w, cpl, tid) do { \
INIT_TP_WR(w, tid); \
OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \
} while (0)
#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
(w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \
(w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
FW_WR_FLOWID(tid)); \
(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)
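/*
 * Typical use of the macros above (illustrative sketch; assumes the CPL
 * structure layouts from t4_msg.h):
 *
 *	struct cpl_act_open_req *req;
 *
 *	req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
 *	INIT_TP_WR_CPL(req, CPL_ACT_OPEN_REQ, atid);
 *
 * which fills in the work request header (wr_hi/wr_mid/wr_lo) and the
 * opcode/TID word of the embedded CPL message.
 */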
/* Special asynchronous notification message */
#define CXGB4_MSG_AN ((void *)1)
struct serv_entry {
void *data;
};
union aopen_entry {
void *data;
union aopen_entry *next;
};
/*
* Holds the size, base address, free list start, etc. of the TID, server TID,
* and active-open TID tables. The tables themselves are allocated dynamically.
*/
struct tid_info {
void **tid_tab;
unsigned int ntids;
struct serv_entry *stid_tab;
unsigned long *stid_bmap;
unsigned int nstids;
unsigned int stid_base;
union aopen_entry *atid_tab;
unsigned int natids;
unsigned int nftids;
unsigned int ftid_base;
spinlock_t atid_lock ____cacheline_aligned_in_smp;
union aopen_entry *afree;
unsigned int atids_in_use;
spinlock_t stid_lock;
unsigned int stids_in_use;
atomic_t tids_in_use;
};
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
{
return tid < t->ntids ? t->tid_tab[tid] : NULL;
}
static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
{
return atid < t->natids ? t->atid_tab[atid].data : NULL;
}
static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
{
stid -= t->stid_base;
return stid < t->nstids ? t->stid_tab[stid].data : NULL;
}
static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
unsigned int tid)
{
t->tid_tab[tid] = data;
atomic_inc(&t->tids_in_use);
}
int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
struct in6_addr;
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, unsigned int queue);
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
skb_set_queue_mapping(skb, (queue << 1) | prio);
}
enum cxgb4_uld {
CXGB4_ULD_RDMA,
CXGB4_ULD_ISCSI,
CXGB4_ULD_MAX
};
enum cxgb4_state {
CXGB4_STATE_UP,
CXGB4_STATE_START_RECOVERY,
CXGB4_STATE_DOWN,
CXGB4_STATE_DETACH
};
struct pci_dev;
struct l2t_data;
struct net_device;
struct pkt_gl;
struct tp_tcp_stats;
struct cxgb4_range {
unsigned int start;
unsigned int size;
};
struct cxgb4_virt_res { /* virtualized HW resources */
struct cxgb4_range ddp;
struct cxgb4_range iscsi;
struct cxgb4_range stag;
struct cxgb4_range rq;
struct cxgb4_range pbl;
struct cxgb4_range qp;
struct cxgb4_range cq;
struct cxgb4_range ocq;
};
#define OCQ_WIN_OFFSET(pdev, vres) \
(pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
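/*
 * OCQ_WIN_OFFSET() places the on-chip queue (OCQ) memory window at the top
 * of PCI BAR2, with the window size rounded up to a power of two.
 */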
/*
* Block of information the LLD provides to ULDs attaching to a device.
*/
struct cxgb4_lld_info {
struct pci_dev *pdev; /* associated PCI device */
struct l2t_data *l2t; /* L2 table */
struct tid_info *tids; /* TID table */
struct net_device **ports; /* device ports */
const struct cxgb4_virt_res *vr; /* assorted HW resources */
const unsigned short *mtus; /* MTU table */
const unsigned short *rxq_ids; /* the ULD's Rx queue ids */
unsigned short nrxq; /* # of Rx queues */
unsigned short ntxq; /* # of Tx queues */
unsigned char nchan:4; /* # of channels */
unsigned char nports:4; /* # of ports */
unsigned char wr_cred; /* WR 16-byte credits */
unsigned char adapter_type; /* type of adapter */
unsigned char fw_api_ver; /* FW API version */
unsigned int fw_vers; /* FW version */
unsigned int iscsi_iolen; /* iSCSI max I/O length */
unsigned short udb_density; /* # of user DB/page */
unsigned short ucq_density; /* # of user CQs/page */
void __iomem *gts_reg; /* address of GTS register */
void __iomem *db_reg; /* address of kernel doorbell */
};
struct cxgb4_uld_info {
const char *name;
void *(*add)(const struct cxgb4_lld_info *p);
int (*rx_handler)(void *handle, const __be64 *rsp,
const struct pkt_gl *gl);
int (*state_change)(void *handle, enum cxgb4_state new_state);
};
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_port_idx(const struct net_device *dev);
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
const unsigned int *pgsz_order);
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len);
#endif /* !__CXGB4_OFLD_H */


@@ -0,0 +1,597 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <net/neighbour.h>
#include "cxgb4.h"
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#define VLAN_NONE 0xfff
/* identifies sync vs async L2T_WRITE_REQs */
#define F_SYNC_WR (1 << 12)
enum {
L2T_STATE_VALID, /* entry is up to date */
L2T_STATE_STALE, /* entry may be used but needs revalidation */
L2T_STATE_RESOLVING, /* entry needs address resolution */
L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
/* when state is one of the below the entry is not hashed */
L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
L2T_STATE_UNUSED /* entry not in use */
};
struct l2t_data {
rwlock_t lock;
atomic_t nfree; /* number of free entries */
struct l2t_entry *rover; /* starting point for next allocation */
struct l2t_entry l2tab[L2T_SIZE];
};
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
return e->vlan >> 13;
}
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
atomic_dec(&d->nfree);
}
/*
* To avoid having to check address families we do not allow v4 and v6
* neighbors to be on the same hash chain. We keep v4 entries in the first
* half of available hash buckets and v6 in the second.
*/
enum {
L2T_SZ_HALF = L2T_SIZE / 2,
L2T_HASH_MASK = L2T_SZ_HALF - 1
};
static inline unsigned int arp_hash(const u32 *key, int ifindex)
{
return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
}
static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
{
u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
}
static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
{
return addr_len == 4 ? arp_hash(addr, ifindex) :
ipv6_hash(addr, ifindex);
}
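/*
 * Standalone sketch of the bucket split above, not driver code: IPv4 keys
 * land in buckets [0, L2T_SZ_HALF) and IPv6 keys in [L2T_SZ_HALF, L2T_SIZE),
 * so the two families can never share a chain.  A trivial stand-in hash
 * replaces jhash_2words() purely for illustration:
 */
#if 0
enum { DEMO_SIZE = 64, DEMO_HALF = DEMO_SIZE / 2, DEMO_MASK = DEMO_HALF - 1 };

static unsigned int demo_hash(unsigned int a, unsigned int b)
{
	return (a * 2654435761u) ^ b;	/* placeholder, not jhash */
}

static unsigned int demo_v4_bucket(unsigned int key, int ifindex)
{
	return demo_hash(key, ifindex) & DEMO_MASK;
}

static unsigned int demo_v6_bucket(const unsigned int *key, int ifindex)
{
	unsigned int x = key[0] ^ key[1] ^ key[2] ^ key[3];

	return DEMO_HALF + (demo_hash(x, ifindex) & DEMO_MASK);
}
#endif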
/*
* Checks if an L2T entry is for the given IP/IPv6 address. It does not check
* whether the L2T entry and the address are of the same address family.
* Callers ensure an address is only checked against L2T entries of the same
* family, something made trivial by the separation of IP and IPv6 hash chains
* mentioned above. Returns 0 if there's a match.
*/
static int addreq(const struct l2t_entry *e, const u32 *addr)
{
if (e->v6)
return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
(e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
return e->addr[0] ^ addr[0];
}
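/*
 * Note on addreq() above: folding the words together with XOR and OR makes
 * the comparison branchless; the result is 0 exactly when every word of
 * the stored address matches the key.
 */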
static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
neigh_hold(n);
if (e->neigh)
neigh_release(e->neigh);
e->neigh = n;
}
/*
* Write an L2T entry. Must be called with the entry locked.
* The write may be synchronous or asynchronous.
*/
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
struct sk_buff *skb;
struct cpl_l2t_write_req *req;
skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
e->idx | (sync ? F_SYNC_WR : 0) |
TID_QID(adap->sge.fw_evtq.abs_id)));
req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
req->l2t_idx = htons(e->idx);
req->vlan = htons(e->vlan);
if (e->neigh)
memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
t4_ofld_send(adap, skb);
if (sync && e->state != L2T_STATE_SWITCHING)
e->state = L2T_STATE_SYNC_WRITE;
return 0;
}
/*
* Send packets waiting in an L2T entry's ARP queue. Must be called with the
* entry locked.
*/
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
while (e->arpq_head) {
struct sk_buff *skb = e->arpq_head;
e->arpq_head = skb->next;
skb->next = NULL;
t4_ofld_send(adap, skb);
}
e->arpq_tail = NULL;
}
/*
* Process a CPL_L2T_WRITE_RPL. Wake up the ARP queue if it completes a
* synchronous L2T_WRITE. Note that the TID in the reply is really the L2T
* index it refers to.
*/
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
unsigned int tid = GET_TID(rpl);
unsigned int idx = tid & (L2T_SIZE - 1);
if (unlikely(rpl->status != CPL_ERR_NONE)) {
dev_err(adap->pdev_dev,
"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
rpl->status, idx);
return;
}
if (tid & F_SYNC_WR) {
struct l2t_entry *e = &adap->l2t->l2tab[idx];
spin_lock(&e->lock);
if (e->state != L2T_STATE_SWITCHING) {
send_pending(adap, e);
e->state = (e->neigh->nud_state & NUD_STALE) ?
L2T_STATE_STALE : L2T_STATE_VALID;
}
spin_unlock(&e->lock);
}
}
/*
* Add a packet to an L2T entry's queue of packets awaiting resolution.
* Must be called with the entry's lock held.
*/
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
skb->next = NULL;
if (e->arpq_head)
e->arpq_tail->next = skb;
else
e->arpq_head = skb;
e->arpq_tail = skb;
}
int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
struct l2t_entry *e)
{
struct adapter *adap = netdev2adap(dev);
again:
switch (e->state) {
case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
neigh_event_send(e->neigh, NULL);
spin_lock_bh(&e->lock);
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
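		/* fall through */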
case L2T_STATE_VALID: /* fast-path, send the packet on */
return t4_ofld_send(adap, skb);
case L2T_STATE_RESOLVING:
case L2T_STATE_SYNC_WRITE:
spin_lock_bh(&e->lock);
if (e->state != L2T_STATE_SYNC_WRITE &&
e->state != L2T_STATE_RESOLVING) {
spin_unlock_bh(&e->lock);
goto again;
}
arpq_enqueue(e, skb);
spin_unlock_bh(&e->lock);
if (e->state == L2T_STATE_RESOLVING &&
!neigh_event_send(e->neigh, NULL)) {
spin_lock_bh(&e->lock);
if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
write_l2e(adap, e, 1);
spin_unlock_bh(&e->lock);
}
}
return 0;
}
EXPORT_SYMBOL(cxgb4_l2t_send);
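/*
 * Informal summary of the send path above: VALID entries transmit
 * immediately; STALE entries transmit as well but first kick the neighbour
 * layer to revalidate; RESOLVING and SYNC_WRITE entries park the skb on
 * the arpq, and if the neighbour is already resolved the entry is pushed
 * to hardware with a synchronous write_l2e().
 */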
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
struct l2t_entry *end, *e, **p;
if (!atomic_read(&d->nfree))
return NULL;
/* there's definitely a free entry */
for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
if (atomic_read(&e->refcnt) == 0)
goto found;
for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
;
found:
d->rover = e + 1;
atomic_dec(&d->nfree);
/*
* The entry we found may be an inactive entry that is
* presently in the hash table. We need to remove it.
*/
if (e->state < L2T_STATE_SWITCHING)
for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
if (*p == e) {
*p = e->next;
e->next = NULL;
break;
}
e->state = L2T_STATE_UNUSED;
return e;
}
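/*
 * Illustration, not driver code: alloc_l2e() resumes scanning where the
 * previous search stopped (the "rover"), which spreads allocations across
 * the table instead of always recycling the lowest free index.  A minimal
 * userspace model over a refcount array (the real code scans rover->end,
 * then wraps to the start, rather than using a modulo):
 */
#if 0
#define DEMO_N 8

static int demo_refcnt[DEMO_N];
static int demo_rover;

static int demo_alloc(void)
{
	int i, idx;

	for (i = 0; i < DEMO_N; i++) {
		idx = (demo_rover + i) % DEMO_N;
		if (demo_refcnt[idx] == 0) {
			demo_rover = idx + 1;	/* resume after the hit */
			demo_refcnt[idx] = 1;
			return idx;
		}
	}
	return -1;	/* table full */
}
#endif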
/*
* Called when an L2T entry has no more users.
*/
static void t4_l2e_free(struct l2t_entry *e)
{
struct l2t_data *d;
spin_lock_bh(&e->lock);
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
if (e->neigh) {
neigh_release(e->neigh);
e->neigh = NULL;
}
while (e->arpq_head) {
struct sk_buff *skb = e->arpq_head;
e->arpq_head = skb->next;
kfree_skb(skb);
}
e->arpq_tail = NULL;
}
spin_unlock_bh(&e->lock);
d = container_of(e, struct l2t_data, l2tab[e->idx]);
atomic_inc(&d->nfree);
}
void cxgb4_l2t_release(struct l2t_entry *e)
{
if (atomic_dec_and_test(&e->refcnt))
t4_l2e_free(e);
}
EXPORT_SYMBOL(cxgb4_l2t_release);
/*
* Update an L2T entry that was previously used for the same next hop as neigh.
* Must be called with softirqs disabled.
*/
static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
unsigned int nud_state;
spin_lock(&e->lock); /* avoid race with t4_l2t_free */
if (neigh != e->neigh)
neigh_replace(e, neigh);
nud_state = neigh->nud_state;
if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
!(nud_state & NUD_VALID))
e->state = L2T_STATE_RESOLVING;
else if (nud_state & NUD_CONNECTED)
e->state = L2T_STATE_VALID;
else
e->state = L2T_STATE_STALE;
spin_unlock(&e->lock);
}
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
const struct net_device *physdev,
unsigned int priority)
{
u8 lport;
u16 vlan;
struct l2t_entry *e;
int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *)neigh->primary_key;
int ifidx = neigh->dev->ifindex;
int hash = addr_hash(addr, addr_len, ifidx);
if (neigh->dev->flags & IFF_LOOPBACK)
lport = netdev2pinfo(physdev)->tx_chan + 4;
else
lport = netdev2pinfo(physdev)->lport;
if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
vlan = vlan_dev_vlan_id(neigh->dev);
else
vlan = VLAN_NONE;
write_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (!addreq(e, addr) && e->ifindex == ifidx &&
e->vlan == vlan && e->lport == lport) {
l2t_hold(d, e);
if (atomic_read(&e->refcnt) == 1)
reuse_entry(e, neigh);
goto done;
}
/* Need to allocate a new entry */
e = alloc_l2e(d);
if (e) {
spin_lock(&e->lock); /* avoid race with t4_l2t_free */
e->state = L2T_STATE_RESOLVING;
memcpy(e->addr, addr, addr_len);
e->ifindex = ifidx;
e->hash = hash;
e->lport = lport;
e->v6 = addr_len == 16;
atomic_set(&e->refcnt, 1);
neigh_replace(e, neigh);
e->vlan = vlan;
e->next = d->l2tab[hash].first;
d->l2tab[hash].first = e;
spin_unlock(&e->lock);
}
done:
write_unlock_bh(&d->lock);
return e;
}
EXPORT_SYMBOL(cxgb4_l2t_get);
/*
* Called when address resolution fails for an L2T entry to handle packets
* on the arpq head. If a packet specifies a failure handler it is invoked,
* otherwise the packet is sent to the device.
*/
static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
{
while (arpq) {
struct sk_buff *skb = arpq;
const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
arpq = skb->next;
skb->next = NULL;
if (cb->arp_err_handler)
cb->arp_err_handler(cb->handle, skb);
else
t4_ofld_send(adap, skb);
}
}
/*
* Called when the host's neighbor layer makes a change to some entry that is
* loaded into the HW L2 table.
*/
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
struct l2t_entry *e;
struct sk_buff *arpq = NULL;
struct l2t_data *d = adap->l2t;
int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
int hash = addr_hash(addr, addr_len, ifidx);
read_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (!addreq(e, addr) && e->ifindex == ifidx) {
spin_lock(&e->lock);
if (atomic_read(&e->refcnt))
goto found;
spin_unlock(&e->lock);
break;
}
read_unlock_bh(&d->lock);
return;
found:
read_unlock(&d->lock);
if (neigh != e->neigh)
neigh_replace(e, neigh);
if (e->state == L2T_STATE_RESOLVING) {
if (neigh->nud_state & NUD_FAILED) {
arpq = e->arpq_head;
e->arpq_head = e->arpq_tail = NULL;
} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
e->arpq_head) {
write_l2e(adap, e, 1);
}
} else {
e->state = neigh->nud_state & NUD_CONNECTED ?
L2T_STATE_VALID : L2T_STATE_STALE;
if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
write_l2e(adap, e, 0);
}
spin_unlock_bh(&e->lock);
if (arpq)
handle_failed_resolution(adap, arpq);
}
struct l2t_data *t4_init_l2t(void)
{
int i;
struct l2t_data *d;
d = t4_alloc_mem(sizeof(*d));
if (!d)
return NULL;
d->rover = d->l2tab;
atomic_set(&d->nfree, L2T_SIZE);
rwlock_init(&d->lock);
for (i = 0; i < L2T_SIZE; ++i) {
d->l2tab[i].idx = i;
d->l2tab[i].state = L2T_STATE_UNUSED;
spin_lock_init(&d->l2tab[i].lock);
atomic_set(&d->l2tab[i].refcnt, 0);
}
return d;
}
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
struct l2t_entry *l2tab = seq->private;
return pos >= L2T_SIZE ? NULL : &l2tab[pos];
}
static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = l2t_get_idx(seq, *pos);
if (v)
++*pos;
return v;
}
static void l2t_seq_stop(struct seq_file *seq, void *v)
{
}
static char l2e_state(const struct l2t_entry *e)
{
switch (e->state) {
case L2T_STATE_VALID: return 'V';
case L2T_STATE_STALE: return 'S';
case L2T_STATE_SYNC_WRITE: return 'W';
case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
case L2T_STATE_SWITCHING: return 'X';
default:
return 'U';
}
}
static int l2t_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, " Idx IP address "
"Ethernet address VLAN/P LP State Users Port\n");
else {
char ip[60];
struct l2t_entry *e = v;
spin_lock_bh(&e->lock);
if (e->state == L2T_STATE_SWITCHING)
ip[0] = '\0';
else
sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n",
e->idx, ip, e->dmac,
e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
l2e_state(e), atomic_read(&e->refcnt),
e->neigh ? e->neigh->dev->name : "");
spin_unlock_bh(&e->lock);
}
return 0;
}
static const struct seq_operations l2t_seq_ops = {
.start = l2t_seq_start,
.next = l2t_seq_next,
.stop = l2t_seq_stop,
.show = l2t_seq_show
};
static int l2t_seq_open(struct inode *inode, struct file *file)
{
int rc = seq_open(file, &l2t_seq_ops);
if (!rc) {
struct adapter *adap = inode->i_private;
struct seq_file *seq = file->private_data;
seq->private = adap->l2t->l2tab;
}
return rc;
}
const struct file_operations t4_l2t_fops = {
.owner = THIS_MODULE,
.open = l2t_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};


@@ -0,0 +1,107 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CXGB4_L2T_H
#define __CXGB4_L2T_H
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/atomic.h>
struct adapter;
struct l2t_data;
struct neighbour;
struct net_device;
struct file_operations;
struct cpl_l2t_write_rpl;
/*
* Each L2T entry plays multiple roles. First of all, it keeps state for the
* corresponding entry of the HW L2 table and maintains a queue of offload
* packets awaiting address resolution. Second, it is a node of a hash table
* chain, where the nodes of the chain are linked together through their next
* pointer. Finally, each node is a bucket of a hash table, pointing to the
* first element in its chain through its first pointer.
*/
struct l2t_entry {
u16 state; /* entry state */
u16 idx; /* entry index */
u32 addr[4]; /* next hop IP or IPv6 address */
int ifindex; /* neighbor's net_device's ifindex */
struct neighbour *neigh; /* associated neighbour */
struct l2t_entry *first; /* start of hash chain */
struct l2t_entry *next; /* next l2t_entry on chain */
struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
struct sk_buff *arpq_tail;
spinlock_t lock;
atomic_t refcnt; /* entry reference count */
u16 hash; /* hash bucket the entry is on */
u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
u8 v6; /* whether entry is for IPv6 */
u8 lport; /* associated offload logical interface */
u8 dmac[ETH_ALEN]; /* neighbour's MAC address */
};
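/*
 * Sketch, illustration only: the vlan field stores a full 802.1Q TCI, so
 * the 12-bit VLAN id and the 3-bit priority come back out with a mask and
 * a shift (compare vlan_prio() in l2t.c):
 */
#if 0
static unsigned short demo_vlan_id(unsigned short tci)   { return tci & 0xfff; }
static unsigned char  demo_vlan_prio(unsigned short tci) { return tci >> 13; }
#endif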
typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb);
/*
* Callback stored in an skb to handle address resolution failure.
*/
struct l2t_skb_cb {
void *handle;
arp_err_handler_t arp_err_handler;
};
#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle,
arp_err_handler_t handler)
{
L2T_SKB_CB(skb)->handle = handle;
L2T_SKB_CB(skb)->arp_err_handler = handler;
}
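/*
 * Usage sketch with assumed caller names (my_ctx and my_arp_failure are
 * hypothetical, not part of this API): a ULD stashes a failure callback in
 * the skb control block before handing the packet to cxgb4_l2t_send(); if
 * address resolution fails, the callback receives the skb back.
 *
 *	t4_set_arp_err_handler(skb, my_ctx, my_arp_failure);
 *	cxgb4_l2t_send(dev, skb, e);
 */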
void cxgb4_l2t_release(struct l2t_entry *e);
int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
struct l2t_entry *e);
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
const struct net_device *physdev,
unsigned int priority);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_data *t4_init_l2t(void);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
extern const struct file_operations t4_l2t_fops;
#endif /* __CXGB4_L2T_H */

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,140 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __T4_HW_H
#define __T4_HW_H
#include <linux/types.h>
enum {
NCHAN = 4, /* # of HW channels */
MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
EEPROMSIZE = 17408, /* Serial EEPROM physical size */
EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
NEXACT_MAC = 336, /* # of exact MAC address filters */
L2T_SIZE = 4096, /* # of L2T entries */
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
NWOL_PAT = 8, /* # of WoL patterns */
WOL_PAT_LEN = 128, /* length of WoL patterns */
};
enum {
SF_PAGE_SIZE = 256, /* serial flash page size */
};
enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */
enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
SGE_TIMER_UPD_CIDX = 7, /* update cidx only */
SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */
SGE_INTRDST_PCI = 0, /* interrupt destination is PCI-E */
SGE_INTRDST_IQ = 1, /* destination is an ingress queue */
SGE_UPDATEDEL_NONE = 0, /* ingress queue pidx update delivery */
SGE_UPDATEDEL_INTR = 1, /* interrupt */
SGE_UPDATEDEL_STPG = 2, /* status page */
SGE_UPDATEDEL_BOTH = 3, /* interrupt and status page */
SGE_HOSTFCMODE_NONE = 0, /* egress queue cidx updates */
SGE_HOSTFCMODE_IQ = 1, /* sent to ingress queue */
SGE_HOSTFCMODE_STPG = 2, /* sent to status page */
SGE_HOSTFCMODE_BOTH = 3, /* ingress queue and status page */
SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */
SGE_FETCHBURSTMIN_32B = 1,
SGE_FETCHBURSTMIN_64B = 2,
SGE_FETCHBURSTMIN_128B = 3,
SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */
SGE_FETCHBURSTMAX_128B = 1,
SGE_FETCHBURSTMAX_256B = 2,
SGE_FETCHBURSTMAX_512B = 3,
SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */
SGE_CIDXFLUSHTHRESH_2 = 1,
SGE_CIDXFLUSHTHRESH_4 = 2,
SGE_CIDXFLUSHTHRESH_8 = 3,
SGE_CIDXFLUSHTHRESH_16 = 4,
SGE_CIDXFLUSHTHRESH_32 = 5,
SGE_CIDXFLUSHTHRESH_64 = 6,
SGE_CIDXFLUSHTHRESH_128 = 7,
SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
};
struct sge_qstat { /* data written to SGE queue status entries */
__be32 qid;
__be16 cidx;
__be16 pidx;
};
/*
* Structure for last 128 bits of response descriptors
*/
struct rsp_ctrl {
__be32 hdrbuflen_pidx;
__be32 pldbuflen_qid;
union {
u8 type_gen;
__be64 last_flit;
};
};
#define RSPD_NEWBUF 0x80000000U
#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU)
#define RSPD_QID(x) RSPD_LEN(x)
#define RSPD_GEN(x) ((x) >> 7)
#define RSPD_TYPE(x) (((x) >> 4) & 3)
#define QINTR_CNT_EN 0x1
#define QINTR_TIMER_IDX(x) ((x) << 1)
#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
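/*
 * Sketch, illustration only: the type_gen byte of struct rsp_ctrl packs
 * the generation bit above the 2-bit response type, and the RSPD_* helpers
 * above pull the fields back apart:
 */
#if 0
#include <assert.h>

static void demo_rspd(void)
{
	unsigned char type_gen = (1 << 7) | (RSP_TYPE_CPL << 4);

	assert(RSPD_GEN(type_gen) == 1);
	assert(RSPD_TYPE(type_gen) == RSP_TYPE_CPL);
}
#endif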
#endif /* __T4_HW_H */


@@ -0,0 +1,678 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __T4_MSG_H
#define __T4_MSG_H
#include <linux/types.h>
enum {
CPL_PASS_OPEN_REQ = 0x1,
CPL_PASS_ACCEPT_RPL = 0x2,
CPL_ACT_OPEN_REQ = 0x3,
CPL_SET_TCB_FIELD = 0x5,
CPL_GET_TCB = 0x6,
CPL_CLOSE_CON_REQ = 0x8,
CPL_CLOSE_LISTSRV_REQ = 0x9,
CPL_ABORT_REQ = 0xA,
CPL_ABORT_RPL = 0xB,
CPL_RX_DATA_ACK = 0xD,
CPL_TX_PKT = 0xE,
CPL_L2T_WRITE_REQ = 0x12,
CPL_TID_RELEASE = 0x1A,
CPL_CLOSE_LISTSRV_RPL = 0x20,
CPL_L2T_WRITE_RPL = 0x23,
CPL_PASS_OPEN_RPL = 0x24,
CPL_ACT_OPEN_RPL = 0x25,
CPL_PEER_CLOSE = 0x26,
CPL_ABORT_REQ_RSS = 0x2B,
CPL_ABORT_RPL_RSS = 0x2D,
CPL_CLOSE_CON_RPL = 0x32,
CPL_ISCSI_HDR = 0x33,
CPL_RDMA_CQE = 0x35,
CPL_RDMA_CQE_READ_RSP = 0x36,
CPL_RDMA_CQE_ERR = 0x37,
CPL_RX_DATA = 0x39,
CPL_SET_TCB_RPL = 0x3A,
CPL_RX_PKT = 0x3B,
CPL_RX_DDP_COMPLETE = 0x3F,
CPL_ACT_ESTABLISH = 0x40,
CPL_PASS_ESTABLISH = 0x41,
CPL_RX_DATA_DDP = 0x42,
CPL_PASS_ACCEPT_REQ = 0x44,
CPL_RDMA_READ_REQ = 0x60,
CPL_PASS_OPEN_REQ6 = 0x81,
CPL_ACT_OPEN_REQ6 = 0x83,
CPL_RDMA_TERMINATE = 0xA2,
CPL_RDMA_WRITE = 0xA4,
CPL_SGE_EGR_UPDATE = 0xA5,
CPL_TRACE_PKT = 0xB0,
CPL_FW4_MSG = 0xC0,
CPL_FW4_PLD = 0xC1,
CPL_FW4_ACK = 0xC3,
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
CPL_TX_PKT_LSO = 0xED,
CPL_TX_PKT_XT = 0xEE,
NUM_CPL_CMDS
};
enum CPL_error {
CPL_ERR_NONE = 0,
CPL_ERR_TCAM_FULL = 3,
CPL_ERR_BAD_LENGTH = 15,
CPL_ERR_BAD_ROUTE = 18,
CPL_ERR_CONN_RESET = 20,
CPL_ERR_CONN_EXIST_SYNRECV = 21,
CPL_ERR_CONN_EXIST = 22,
CPL_ERR_ARP_MISS = 23,
CPL_ERR_BAD_SYN = 24,
CPL_ERR_CONN_TIMEDOUT = 30,
CPL_ERR_XMIT_TIMEDOUT = 31,
CPL_ERR_PERSIST_TIMEDOUT = 32,
CPL_ERR_FINWAIT2_TIMEDOUT = 33,
CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
CPL_ERR_RTX_NEG_ADVICE = 35,
CPL_ERR_PERSIST_NEG_ADVICE = 36,
CPL_ERR_ABORT_FAILED = 42,
CPL_ERR_IWARP_FLM = 50,
};
enum {
ULP_MODE_NONE = 0,
ULP_MODE_ISCSI = 2,
ULP_MODE_RDMA = 4,
ULP_MODE_TCPDDP = 5,
ULP_MODE_FCOE = 6,
};
enum {
ULP_CRC_HEADER = 1 << 0,
ULP_CRC_DATA = 1 << 1
};
enum {
CPL_ABORT_SEND_RST = 0,
CPL_ABORT_NO_RST,
};
enum { /* TX_PKT_XT checksum types */
TX_CSUM_TCP = 0,
TX_CSUM_UDP = 1,
TX_CSUM_CRC16 = 4,
TX_CSUM_CRC32 = 5,
TX_CSUM_CRC32C = 6,
TX_CSUM_FCOE = 7,
TX_CSUM_TCPIP = 8,
TX_CSUM_UDPIP = 9,
TX_CSUM_TCPIP6 = 10,
TX_CSUM_UDPIP6 = 11,
TX_CSUM_IP = 12,
};
union opcode_tid {
__be32 opcode_tid;
u8 opcode;
};
#define CPL_OPCODE(x) ((x) << 24)
#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
/* partitioning of TID fields that also carry a queue id */
#define GET_TID_TID(x) ((x) & 0x3fff)
#define GET_TID_QID(x) (((x) >> 14) & 0x3ff)
#define TID_QID(x) ((x) << 14)
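/*
 * Sketch, illustration only (byte-order conversion omitted): the top byte
 * of opcode_tid carries the CPL opcode and the low 24 bits the TID, which
 * can itself embed a 10-bit queue id above a 14-bit tid.  Round trip:
 */
#if 0
#include <assert.h>

static void demo_opcode_tid(void)
{
	unsigned int ot = MK_OPCODE_TID(CPL_L2T_WRITE_RPL, TID_QID(5) | 123);

	assert((ot >> 24) == CPL_L2T_WRITE_RPL);
	assert(GET_TID_TID(ot & 0xFFFFFF) == 123);
	assert(GET_TID_QID(ot & 0xFFFFFF) == 5);
}
#endif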
struct rss_header {
u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 channel:2;
u8 filter_hit:1;
u8 filter_tid:1;
u8 hash_type:2;
u8 ipv6:1;
u8 send2fw:1;
#else
u8 send2fw:1;
u8 ipv6:1;
u8 hash_type:2;
u8 filter_tid:1;
u8 filter_hit:1;
u8 channel:2;
#endif
__be16 qid;
__be32 hash_val;
};
struct work_request_hdr {
__be32 wr_hi;
__be32 wr_mid;
__be64 wr_lo;
};
#define WR_HDR struct work_request_hdr wr
struct cpl_pass_open_req {
WR_HDR;
union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
__be32 peer_ip;
__be64 opt0;
#define TX_CHAN(x) ((x) << 2)
#define DELACK(x) ((x) << 5)
#define ULP_MODE(x) ((x) << 8)
#define RCV_BUFSIZ(x) ((x) << 12)
#define DSCP(x) ((x) << 22)
#define SMAC_SEL(x) ((u64)(x) << 28)
#define L2T_IDX(x) ((u64)(x) << 36)
#define NAGLE(x) ((u64)(x) << 49)
#define WND_SCALE(x) ((u64)(x) << 50)
#define KEEP_ALIVE(x) ((u64)(x) << 54)
#define MSS_IDX(x) ((u64)(x) << 60)
__be64 opt1;
#define SYN_RSS_ENABLE (1 << 0)
#define SYN_RSS_QUEUE(x) ((x) << 2)
#define CONN_POLICY_ASK (1 << 22)
};
struct cpl_pass_open_req6 {
WR_HDR;
union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be64 local_ip_hi;
__be64 local_ip_lo;
__be64 peer_ip_hi;
__be64 peer_ip_lo;
__be64 opt0;
__be64 opt1;
};
struct cpl_pass_open_rpl {
union opcode_tid ot;
u8 rsvd[3];
u8 status;
};
struct cpl_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
__be32 opt2;
#define RSS_QUEUE(x) ((x) << 0)
#define RSS_QUEUE_VALID (1 << 10)
#define RX_COALESCE_VALID(x) ((x) << 11)
#define RX_COALESCE(x) ((x) << 12)
#define TX_QUEUE(x) ((x) << 23)
#define RX_CHANNEL(x) ((x) << 26)
#define WND_SCALE_EN(x) ((x) << 28)
#define TSTAMPS_EN(x) ((x) << 29)
#define SACK_EN(x) ((x) << 30)
__be64 opt0;
};
struct cpl_act_open_req {
WR_HDR;
union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
__be32 peer_ip;
__be64 opt0;
__be32 params;
__be32 opt2;
};
struct cpl_act_open_req6 {
WR_HDR;
union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be64 local_ip_hi;
__be64 local_ip_lo;
__be64 peer_ip_hi;
__be64 peer_ip_lo;
__be64 opt0;
__be32 params;
__be32 opt2;
};
struct cpl_act_open_rpl {
union opcode_tid ot;
__be32 atid_status;
#define GET_AOPEN_STATUS(x) ((x) & 0xff)
#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff)
};
struct cpl_pass_establish {
union opcode_tid ot;
__be32 rsvd;
__be32 tos_stid;
#define GET_POPEN_TID(x) ((x) & 0xffffff)
#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
__be16 mac_idx;
__be16 tcp_opt;
#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1)
#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
__be32 snd_isn;
__be32 rcv_isn;
};
struct cpl_act_establish {
union opcode_tid ot;
__be32 rsvd;
__be32 tos_atid;
__be16 mac_idx;
__be16 tcp_opt;
__be32 snd_isn;
__be32 rcv_isn;
};
struct cpl_get_tcb {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
#define QUEUENO(x) ((x) << 0)
#define REPLY_CHAN(x) ((x) << 14)
#define NO_REPLY(x) ((x) << 15)
__be16 cookie;
};
struct cpl_set_tcb_field {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
__be16 word_cookie;
#define TCB_WORD(x) ((x) << 0)
#define TCB_COOKIE(x) ((x) << 5)
__be64 mask;
__be64 val;
};
struct cpl_set_tcb_rpl {
union opcode_tid ot;
__be16 rsvd;
u8 cookie;
u8 status;
__be64 oldval;
};
struct cpl_close_con_req {
WR_HDR;
union opcode_tid ot;
__be32 rsvd;
};
struct cpl_close_con_rpl {
union opcode_tid ot;
u8 rsvd[3];
u8 status;
__be32 snd_nxt;
__be32 rcv_nxt;
};
struct cpl_close_listsvr_req {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
#define LISTSVR_IPV6 (1 << 14)
__be16 rsvd;
};
struct cpl_close_listsvr_rpl {
union opcode_tid ot;
u8 rsvd[3];
u8 status;
};
struct cpl_abort_req_rss {
union opcode_tid ot;
u8 rsvd[3];
u8 status;
};
struct cpl_abort_req {
WR_HDR;
union opcode_tid ot;
__be32 rsvd0;
u8 rsvd1;
u8 cmd;
u8 rsvd2[6];
};
struct cpl_abort_rpl_rss {
union opcode_tid ot;
u8 rsvd[3];
u8 status;
};
struct cpl_abort_rpl {
WR_HDR;
union opcode_tid ot;
__be32 rsvd0;
u8 rsvd1;
u8 cmd;
u8 rsvd2[6];
};
struct cpl_peer_close {
union opcode_tid ot;
__be32 rcv_nxt;
};
struct cpl_tid_release {
WR_HDR;
union opcode_tid ot;
__be32 rsvd;
};
struct cpl_tx_pkt_core {
__be32 ctrl0;
#define TXPKT_VF(x) ((x) << 0)
#define TXPKT_PF(x) ((x) << 8)
#define TXPKT_VF_VLD (1 << 11)
#define TXPKT_OVLAN_IDX(x) ((x) << 12)
#define TXPKT_INTF(x) ((x) << 16)
#define TXPKT_INS_OVLAN (1 << 21)
#define TXPKT_OPCODE(x) ((x) << 24)
__be16 pack;
__be16 len;
__be64 ctrl1;
#define TXPKT_CSUM_END(x) ((x) << 12)
#define TXPKT_CSUM_START(x) ((x) << 20)
#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20)
#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30)
#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40)
#define TXPKT_VLAN(x) ((u64)(x) << 44)
#define TXPKT_VLAN_VLD (1ULL << 60)
#define TXPKT_IPCSUM_DIS (1ULL << 62)
#define TXPKT_L4CSUM_DIS (1ULL << 63)
};
struct cpl_tx_pkt {
WR_HDR;
struct cpl_tx_pkt_core c;
};
#define cpl_tx_pkt_xt cpl_tx_pkt
struct cpl_tx_pkt_lso_core {
__be32 lso_ctrl;
#define LSO_TCPHDR_LEN(x) ((x) << 0)
#define LSO_IPHDR_LEN(x) ((x) << 4)
#define LSO_ETHHDR_LEN(x) ((x) << 16)
#define LSO_IPV6(x) ((x) << 20)
#define LSO_LAST_SLICE (1 << 22)
#define LSO_FIRST_SLICE (1 << 23)
#define LSO_OPCODE(x) ((x) << 24)
__be16 ipid_ofst;
__be16 mss;
__be32 seqno_offset;
__be32 len;
/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
};
struct cpl_tx_pkt_lso {
WR_HDR;
struct cpl_tx_pkt_lso_core c;
/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
};
struct cpl_iscsi_hdr {
union opcode_tid ot;
__be16 pdu_len_ddp;
#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
#define ISCSI_DDP (1 << 15)
__be16 len;
__be32 seq;
__be16 urg;
u8 rsvd;
u8 status;
};
struct cpl_rx_data {
union opcode_tid ot;
__be16 rsvd;
__be16 len;
__be32 seq;
__be16 urg;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 dack_mode:2;
u8 psh:1;
u8 heartbeat:1;
u8 ddp_off:1;
u8 :3;
#else
u8 :3;
u8 ddp_off:1;
u8 heartbeat:1;
u8 psh:1;
u8 dack_mode:2;
#endif
u8 status;
};
struct cpl_rx_data_ack {
WR_HDR;
union opcode_tid ot;
__be32 credit_dack;
#define RX_CREDITS(x) ((x) << 0)
#define RX_FORCE_ACK(x) ((x) << 28)
};
struct cpl_rx_pkt {
struct rss_header rsshdr;
u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 iff:4;
u8 csum_calc:1;
u8 ipmi_pkt:1;
u8 vlan_ex:1;
u8 ip_frag:1;
#else
u8 ip_frag:1;
u8 vlan_ex:1;
u8 ipmi_pkt:1;
u8 csum_calc:1;
u8 iff:4;
#endif
__be16 csum;
__be16 vlan;
__be16 len;
__be32 l2info;
#define RXF_UDP (1 << 22)
#define RXF_TCP (1 << 23)
#define RXF_IP (1 << 24)
#define RXF_IP6 (1 << 25)
__be16 hdr_len;
__be16 err_vec;
};
struct cpl_trace_pkt {
u8 opcode;
u8 intf;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 runt:4;
u8 filter_hit:4;
u8 :6;
u8 err:1;
u8 trunc:1;
#else
u8 filter_hit:4;
u8 runt:4;
u8 trunc:1;
u8 err:1;
u8 :6;
#endif
__be16 rsvd;
__be16 len;
__be64 tstamp;
};
struct cpl_l2t_write_req {
WR_HDR;
union opcode_tid ot;
__be16 params;
#define L2T_W_INFO(x) ((x) << 2)
#define L2T_W_PORT(x) ((x) << 8)
#define L2T_W_NOREPLY(x) ((x) << 15)
__be16 l2t_idx;
__be16 vlan;
u8 dst_mac[6];
};
struct cpl_l2t_write_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd[3];
};
struct cpl_rdma_terminate {
union opcode_tid ot;
__be16 rsvd;
__be16 len;
};
struct cpl_sge_egr_update {
__be32 opcode_qid;
#define EGR_QID(x) ((x) & 0x1FFFF)
__be16 cidx;
__be16 pidx;
};
struct cpl_fw4_pld {
u8 opcode;
u8 rsvd0[3];
u8 type;
u8 rsvd1;
__be16 len;
__be64 data;
__be64 rsvd2;
};
struct cpl_fw6_pld {
u8 opcode;
u8 rsvd[5];
__be16 len;
__be64 data[4];
};
struct cpl_fw4_msg {
u8 opcode;
u8 type;
__be16 rsvd0;
__be32 rsvd1;
__be64 data[2];
};
struct cpl_fw4_ack {
union opcode_tid ot;
u8 credits;
u8 rsvd0[2];
u8 seq_vld;
__be32 snd_nxt;
__be32 snd_una;
__be64 rsvd1;
};
struct cpl_fw6_msg {
u8 opcode;
u8 type;
__be16 rsvd0;
__be32 rsvd1;
__be64 data[4];
};
/* cpl_fw6_msg.type values */
enum {
FW6_TYPE_CMD_RPL = 0,
};
enum {
ULP_TX_MEM_READ = 2,
ULP_TX_MEM_WRITE = 3,
ULP_TX_PKT = 4
};
enum {
ULP_TX_SC_NOOP = 0x80,
ULP_TX_SC_IMM = 0x81,
ULP_TX_SC_DSGL = 0x82,
ULP_TX_SC_ISGL = 0x83
};
struct ulptx_sge_pair {
__be32 len[2];
__be64 addr[2];
};
struct ulptx_sgl {
__be32 cmd_nsge;
#define ULPTX_CMD(x) ((x) << 24)
#define ULPTX_NSGE(x) ((x) << 0)
__be32 len0;
__be64 addr0;
struct ulptx_sge_pair sge[0];
};
struct ulp_mem_io {
WR_HDR;
__be32 cmd;
#define ULP_MEMIO_ORDER(x) ((x) << 23)
__be32 len16; /* command length */
__be32 dlen; /* data length in 32-byte units */
#define ULP_MEMIO_DATA_LEN(x) ((x) << 0)
__be32 lock_addr;
#define ULP_MEMIO_ADDR(x) ((x) << 0)
#define ULP_MEMIO_LOCK(x) ((x) << 31)
};
#endif /* __T4_MSG_H */


@@ -0,0 +1,885 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __T4_REGS_H
#define __T4_REGS_H
#define MYPF_BASE 0x1b000
#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
#define PF0_BASE 0x1e000
#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr))
#define PF_STRIDE 0x400
#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
#define MYPORT_BASE 0x1c000
#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
#define PORT0_BASE 0x20000
#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))
#define PORT_STRIDE 0x2000
#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx)
#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
#define SGE_PF_KDOORBELL 0x0
#define QID_MASK 0xffff8000U
#define QID_SHIFT 15
#define QID(x) ((x) << QID_SHIFT)
#define DBPRIO 0x00004000U
#define PIDX_MASK 0x00003fffU
#define PIDX_SHIFT 0
#define PIDX(x) ((x) << PIDX_SHIFT)
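/*
 * Illustration, not driver code: a kernel doorbell value is composed by
 * OR-ing the field macros above, e.g. QID() to select the egress queue and
 * PIDX() for the number of newly posted descriptors:
 */
#if 0
static unsigned int demo_kdoorbell(unsigned int qid, unsigned int ndesc)
{
	return QID(qid) | PIDX(ndesc);	/* written to SGE_PF_KDOORBELL */
}
#endif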
#define SGE_PF_GTS 0x4
#define INGRESSQID_MASK 0xffff0000U
#define INGRESSQID_SHIFT 16
#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT)
#define TIMERREG_MASK 0x0000e000U
#define TIMERREG_SHIFT 13
#define TIMERREG(x) ((x) << TIMERREG_SHIFT)
#define SEINTARM_MASK 0x00001000U
#define SEINTARM_SHIFT 12
#define SEINTARM(x) ((x) << SEINTARM_SHIFT)
#define CIDXINC_MASK 0x00000fffU
#define CIDXINC_SHIFT 0
#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
#define SGE_CONTROL 0x1008
#define DCASYSTYPE 0x00080000U
#define RXPKTCPLMODE 0x00040000U
#define EGRSTATUSPAGESIZE 0x00020000U
#define PKTSHIFT_MASK 0x00001c00U
#define PKTSHIFT_SHIFT 10
#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
#define INGPCIEBOUNDARY_MASK 0x00000380U
#define INGPCIEBOUNDARY_SHIFT 7
#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
#define INGPADBOUNDARY_MASK 0x00000070U
#define INGPADBOUNDARY_SHIFT 4
#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
>> INGPADBOUNDARY_SHIFT)
#define EGRPCIEBOUNDARY_MASK 0x0000000eU
#define EGRPCIEBOUNDARY_SHIFT 1
#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
#define GLOBALENABLE 0x00000001U
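/*
 * Sketch, illustration only: every multi-bit field in this file follows
 * the same FIELD(x)/FIELD_GET(x) pattern built from a mask and a shift,
 * so insertion and extraction round-trip:
 */
#if 0
#include <assert.h>

static void demo_sge_control_fields(void)
{
	unsigned int reg = PKTSHIFT(5) | INGPADBOUNDARY(2);

	assert(PKTSHIFT_GET(reg) == 5);
	assert(INGPADBOUNDARY_GET(reg) == 2);
}
#endif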
#define SGE_HOST_PAGE_SIZE 0x100c
#define HOSTPAGESIZEPF0_MASK 0x0000000fU
#define HOSTPAGESIZEPF0_SHIFT 0
#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT)
#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
#define QUEUESPERPAGEPF0_MASK 0x0000000fU
#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
#define SGE_INT_CAUSE1 0x1024
#define SGE_INT_CAUSE2 0x1030
#define SGE_INT_CAUSE3 0x103c
#define ERR_FLM_DBP 0x80000000U
#define ERR_FLM_IDMA1 0x40000000U
#define ERR_FLM_IDMA0 0x20000000U
#define ERR_FLM_HINT 0x10000000U
#define ERR_PCIE_ERROR3 0x08000000U
#define ERR_PCIE_ERROR2 0x04000000U
#define ERR_PCIE_ERROR1 0x02000000U
#define ERR_PCIE_ERROR0 0x01000000U
#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U
#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U
#define ERR_INVALID_CIDX_INC 0x00200000U
#define ERR_ITP_TIME_PAUSED 0x00100000U
#define ERR_CPL_OPCODE_0 0x00080000U
#define ERR_DROPPED_DB 0x00040000U
#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U
#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U
#define ERR_BAD_DB_PIDX3 0x00008000U
#define ERR_BAD_DB_PIDX2 0x00004000U
#define ERR_BAD_DB_PIDX1 0x00002000U
#define ERR_BAD_DB_PIDX0 0x00001000U
#define ERR_ING_PCIE_CHAN 0x00000800U
#define ERR_ING_CTXT_PRIO 0x00000400U
#define ERR_EGR_CTXT_PRIO 0x00000200U
#define DBFIFO_HP_INT 0x00000100U
#define DBFIFO_LP_INT 0x00000080U
#define REG_ADDRESS_ERR 0x00000040U
#define INGRESS_SIZE_ERR 0x00000020U
#define EGRESS_SIZE_ERR 0x00000010U
#define ERR_INV_CTXT3 0x00000008U
#define ERR_INV_CTXT2 0x00000004U
#define ERR_INV_CTXT1 0x00000002U
#define ERR_INV_CTXT0 0x00000001U
#define SGE_INT_ENABLE3 0x1040
#define SGE_FL_BUFFER_SIZE0 0x1044
#define SGE_FL_BUFFER_SIZE1 0x1048
#define SGE_INGRESS_RX_THRESHOLD 0x10a0
#define THRESHOLD_0_MASK 0x3f000000U
#define THRESHOLD_0_SHIFT 24
#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT)
#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT)
#define THRESHOLD_1_MASK 0x003f0000U
#define THRESHOLD_1_SHIFT 16
#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT)
#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT)
#define THRESHOLD_2_MASK 0x00003f00U
#define THRESHOLD_2_SHIFT 8
#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT)
#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT)
#define THRESHOLD_3_MASK 0x0000003fU
#define THRESHOLD_3_SHIFT 0
#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT)
#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
#define SGE_TIMER_VALUE_0_AND_1 0x10b8
#define TIMERVALUE0_MASK 0xffff0000U
#define TIMERVALUE0_SHIFT 16
#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT)
#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT)
#define TIMERVALUE1_MASK 0x0000ffffU
#define TIMERVALUE1_SHIFT 0
#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT)
#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
#define SGE_TIMER_VALUE_2_AND_3 0x10bc
#define SGE_TIMER_VALUE_4_AND_5 0x10c0
#define SGE_DEBUG_INDEX 0x10cc
#define SGE_DEBUG_DATA_HIGH 0x10d0
#define SGE_DEBUG_DATA_LOW 0x10d4
#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
#define PCIE_PF_CLI 0x44
#define PCIE_INT_CAUSE 0x3004
#define UNXSPLCPLERR 0x20000000U
#define PCIEPINT 0x10000000U
#define PCIESINT 0x08000000U
#define RPLPERR 0x04000000U
#define RXWRPERR 0x02000000U
#define RXCPLPERR 0x01000000U
#define PIOTAGPERR 0x00800000U
#define MATAGPERR 0x00400000U
#define INTXCLRPERR 0x00200000U
#define FIDPERR 0x00100000U
#define CFGSNPPERR 0x00080000U
#define HRSPPERR 0x00040000U
#define HREQPERR 0x00020000U
#define HCNTPERR 0x00010000U
#define DRSPPERR 0x00008000U
#define DREQPERR 0x00004000U
#define DCNTPERR 0x00002000U
#define CRSPPERR 0x00001000U
#define CREQPERR 0x00000800U
#define CCNTPERR 0x00000400U
#define TARTAGPERR 0x00000200U
#define PIOREQPERR 0x00000100U
#define PIOCPLPERR 0x00000080U
#define MSIXDIPERR 0x00000040U
#define MSIXDATAPERR 0x00000020U
#define MSIXADDRHPERR 0x00000010U
#define MSIXADDRLPERR 0x00000008U
#define MSIDATAPERR 0x00000004U
#define MSIADDRHPERR 0x00000002U
#define MSIADDRLPERR 0x00000001U
#define PCIE_NONFAT_ERR 0x3010
#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
#define PCIEOFST_MASK 0xfffffc00U
#define BIR_MASK 0x00000300U
#define BIR_SHIFT 8
#define BIR(x) ((x) << BIR_SHIFT)
#define WINDOW_MASK 0x000000ffU
#define WINDOW_SHIFT 0
#define WINDOW(x) ((x) << WINDOW_SHIFT)
#define PCIE_MEM_ACCESS_OFFSET 0x306c
#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
#define RNPP 0x80000000U
#define RPCP 0x20000000U
#define RCIP 0x08000000U
#define RCCP 0x04000000U
#define RFTP 0x00800000U
#define PTRP 0x00100000U
#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
#define TPCP 0x40000000U
#define TNPP 0x20000000U
#define TFTP 0x10000000U
#define TCAP 0x08000000U
#define TCIP 0x04000000U
#define RCAP 0x02000000U
#define PLUP 0x00800000U
#define PLDN 0x00400000U
#define OTDD 0x00200000U
#define GTRP 0x00100000U
#define RDPE 0x00040000U
#define TDCE 0x00020000U
#define TDUE 0x00010000U
#define MC_INT_CAUSE 0x7518
#define ECC_UE_INT_CAUSE 0x00000004U
#define ECC_CE_INT_CAUSE 0x00000002U
#define PERR_INT_CAUSE 0x00000001U
#define MC_ECC_STATUS 0x751c
#define ECC_CECNT_MASK 0xffff0000U
#define ECC_CECNT_SHIFT 16
#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT)
#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
#define ECC_UECNT_MASK 0x0000ffffU
#define ECC_UECNT_SHIFT 0
#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT)
#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
#define MC_BIST_CMD 0x7600
#define START_BIST 0x80000000U
#define BIST_CMD_GAP_MASK 0x0000ff00U
#define BIST_CMD_GAP_SHIFT 8
#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT)
#define BIST_OPCODE_MASK 0x00000003U
#define BIST_OPCODE_SHIFT 0
#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT)
#define MC_BIST_CMD_ADDR 0x7604
#define MC_BIST_CMD_LEN 0x7608
#define MC_BIST_DATA_PATTERN 0x760c
#define BIST_DATA_TYPE_MASK 0x0000000fU
#define BIST_DATA_TYPE_SHIFT 0
#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT)
#define MC_BIST_STATUS_RDATA 0x7688
#define MA_EXT_MEMORY_BAR 0x77c8
#define EXT_MEM_SIZE_MASK 0x00000fffU
#define EXT_MEM_SIZE_SHIFT 0
#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
#define MA_TARGET_MEM_ENABLE 0x77d8
#define EXT_MEM_ENABLE 0x00000004U
#define EDRAM1_ENABLE 0x00000002U
#define EDRAM0_ENABLE 0x00000001U
#define MA_INT_CAUSE 0x77e0
#define MEM_PERR_INT_CAUSE 0x00000002U
#define MEM_WRAP_INT_CAUSE 0x00000001U
#define MA_INT_WRAP_STATUS 0x77e4
#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U
#define MEM_WRAP_ADDRESS_SHIFT 4
#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT)
#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU
#define MEM_WRAP_CLIENT_NUM_SHIFT 0
#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
#define MA_PARITY_ERROR_STATUS 0x77f4
#define EDC_0_BASE_ADDR 0x7900
#define EDC_BIST_CMD 0x7904
#define EDC_BIST_CMD_ADDR 0x7908
#define EDC_BIST_CMD_LEN 0x790c
#define EDC_BIST_DATA_PATTERN 0x7910
#define EDC_BIST_STATUS_RDATA 0x7928
#define EDC_INT_CAUSE 0x7978
#define ECC_UE_PAR 0x00000020U
#define ECC_CE_PAR 0x00000010U
#define PERR_PAR_CAUSE 0x00000008U
#define EDC_ECC_STATUS 0x797c
#define EDC_1_BASE_ADDR 0x7980
#define CIM_BOOT_CFG 0x7b00
#define BOOTADDR_MASK 0xffffff00U
#define CIM_PF_MAILBOX_DATA 0x240
#define CIM_PF_MAILBOX_CTRL 0x280
#define MBMSGVALID 0x00000008U
#define MBINTREQ 0x00000004U
#define MBOWNER_MASK 0x00000003U
#define MBOWNER_SHIFT 0
#define MBOWNER(x) ((x) << MBOWNER_SHIFT)
#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
#define CIM_PF_HOST_INT_CAUSE 0x28c
#define MBMSGRDYINT 0x00080000U
#define CIM_HOST_INT_CAUSE 0x7b2c
#define TIEQOUTPARERRINT 0x00100000U
#define TIEQINPARERRINT 0x00080000U
#define MBHOSTPARERR 0x00040000U
#define MBUPPARERR 0x00020000U
#define IBQPARERR 0x0001f800U
#define IBQTP0PARERR 0x00010000U
#define IBQTP1PARERR 0x00008000U
#define IBQULPPARERR 0x00004000U
#define IBQSGELOPARERR 0x00002000U
#define IBQSGEHIPARERR 0x00001000U
#define IBQNCSIPARERR 0x00000800U
#define OBQPARERR 0x000007e0U
#define OBQULP0PARERR 0x00000400U
#define OBQULP1PARERR 0x00000200U
#define OBQULP2PARERR 0x00000100U
#define OBQULP3PARERR 0x00000080U
#define OBQSGEPARERR 0x00000040U
#define OBQNCSIPARERR 0x00000020U
#define PREFDROPINT 0x00000002U
#define UPACCNONZERO 0x00000001U
#define CIM_HOST_UPACC_INT_CAUSE 0x7b34
#define EEPROMWRINT 0x40000000U
#define TIMEOUTMAINT 0x20000000U
#define TIMEOUTINT 0x10000000U
#define RSPOVRLOOKUPINT 0x08000000U
#define REQOVRLOOKUPINT 0x04000000U
#define BLKWRPLINT 0x02000000U
#define BLKRDPLINT 0x01000000U
#define SGLWRPLINT 0x00800000U
#define SGLRDPLINT 0x00400000U
#define BLKWRCTLINT 0x00200000U
#define BLKRDCTLINT 0x00100000U
#define SGLWRCTLINT 0x00080000U
#define SGLRDCTLINT 0x00040000U
#define BLKWREEPROMINT 0x00020000U
#define BLKRDEEPROMINT 0x00010000U
#define SGLWREEPROMINT 0x00008000U
#define SGLRDEEPROMINT 0x00004000U
#define BLKWRFLASHINT 0x00002000U
#define BLKRDFLASHINT 0x00001000U
#define SGLWRFLASHINT 0x00000800U
#define SGLRDFLASHINT 0x00000400U
#define BLKWRBOOTINT 0x00000200U
#define BLKRDBOOTINT 0x00000100U
#define SGLWRBOOTINT 0x00000080U
#define SGLRDBOOTINT 0x00000040U
#define ILLWRBEINT 0x00000020U
#define ILLRDBEINT 0x00000010U
#define ILLRDINT 0x00000008U
#define ILLWRINT 0x00000004U
#define ILLTRANSINT 0x00000002U
#define RSVDSPACEINT 0x00000001U
#define TP_OUT_CONFIG 0x7d04
#define VLANEXTENABLE_MASK 0x0000f000U
#define VLANEXTENABLE_SHIFT 12
#define TP_PARA_REG2 0x7d68
#define MAXRXDATA_MASK 0xffff0000U
#define MAXRXDATA_SHIFT 16
#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT)
#define TP_TIMER_RESOLUTION 0x7d90
#define TIMERRESOLUTION_MASK 0x00ff0000U
#define TIMERRESOLUTION_SHIFT 16
#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
#define TP_SHIFT_CNT 0x7dc0
#define TP_CCTRL_TABLE 0x7ddc
#define TP_MTU_TABLE 0x7de4
#define MTUINDEX_MASK 0xff000000U
#define MTUINDEX_SHIFT 24
#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT)
#define MTUWIDTH_MASK 0x000f0000U
#define MTUWIDTH_SHIFT 16
#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT)
#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT)
#define MTUVALUE_MASK 0x00003fffU
#define MTUVALUE_SHIFT 0
#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT)
#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT)
#define TP_RSS_LKP_TABLE 0x7dec
#define LKPTBLROWVLD 0x80000000U
#define LKPTBLQUEUE1_MASK 0x000ffc00U
#define LKPTBLQUEUE1_SHIFT 10
#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT)
#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT)
#define LKPTBLQUEUE0_MASK 0x000003ffU
#define LKPTBLQUEUE0_SHIFT 0
#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT)
#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT)
#define TP_PIO_ADDR 0x7e40
#define TP_PIO_DATA 0x7e44
#define TP_MIB_INDEX 0x7e50
#define TP_MIB_DATA 0x7e54
#define TP_INT_CAUSE 0x7e74
#define FLMTXFLSTEMPTY 0x40000000U
#define TP_INGRESS_CONFIG 0x141
#define VNIC 0x00000800U
#define CSUM_HAS_PSEUDO_HDR 0x00000400U
#define RM_OVLAN 0x00000200U
#define LOOKUPEVERYPKT 0x00000100U
#define TP_MIB_MAC_IN_ERR_0 0x0
#define TP_MIB_TCP_OUT_RST 0xc
#define TP_MIB_TCP_IN_SEG_HI 0x10
#define TP_MIB_TCP_IN_SEG_LO 0x11
#define TP_MIB_TCP_OUT_SEG_HI 0x12
#define TP_MIB_TCP_OUT_SEG_LO 0x13
#define TP_MIB_TCP_RXT_SEG_HI 0x14
#define TP_MIB_TCP_RXT_SEG_LO 0x15
#define TP_MIB_TNL_CNG_DROP_0 0x18
#define TP_MIB_TCP_V6IN_ERR_0 0x28
#define TP_MIB_TCP_V6OUT_RST 0x2c
#define TP_MIB_OFD_ARP_DROP 0x36
#define TP_MIB_TNL_DROP_0 0x44
#define TP_MIB_OFD_VLN_DROP_0 0x58
#define ULP_TX_INT_CAUSE 0x8dcc
#define PBL_BOUND_ERR_CH3 0x80000000U
#define PBL_BOUND_ERR_CH2 0x40000000U
#define PBL_BOUND_ERR_CH1 0x20000000U
#define PBL_BOUND_ERR_CH0 0x10000000U
#define PM_RX_INT_CAUSE 0x8fdc
#define ZERO_E_CMD_ERROR 0x00400000U
#define PMRX_FRAMING_ERROR 0x003ffff0U
#define OCSPI_PAR_ERROR 0x00000008U
#define DB_OPTIONS_PAR_ERROR 0x00000004U
#define IESPI_PAR_ERROR 0x00000002U
#define E_PCMD_PAR_ERROR 0x00000001U
#define PM_TX_INT_CAUSE 0x8ffc
#define PCMD_LEN_OVFL0 0x80000000U
#define PCMD_LEN_OVFL1 0x40000000U
#define PCMD_LEN_OVFL2 0x20000000U
#define ZERO_C_CMD_ERROR 0x10000000U
#define PMTX_FRAMING_ERROR 0x0ffffff0U
#define OESPI_PAR_ERROR 0x00000008U
#define ICSPI_PAR_ERROR 0x00000002U
#define C_PCMD_PAR_ERROR 0x00000001U
#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c
#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410
#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414
#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418
#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c
#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420
#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424
#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428
#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c
#define MPS_PORT_STAT_TX_PORT_64B_L 0x430
#define MPS_PORT_STAT_TX_PORT_64B_H 0x434
#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438
#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c
#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440
#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444
#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448
#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c
#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450
#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454
#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458
#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c
#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460
#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464
#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468
#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c
#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470
#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474
#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478
#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c
#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480
#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484
#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488
#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c
#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490
#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494
#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498
#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c
#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0
#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4
#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8
#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac
#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0
#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4
#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0
#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4
#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8
#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc
#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0
#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4
#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8
#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc
#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0
#define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4
#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8
#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec
#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0
#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4
#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8
#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc
#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500
#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504
#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508
#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c
#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510
#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514
#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518
#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c
#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c
#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550
#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554
#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558
#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c
#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560
#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564
#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568
#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c
#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570
#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574
#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578
#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c
#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580
#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584
#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588
#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c
#define MPS_PORT_STAT_RX_PORT_64B_L 0x590
#define MPS_PORT_STAT_RX_PORT_64B_H 0x594
#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598
#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c
#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0
#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4
#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8
#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac
#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0
#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4
#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8
#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc
#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0
#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4
#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8
#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc
#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0
#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4
#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8
#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc
#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0
#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4
#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8
#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec
#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0
#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4
#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8
#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc
#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600
#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604
#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608
#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
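/*
 * Illustrative sketch: each statistic above is a 64-bit counter exposed as
 * an adjacent _L/_H pair of 32-bit registers, so a driver can fetch a whole
 * counter with a single 64-bit read at the _L offset, along the lines of
 *
 *	u64 v = t4_read_reg64(adap, port_base + MPS_PORT_STAT_TX_PORT_BYTES_L);
 *
 * Here port_base stands for an assumed per-port register block offset and
 * t4_read_reg64() for the driver's 64-bit MMIO read helper.
 */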
#define MPS_CMN_CTL 0x9000
#define NUMPORTS_MASK 0x00000003U
#define NUMPORTS_SHIFT 0
#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT)
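/*
 * Illustrative sketch: the raw port count field can be extracted from a
 * read of MPS_CMN_CTL, e.g.
 *
 *	unsigned int nports = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
 *
 * where t4_read_reg() is assumed to be the driver's 32-bit register read
 * accessor.
 */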
#define MPS_INT_CAUSE 0x9008
#define STATINT 0x00000020U
#define TXINT 0x00000010U
#define RXINT 0x00000008U
#define TRCINT 0x00000004U
#define CLSINT 0x00000002U
#define PLINT 0x00000001U
#define MPS_TX_INT_CAUSE 0x9408
#define PORTERR 0x00010000U
#define FRMERR 0x00008000U
#define SECNTERR 0x00004000U
#define BUBBLE 0x00002000U
#define TXDESCFIFO 0x00001e00U
#define TXDATAFIFO 0x000001e0U
#define NCSIFIFO 0x00000010U
#define TPFIFO 0x0000000fU
#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c
#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c
#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650
#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654
#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658
#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c
#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660
#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664
#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668
#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c
#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670
#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674
#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678
#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c
#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680
#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684
#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688
#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c
#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690
#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694
#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698
#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c
#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0
#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4
#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8
#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac
#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0
#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
#define MPS_TRC_CFG 0x9800
#define TRCFIFOEMPTY 0x00000010U
#define TRCIGNOREDROPINPUT 0x00000008U
#define TRCKEEPDUPLICATES 0x00000004U
#define TRCEN 0x00000002U
#define TRCMULTIFILTER 0x00000001U
#define MPS_TRC_RSS_CONTROL 0x9808
#define RSSCONTROL_MASK 0x00ff0000U
#define RSSCONTROL_SHIFT 16
#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
#define QUEUENUMBER_MASK 0x0000ffffU
#define QUEUENUMBER_SHIFT 0
#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT)
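/*
 * Illustrative sketch: the RSS trace control fields are combined into a
 * single register write, roughly
 *
 *	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
 *		     RSSCONTROL(pf) | QUEUENUMBER(abs_qid));
 *
 * with pf and abs_qid assumed to hold the controlling PF and the absolute
 * ingress queue ID for traced packets.
 */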
#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
#define TFINVERTMATCH 0x01000000U
#define TFPKTTOOLARGE 0x00800000U
#define TFEN 0x00400000U
#define TFPORT_MASK 0x003c0000U
#define TFPORT_SHIFT 18
#define TFPORT(x) ((x) << TFPORT_SHIFT)
#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
#define TFDROP 0x00020000U
#define TFSOPEOPERR 0x00010000U
#define TFLENGTH_MASK 0x00001f00U
#define TFLENGTH_SHIFT 8
#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT)
#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
#define TFOFFSET_MASK 0x0000001fU
#define TFOFFSET_SHIFT 0
#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT)
#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
#define TFMINPKTSIZE_MASK 0x01ff0000U
#define TFMINPKTSIZE_SHIFT 16
#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT)
#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
#define TFCAPTUREMAX_MASK 0x00003fffU
#define TFCAPTUREMAX_SHIFT 0
#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT)
#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
#define MPS_TRC_INT_CAUSE 0x985c
#define MISCPERR 0x00000100U
#define PKTFIFO 0x000000f0U
#define FILTMEM 0x0000000fU
#define MPS_TRC_FILTER0_MATCH 0x9c00
#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
#define MPS_TRC_FILTER1_MATCH 0x9d00
#define MPS_CLS_INT_CAUSE 0xd028
#define PLERRENB 0x00000008U
#define HASHSRAM 0x00000004U
#define MATCHTCAM 0x00000002U
#define MATCHSRAM 0x00000001U
#define MPS_RX_PERR_INT_CAUSE 0x11074
#define CPL_INTR_CAUSE 0x19054
#define CIM_OP_MAP_PERR 0x00000020U
#define CIM_OVFL_ERROR 0x00000010U
#define TP_FRAMING_ERROR 0x00000008U
#define SGE_FRAMING_ERROR 0x00000004U
#define CIM_FRAMING_ERROR 0x00000002U
#define ZERO_SWITCH_ERROR 0x00000001U
#define SMB_INT_CAUSE 0x19090
#define MSTTXFIFOPARINT 0x00200000U
#define MSTRXFIFOPARINT 0x00100000U
#define SLVFIFOPARINT 0x00080000U
#define ULP_RX_INT_CAUSE 0x19158
#define ULP_RX_ISCSI_TAGMASK 0x19164
#define ULP_RX_ISCSI_PSZ 0x19168
#define HPZ3_MASK 0x0f000000U
#define HPZ3_SHIFT 24
#define HPZ3(x) ((x) << HPZ3_SHIFT)
#define HPZ2_MASK 0x000f0000U
#define HPZ2_SHIFT 16
#define HPZ2(x) ((x) << HPZ2_SHIFT)
#define HPZ1_MASK 0x00000f00U
#define HPZ1_SHIFT 8
#define HPZ1(x) ((x) << HPZ1_SHIFT)
#define HPZ0_MASK 0x0000000fU
#define HPZ0_SHIFT 0
#define HPZ0(x) ((x) << HPZ0_SHIFT)
#define ULP_RX_TDDP_PSZ 0x19178
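/*
 * Illustrative sketch: each 4-bit HPZ field holds a host page size code,
 * and the same field layout applies to both ULP_RX_ISCSI_PSZ and
 * ULP_RX_TDDP_PSZ, e.g.
 *
 *	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
 *
 * The "log2(page size) - 12" encoding here is an inference from this usage
 * pattern, not a statement from this header.
 */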
#define SF_DATA 0x193f8
#define SF_OP 0x193fc
#define BUSY 0x80000000U
#define SF_LOCK 0x00000010U
#define SF_CONT 0x00000008U
#define BYTECNT_MASK 0x00000006U
#define BYTECNT_SHIFT 1
#define BYTECNT(x) ((x) << BYTECNT_SHIFT)
#define OP_WR 0x00000001U
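/*
 * Illustrative sketch of a serial flash access, modeled on the pattern the
 * PF driver's flash helpers use (locking and timeout handling omitted):
 * start an operation by writing SF_OP, then poll BUSY until done:
 *
 *	t4_write_reg(adap, SF_OP,
 *		     SF_LOCK | cont | BYTECNT(byte_cnt - 1) | op);
 *	while (t4_read_reg(adap, SF_OP) & BUSY)
 *		cpu_relax();
 *
 * cont is either 0 or SF_CONT, op is either 0 or OP_WR, and byte_cnt is
 * 1..4 (the field encodes byte count minus one).
 */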
#define PL_PF_INT_CAUSE 0x3c0
#define PFSW 0x00000008U
#define PFSGE 0x00000004U
#define PFCIM 0x00000002U
#define PFMPS 0x00000001U
#define PL_PF_INT_ENABLE 0x3c4
#define PL_PF_CTL 0x3c8
#define SWINT 0x00000001U
#define PL_WHOAMI 0x19400
#define SOURCEPF_MASK 0x00000700U
#define SOURCEPF_SHIFT 8
#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT)
#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
#define ISVF 0x00000080U
#define VFID_MASK 0x0000007fU
#define VFID_SHIFT 0
#define VFID(x) ((x) << VFID_SHIFT)
#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT)
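/*
 * Illustrative sketch: at probe time a driver can learn which function it
 * is running on by reading PL_WHOAMI once, e.g.
 *
 *	u32 whoami = t4_read_reg(adap, PL_WHOAMI);
 *	unsigned int pf = SOURCEPF_GET(whoami);
 *
 * A Virtual Function's WHOAMI value would instead have ISVF set, with its
 * ID recoverable via VFID_GET(whoami).
 */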
#define PL_INT_CAUSE 0x1940c
#define ULP_TX 0x08000000U
#define SGE 0x04000000U
#define HMA 0x02000000U
#define CPL_SWITCH 0x01000000U
#define ULP_RX 0x00800000U
#define PM_RX 0x00400000U
#define PM_TX 0x00200000U
#define MA 0x00100000U
#define TP 0x00080000U
#define LE 0x00040000U
#define EDC1 0x00020000U
#define EDC0 0x00010000U
#define MC 0x00008000U
#define PCIE 0x00004000U
#define PMU 0x00002000U
#define XGMAC_KR1 0x00001000U
#define XGMAC_KR0 0x00000800U
#define XGMAC1 0x00000400U
#define XGMAC0 0x00000200U
#define SMB 0x00000100U
#define SF 0x00000080U
#define PL 0x00000040U
#define NCSI 0x00000020U
#define MPS 0x00000010U
#define MI 0x00000008U
#define DBG 0x00000004U
#define I2CM 0x00000002U
#define CIM 0x00000001U
#define PL_INT_MAP0 0x19414
#define PL_RST 0x19428
#define PIORST 0x00000002U
#define PIORSTMODE 0x00000001U
#define PL_PL_INT_CAUSE 0x19430
#define FATALPERR 0x00000010U
#define PERRVFID 0x00000001U
#define PL_REV 0x1943c
#define LE_DB_CONFIG 0x19c04
#define HASHEN 0x00100000U
#define LE_DB_SERVER_INDEX 0x19c18
#define LE_DB_ACT_CNT_IPV4 0x19c20
#define LE_DB_ACT_CNT_IPV6 0x19c24
#define LE_DB_INT_CAUSE 0x19c3c
#define REQQPARERR 0x00010000U
#define UNKNOWNCMD 0x00008000U
#define PARITYERR 0x00000040U
#define LIPMISS 0x00000020U
#define LIP0 0x00000010U
#define LE_DB_TID_HASHBASE 0x19df8
#define NCSI_INT_CAUSE 0x1a0d8
#define CIM_DM_PRTY_ERR 0x00000100U
#define MPS_DM_PRTY_ERR 0x00000080U
#define TXFIFO_PRTY_ERR 0x00000002U
#define RXFIFO_PRTY_ERR 0x00000001U
#define XGMAC_PORT_CFG2 0x1018
#define PATEN 0x00040000U
#define MAGICEN 0x00020000U
#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
#define XGMAC_PORT_EPIO_DATA0 0x10c0
#define XGMAC_PORT_EPIO_DATA1 0x10c4
#define XGMAC_PORT_EPIO_DATA2 0x10c8
#define XGMAC_PORT_EPIO_DATA3 0x10cc
#define XGMAC_PORT_EPIO_OP 0x10d0
#define EPIOWR 0x00000100U
#define ADDRESS_MASK 0x000000ffU
#define ADDRESS_SHIFT 0
#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
#define XGMAC_PORT_INT_CAUSE 0x10dc
#endif /* __T4_REGS_H */

File diff suppressed because it is too large


@@ -0,0 +1,7 @@
#
# Chelsio T4 SR-IOV Virtual Function Driver
#
obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf.o
cxgb4vf-objs := cxgb4vf_main.o t4vf_hw.o sge.o


@@ -0,0 +1,534 @@
/*
* This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
* driver for Linux.
*
* Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file should not be included directly. Include t4vf_common.h instead.
*/
#ifndef __CXGB4VF_ADAPTER_H__
#define __CXGB4VF_ADAPTER_H__
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include "../cxgb4/t4_hw.h"
/*
* Constants of the implementation.
*/
enum {
MAX_NPORTS = 1, /* max # of "ports" */
MAX_PORT_QSETS = 8, /* max # of Queue Sets / "port" */
MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS,
/*
* MSI-X interrupt index usage.
*/
MSIX_FW = 0, /* MSI-X index for firmware Q */
MSIX_IQFLINT = 1, /* MSI-X index base for Ingress Qs */
MSIX_EXTRAS = 1,
MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
/*
* The maximum number of Ingress and Egress Queues is determined by
* the maximum number of "Queue Sets" which we support plus any
* ancillary queues. Each "Queue Set" requires one Ingress Queue
* for RX Packet Ingress Event notifications and two Egress Queues for
* a Free List and an Ethernet TX list.
*/
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS,
MAX_EGRQ = MAX_ETH_QSETS*2,
};
/*
* Forward structure definition references.
*/
struct adapter;
struct sge_eth_rxq;
struct sge_rspq;
/*
* Per-"port" information. This is really per-Virtual Interface information
* but the use of the "port" nomenclature makes it easier to go back and forth
* between the PF and VF drivers ...
*/
struct port_info {
struct adapter *adapter; /* our adapter */
u16 viid; /* virtual interface ID */
s16 xact_addr_filt; /* index of our MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
u8 pidx; /* index into adapter port[] */
u8 port_id; /* physical port ID */
u8 nqsets; /* # of "Queue Sets" */
u8 first_qset; /* index of first "Queue Set" */
struct link_config link_cfg; /* physical port configuration */
};
/*
* Scatter Gather Engine resources for the "adapter". Our ingress and egress
* queues are organized into "Queue Sets" with one ingress and one egress
* queue per Queue Set. These Queue Sets are apportionable between the "ports"
* (Virtual Interfaces). One extra ingress queue is used to receive
* asynchronous messages from the firmware. Note that the "Queue IDs" that we
* use here are really "Relative Queue IDs" which are returned as part of the
* firmware command to allocate queues. These queue IDs are relative to the
* absolute Queue ID base of the section of the Queue ID space allocated to
* the PF/VF.
*/
/*
* SGE free-list queue state.
*/
struct rx_sw_desc;
struct sge_fl {
unsigned int avail; /* # of available RX buffers */
unsigned int pend_cred; /* new buffers since last FL DB ring */
unsigned int cidx; /* consumer index */
unsigned int pidx; /* producer index */
unsigned long alloc_failed; /* # of buffer allocation failures */
unsigned long large_alloc_failed; /* # of large buffer allocation failures */
unsigned long starving; /* # of times FL was found starving */
/*
* Write-once/infrequently fields.
* -------------------------------
*/
unsigned int cntxt_id; /* SGE relative QID for the free list */
unsigned int abs_id; /* SGE absolute QID for the free list */
unsigned int size; /* capacity of free list */
struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */
__be64 *desc; /* address of HW RX descriptor ring */
dma_addr_t addr; /* PCI bus address of hardware ring */
};
/*
* An ingress packet gather list.
*/
struct pkt_gl {
skb_frag_t frags[MAX_SKB_FRAGS];
void *va; /* virtual address of first byte */
unsigned int nfrags; /* # of fragments */
unsigned int tot_len; /* total length of fragments */
};
typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
const struct pkt_gl *);
/*
* State for an SGE Response Queue.
*/
struct sge_rspq {
struct napi_struct napi; /* NAPI scheduling control */
const __be64 *cur_desc; /* current descriptor in queue */
unsigned int cidx; /* consumer index */
u8 gen; /* current generation bit */
u8 next_intr_params; /* holdoff params for next interrupt */
int offset; /* offset into current FL buffer */
unsigned int unhandled_irqs; /* bogus interrupts */
/*
* Write-once/infrequently fields.
* -------------------------------
*/
u8 intr_params; /* interrupt holdoff parameters */
u8 pktcnt_idx; /* interrupt packet threshold */
u8 idx; /* queue index within its group */
u16 cntxt_id; /* SGE rel QID for the response Q */
u16 abs_id; /* SGE abs QID for the response Q */
__be64 *desc; /* address of hardware response ring */
dma_addr_t phys_addr; /* PCI bus address of ring */
unsigned int iqe_len; /* entry size */
unsigned int size; /* capacity of response Q */
struct adapter *adapter; /* our adapter */
struct net_device *netdev; /* associated net device */
rspq_handler_t handler; /* the handler for this response Q */
};
/*
* Ethernet queue statistics
*/
struct sge_eth_stats {
unsigned long pkts; /* # of ethernet packets */
unsigned long lro_pkts; /* # of LRO super packets */
unsigned long lro_merged; /* # of wire packets merged by LRO */
unsigned long rx_cso; /* # of Rx checksum offloads */
unsigned long vlan_ex; /* # of Rx VLAN extractions */
unsigned long rx_drops; /* # of packets dropped due to no mem */
};
/*
* State for an Ethernet Receive Queue.
*/
struct sge_eth_rxq {
struct sge_rspq rspq; /* Response Queue */
struct sge_fl fl; /* Free List */
struct sge_eth_stats stats; /* receive statistics */
};
/*
* SGE Transmit Queue state. This contains all of the resources associated
* with the hardware status of a TX Queue which is a circular ring of hardware
* TX Descriptors. For convenience, it also contains a pointer to a parallel
* "Software Descriptor" array but we don't know anything about it here other
* than its type name.
*/
struct tx_desc {
/*
* Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
* hardware: Sizes, Producer and Consumer indices, etc.
*/
__be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];
};
struct tx_sw_desc;
struct sge_txq {
unsigned int in_use; /* # of in-use TX descriptors */
unsigned int size; /* # of descriptors */
unsigned int cidx; /* SW consumer index */
unsigned int pidx; /* producer index */
unsigned long stops; /* # of times queue has been stopped */
unsigned long restarts; /* # of queue restarts */
/*
* Write-once/infrequently fields.
* -------------------------------
*/
unsigned int cntxt_id; /* SGE relative QID for the TX Q */
unsigned int abs_id; /* SGE absolute QID for the TX Q */
struct tx_desc *desc; /* address of HW TX descriptor ring */
struct tx_sw_desc *sdesc; /* address of SW TX descriptor ring */
struct sge_qstat *stat; /* queue status entry */
dma_addr_t phys_addr; /* PCI bus address of hardware ring */
};
/*
* State for an Ethernet Transmit Queue.
*/
struct sge_eth_txq {
struct sge_txq q; /* SGE TX Queue */
struct netdev_queue *txq; /* associated netdev TX queue */
unsigned long tso; /* # of TSO requests */
unsigned long tx_cso; /* # of TX checksum offloads */
unsigned long vlan_ins; /* # of TX VLAN insertions */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
};
/*
* The complete set of Scatter/Gather Engine resources.
*/
struct sge {
/*
* Our "Queue Sets" ...
*/
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
/*
* Extra ingress queues for asynchronous firmware events and
* forwarded interrupts (when in MSI mode).
*/
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock;
/*
* State for managing "starving Free Lists" -- Free Lists which have
* fallen below a certain threshold of buffers available to the
* hardware and attempts to refill them up to that threshold have
* failed. We have a regular "slow tick" timer process which will
* make periodic attempts to refill these starving Free Lists ...
*/
DECLARE_BITMAP(starving_fl, MAX_EGRQ);
struct timer_list rx_timer;
/*
* State for cleaning up completed TX descriptors.
*/
struct timer_list tx_timer;
/*
* Write-once/infrequently fields.
* -------------------------------
*/
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */
u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */
/*
* Reverse maps from Absolute Queue IDs to associated queue pointers.
* The absolute Queue IDs are in a compact range which start at a
* [potentially large] Base Queue ID. We perform the reverse map by
* first converting the Absolute Queue ID into a Relative Queue ID by
* subtracting off the Base Queue ID and then use a Relative Queue ID
* indexed table to get the pointer to the corresponding software
* queue structure.
*/
unsigned int egr_base;
unsigned int ingr_base;
void *egr_map[MAX_EGRQ];
struct sge_rspq *ingr_map[MAX_INGQ];
};
/*
* Utility macros to convert Absolute- to Relative-Queue indices and Egress-
* and Ingress-Queues. The EQ_MAP() and IQ_MAP() macros, which provide
* pointers to Egress- and Ingress-Queues respectively, can be used as both
* L-values and R-values.
*/
#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))
#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])
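/*
 * Illustrative sketch (an assumption based on the definitions above): when
 * the firmware refers to a queue by its Absolute Queue ID, the associated
 * software state can be recovered directly, e.g.
 *
 *	struct sge_rspq *rspq = IQ_MAP(s, abs_qid);
 *
 * and, because the map macros are usable as L-values, a queue is installed
 * at setup time with
 *
 *	IQ_MAP(s, rspq->abs_id) = rspq;
 */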
/*
* Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
*/
#define for_each_ethrxq(sge, iter) \
for (iter = 0; iter < (sge)->ethqsets; iter++)
/*
* Per-"adapter" (Virtual Function) information.
*/
struct adapter {
/* PCI resources */
void __iomem *regs;
struct pci_dev *pdev;
struct device *pdev_dev;
/* "adapter" resources */
unsigned long registered_device_map;
unsigned long open_device_map;
unsigned long flags;
struct adapter_params params;
/* queue and interrupt resources */
struct {
unsigned short vec;
char desc[22];
} msix_info[MSIX_ENTRIES];
struct sge sge;
/* Linux network device resources */
struct net_device *port[MAX_NPORTS];
const char *name;
unsigned int msg_enable;
/* debugfs resources */
struct dentry *debugfs_root;
/* various locks */
spinlock_t stats_lock;
};
enum { /* adapter flags */
FULL_INIT_DONE = (1UL << 0),
USING_MSI = (1UL << 1),
USING_MSIX = (1UL << 2),
QUEUES_BOUND = (1UL << 3),
};
/*
* The following register read/write routine definitions are required by
* the common code.
*/
/**
* t4_read_reg - read a HW register
* @adapter: the adapter
* @reg_addr: the register address
*
* Returns the 32-bit value of the given HW register.
*/
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
return readl(adapter->regs + reg_addr);
}
/**
* t4_write_reg - write a HW register
* @adapter: the adapter
* @reg_addr: the register address
* @val: the value to write
*
* Write a 32-bit value into the given HW register.
*/
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
writel(val, adapter->regs + reg_addr);
}
#ifndef readq
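/*
 * Fallback implementations for platforms without native readq()/writeq().
 * Note that these split each 64-bit access into two 32-bit accesses and
 * are therefore not atomic with respect to the hardware.
 */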
static inline u64 readq(const volatile void __iomem *addr)
{
return readl(addr) + ((u64)readl(addr + 4) << 32);
}
static inline void writeq(u64 val, volatile void __iomem *addr)
{
writel(val, addr);
writel(val >> 32, addr + 4);
}
#endif
/**
* t4_read_reg64 - read a 64-bit HW register
* @adapter: the adapter
* @reg_addr: the register address
*
* Returns the 64-bit value of the given HW register.
*/
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
return readq(adapter->regs + reg_addr);
}
/**
* t4_write_reg64 - write a 64-bit HW register
* @adapter: the adapter
* @reg_addr: the register address
* @val: the value to write
*
* Write a 64-bit value into the given HW register.
*/
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
u64 val)
{
writeq(val, adapter->regs + reg_addr);
}
/**
* port_name - return the string name of a port
* @adapter: the adapter
* @pidx: the port index
*
* Return the string name of the selected port.
*/
static inline const char *port_name(struct adapter *adapter, int pidx)
{
return adapter->port[pidx]->name;
}
/**
* t4_os_set_hw_addr - store a port's MAC address in SW
* @adapter: the adapter
* @pidx: the port index
* @hw_addr: the Ethernet address
*
* Store the Ethernet address of the given port in SW. Called by the common
* code when it retrieves a port's Ethernet address from EEPROM.
*/
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
u8 hw_addr[])
{
memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN);
}
/**
* netdev2pinfo - return the port_info structure associated with a net_device
* @dev: the netdev
*
* Return the struct port_info associated with a net_device
*/
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
return netdev_priv(dev);
}
/**
* adap2pinfo - return the port_info of a port
* @adapter: the adapter
* @pidx: the port index
*
* Return the port_info structure for the selected port of the adapter.
*/
static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
{
return netdev_priv(adapter->port[pidx]);
}
/**
* netdev2adap - return the adapter structure associated with a net_device
* @dev: the netdev
*
* Return the struct adapter associated with a net_device
*/
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
return netdev2pinfo(dev)->adapter;
}
/*
* OS "Callback" function declarations. These are functions that the OS code
* is "contracted" to provide for the common code.
*/
void t4vf_os_link_changed(struct adapter *, int, int);
/*
* SGE function prototype declarations.
*/
int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
struct net_device *, int,
struct sge_fl *, rspq_handler_t);
int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
struct net_device *, struct netdev_queue *,
unsigned int);
void t4vf_free_sge_resources(struct adapter *);
int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
const struct pkt_gl *);
irq_handler_t t4vf_intr_handler(struct adapter *);
irqreturn_t t4vf_sge_intr_msix(int, void *);
int t4vf_sge_init(struct adapter *);
void t4vf_sge_start(struct adapter *);
void t4vf_sge_stop(struct adapter *);
#endif /* __CXGB4VF_ADAPTER_H__ */

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,274 @@
/*
* This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
* driver for Linux.
*
* Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __T4VF_COMMON_H__
#define __T4VF_COMMON_H__
#include "../cxgb4/t4fw_api.h"
/*
* The "len16" field of a Firmware Command Structure ...
*/
#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
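/*
 * For example, a 32-byte command structure gives sizeof(fw_struct)/16 == 2,
 * so FW_LEN16() encodes a length of two 16-byte units into the command's
 * len16 field.
 */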
/*
* Per-VF statistics.
*/
struct t4vf_port_stats {
/*
* TX statistics.
*/
u64 tx_bcast_bytes; /* broadcast */
u64 tx_bcast_frames;
u64 tx_mcast_bytes; /* multicast */
u64 tx_mcast_frames;
u64 tx_ucast_bytes; /* unicast */
u64 tx_ucast_frames;
u64 tx_drop_frames; /* TX dropped frames */
u64 tx_offload_bytes; /* offload */
u64 tx_offload_frames;
/*
* RX statistics.
*/
u64 rx_bcast_bytes; /* broadcast */
u64 rx_bcast_frames;
u64 rx_mcast_bytes; /* multicast */
u64 rx_mcast_frames;
u64 rx_ucast_bytes;
u64 rx_ucast_frames; /* unicast */
u64 rx_err_frames; /* RX error frames */
};
/*
* Per-"port" (Virtual Interface) link configuration ...
*/
struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
unsigned short requested_speed; /* speed user has requested */
unsigned short speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
unsigned char link_ok; /* link up? */
};
enum {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
/*
* General device parameters ...
*/
struct dev_params {
u32 fwrev; /* firmware version */
u32 tprev; /* TP Microcode Version */
};
/*
* Scatter Gather Engine parameters. These are almost all determined by the
* Physical Function Driver. We just need to grab them to see within which
* environment we're playing ...
*/
struct sge_params {
u32 sge_control; /* padding, boundaries, lengths, etc. */
u32 sge_host_page_size; /* RDMA page sizes */
u32 sge_queues_per_page; /* RDMA queues/page */
u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */
u32 sge_fl_buffer_size[16]; /* free list buffer sizes */
u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */
u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */
u32 sge_timer_value_2_and_3;
u32 sge_timer_value_4_and_5;
};
/*
* Vital Product Data parameters.
*/
struct vpd_params {
u32 cclk; /* Core Clock (KHz) */
};
/*
* Global Receive Side Scaling (RSS) parameters in host-native format.
*/
struct rss_params {
unsigned int mode; /* RSS mode */
union {
struct {
unsigned int synmapen:1; /* SYN Map Enable */
unsigned int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */
unsigned int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */
unsigned int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */
unsigned int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */
unsigned int ofdmapen:1; /* Offload Map Enable */
unsigned int tnlmapen:1; /* Tunnel Map Enable */
unsigned int tnlalllookup:1; /* Tunnel All Lookup */
unsigned int hashtoeplitz:1; /* use Toeplitz hash */
} basicvirtual;
} u;
};
/*
* Virtual Interface RSS Configuration in host-native format.
*/
union rss_vi_config {
struct {
u16 defaultq; /* Ingress Queue ID for !tnlalllookup */
unsigned int ip6fourtupen:1; /* hash 4-tuple IPv6 ingress packets */
unsigned int ip6twotupen:1; /* hash 2-tuple IPv6 ingress packets */
unsigned int ip4fourtupen:1; /* hash 4-tuple IPv4 ingress packets */
unsigned int ip4twotupen:1; /* hash 2-tuple IPv4 ingress packets */
int udpen; /* hash 4-tuple UDP ingress packets */
} basicvirtual;
};
/*
* Maximum resources provisioned for a PCI VF.
*/
struct vf_resources {
unsigned int nvi; /* N virtual interfaces */
unsigned int neq; /* N egress Qs */
unsigned int nethctrl; /* N egress ETH or CTRL Qs */
unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
unsigned int niq; /* N ingress Qs */
unsigned int tc; /* PCI-E traffic class */
unsigned int pmask; /* port access rights mask */
unsigned int nexactf; /* N exact MPS filters */
unsigned int r_caps; /* read capabilities */
unsigned int wx_caps; /* write/execute capabilities */
};
/*
* Per-"adapter" (Virtual Function) parameters.
*/
struct adapter_params {
struct dev_params dev; /* general device parameters */
struct sge_params sge; /* Scatter Gather Engine */
struct vpd_params vpd; /* Vital Product Data */
struct rss_params rss; /* Receive Side Scaling */
struct vf_resources vfres; /* Virtual Function Resource limits */
u8 nports; /* # of Ethernet "ports" */
};
#include "adapter.h"
#ifndef PCI_VENDOR_ID_CHELSIO
# define PCI_VENDOR_ID_CHELSIO 0x1425
#endif
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; iter++)
static inline bool is_10g_port(const struct link_config *lc)
{
return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
}
static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
{
return adapter->params.vpd.cclk / 1000;
}
static inline unsigned int us_to_core_ticks(const struct adapter *adapter,
unsigned int us)
{
return (us * adapter->params.vpd.cclk) / 1000;
}
static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
unsigned int ticks)
{
return (ticks * 1000) / adapter->params.vpd.cclk;
}
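/*
 * As a worked example: with a 200 MHz core clock, cclk is 200000 (KHz),
 * core_ticks_per_usec() returns 200, and us_to_core_ticks(adapter, 5)
 * returns 1000.
 */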
int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool);
static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
int size, void *rpl)
{
return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
}
static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
int size, void *rpl)
{
return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
}
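/*
 * Illustrative sketch (a guess at typical usage, assuming the command
 * structures and macros from t4fw_api.h): a simple write command is
 * composed and issued through the mailbox roughly as follows, which is
 * essentially what t4vf_fw_reset(), declared below, does:
 *
 *	struct fw_reset_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
 *				      FW_CMD_REQUEST |
 *				      FW_CMD_WRITE);
 *	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
 *	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
 */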
int __devinit t4vf_wait_dev_ready(struct adapter *);
int __devinit t4vf_port_init(struct adapter *, int);
int t4vf_fw_reset(struct adapter *);
int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
int t4vf_get_sge_params(struct adapter *);
int t4vf_get_vpd_params(struct adapter *);
int t4vf_get_dev_params(struct adapter *);
int t4vf_get_rss_glb_config(struct adapter *);
int t4vf_get_vfres(struct adapter *);
int t4vf_read_rss_vi_config(struct adapter *, unsigned int,
union rss_vi_config *);
int t4vf_write_rss_vi_config(struct adapter *, unsigned int,
union rss_vi_config *);
int t4vf_config_rss_range(struct adapter *, unsigned int, int, int,
const u16 *, int);
int t4vf_alloc_vi(struct adapter *, int);
int t4vf_free_vi(struct adapter *, int);
int t4vf_enable_vi(struct adapter *, unsigned int, bool, bool);
int t4vf_identify_port(struct adapter *, unsigned int, unsigned int);
int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int,
bool);
int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int,
const u8 **, u16 *, u64 *, bool);
int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool);
int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool);
int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *);
int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int,
unsigned int);
int t4vf_eth_eq_free(struct adapter *, unsigned int);
int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
#endif /* __T4VF_COMMON_H__ */


@@ -0,0 +1,121 @@
/*
* This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
* driver for Linux.
*
* Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __T4VF_DEFS_H__
#define __T4VF_DEFS_H__
#include "../cxgb4/t4_regs.h"
/*
* The VF Register Map.
*
* The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local
* bus module (PL) and CPU Interface Module (CIM) components are mapped via
* the Slice to Module Map Table (see below) in the Physical Function Register
* Map. The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base
* and Offset registers in the PF Register Map. The MBDATA base address is
* quite constrained as it determines the Mailbox Data addresses for both PFs
* and VFs, and therefore must fit in both the VF and PF Register Maps without
* overlapping other registers.
*/
#define T4VF_SGE_BASE_ADDR 0x0000
#define T4VF_MPS_BASE_ADDR 0x0100
#define T4VF_PL_BASE_ADDR 0x0200
#define T4VF_MBDATA_BASE_ADDR 0x0240
#define T4VF_CIM_BASE_ADDR 0x0300
#define T4VF_REGMAP_START 0x0000
#define T4VF_REGMAP_SIZE 0x0400
/*
* There's no hardware limitation which requires that the addresses of the
* Mailbox Data in the fixed CIM PF map and the programmable VF map must
* match. However, it's a useful convention ...
*/
#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
#endif
/*
* Virtual Function "Slice to Module Map Table" definitions.
*
* This table allows us to map subsets of the various module register sets
* into the T4VF Register Map. Each table entry identifies the index of the
* module whose registers are being mapped, the offset within the module's
* register set that the mapping should start at, the limit of the mapping,
* and the offset within the T4VF Register Map to which the module's registers
* are being mapped. All addresses and quantities are in terms of 32-bit
* words. The "limit" value is also in terms of 32-bit words and is equal to
* the last address mapped in the T4VF Register Map (i.e. it's a "<="
* relation rather than a "<").
*/
#define T4VF_MOD_MAP(module, index, first, last) \
T4VF_MOD_MAP_##module##_INDEX = (index), \
T4VF_MOD_MAP_##module##_FIRST = (first), \
T4VF_MOD_MAP_##module##_LAST = (last), \
T4VF_MOD_MAP_##module##_OFFSET = ((first)/4), \
T4VF_MOD_MAP_##module##_BASE = \
(T4VF_##module##_BASE_ADDR/4 + (first)/4), \
T4VF_MOD_MAP_##module##_LIMIT = \
(T4VF_##module##_BASE_ADDR/4 + (last)/4),
#define SGE_VF_KDOORBELL 0x0
#define SGE_VF_GTS 0x4
#define MPS_VF_CTL 0x0
#define MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc
#define PL_VF_WHOAMI 0x0
#define CIM_VF_EXT_MAILBOX_CTRL 0x0
#define CIM_VF_EXT_MAILBOX_STATUS 0x4
enum {
T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS)
T4VF_MOD_MAP(MPS, 0, MPS_VF_CTL, MPS_VF_STAT_RX_VF_ERR_FRAMES_H)
T4VF_MOD_MAP(PL, 3, PL_VF_WHOAMI, PL_VF_WHOAMI)
T4VF_MOD_MAP(CIM, 1, CIM_VF_EXT_MAILBOX_CTRL, CIM_VF_EXT_MAILBOX_STATUS)
};
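/*
 * For example, T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS) above
 * expands (with T4VF_SGE_BASE_ADDR == 0x0000) to:
 *
 *	T4VF_MOD_MAP_SGE_INDEX  = 2
 *	T4VF_MOD_MAP_SGE_FIRST  = 0x0
 *	T4VF_MOD_MAP_SGE_LAST   = 0x4
 *	T4VF_MOD_MAP_SGE_OFFSET = 0	(in 32-bit words)
 *	T4VF_MOD_MAP_SGE_BASE   = 0	(word address in the VF map)
 *	T4VF_MOD_MAP_SGE_LIMIT  = 1	(inclusive word limit)
 */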
/*
* There isn't a Slice to Module Map Table entry for the Mailbox Data
* registers, but it's convenient to use similar names as above. There are 8
* little-endian 64-bit Mailbox Data registers. Note that the "instances"
* value below is in terms of 32-bit words which matches the "word" addressing
* space we use above for the Slice to Module Map Space.
*/
#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES 16
#define T4VF_MBDATA_FIRST 0
#define T4VF_MBDATA_LAST ((NUM_CIM_VF_MAILBOX_DATA_INSTANCES-1)*4)
#endif /* __T4VF_DEFS_H__ */

File diff suppressed because it is too large