Merge branch 'linus/master' into rdma.git for-next
rdma.git merge resolution for the 4.19 merge window

Conflicts:
 drivers/infiniband/core/rdma_core.c
 - Use the rdma code and revise with the new spelling for
   atomic_fetch_add_unless
 drivers/nvme/host/rdma.c
 - Replace max_sge with max_send_sge in new blk code
 drivers/nvme/target/rdma.c
 - Use the blk code and revise to use NULL for ib_post_recv when
   appropriate
 - Replace max_sge with max_recv_sge in new blk code
 net/rds/ib_send.c
 - Use the net code and revise to use NULL for ib_post_recv when
   appropriate

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
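For context, a minimal illustrative sketch of the three API changes behind the
conflict resolutions named above — not the resolved hunks themselves. The
surrounding function, example_post(), is hypothetical; only the helper and
field names (atomic_fetch_add_unless, max_recv_sge, the NULL bad-wr argument)
are the real 4.19-era interfaces:

#include <linux/atomic.h>
#include <rdma/ib_verbs.h>

/* Hypothetical caller, for illustration only. */
static int example_post(struct ib_qp *qp, struct ib_recv_wr *wr,
			atomic_t *refcount, struct ib_device_attr *attr)
{
	/* 1. __atomic_add_unless() became atomic_fetch_add_unless();
	 *    it returns the old value, so zero means the ref was dead.
	 */
	if (!atomic_fetch_add_unless(refcount, 1, 0))
		return -ENODEV;

	/* 2. ib_device_attr.max_sge was split into max_send_sge and
	 *    max_recv_sge; receive paths must use the recv-side limit.
	 */
	if (wr->num_sge > attr->max_recv_sge)
		return -EINVAL;

	/* 3. ib_post_recv() now accepts NULL when the caller does not
	 *    need a pointer to the failed work request.
	 */
	return ib_post_recv(qp, wr, NULL);
}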
@@ -109,10 +109,6 @@ static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

static const char pci_speed[][4] = {
"33", "66", "100", "133"
};

/*
 * Setup MAC to receive the types of packets we want.
 */

@@ -136,6 +136,7 @@ again:
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
/* fall through */
case L2T_STATE_VALID: /* fast-path, send the packet on */
return cxgb3_ofld_send(dev, skb);
case L2T_STATE_RESOLVING:
@@ -120,6 +120,8 @@ struct cudbg_mem_desc {
u32 idx;
};

#define CUDBG_MEMINFO_REV 1

struct cudbg_meminfo {
struct cudbg_mem_desc avail[4];
struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3];
@@ -137,6 +139,9 @@ struct cudbg_meminfo {
u32 port_alloc[4];
u32 loopback_used[NCHAN];
u32 loopback_alloc[NCHAN];
u32 p_structs_free_cnt;
u32 free_rx_cnt;
u32 free_tx_cnt;
};

struct cudbg_cim_pif_la {
@@ -281,12 +286,18 @@ struct cudbg_tid_data {

#define CUDBG_NUM_ULPTX 11
#define CUDBG_NUM_ULPTX_READ 512
#define CUDBG_NUM_ULPTX_ASIC 6
#define CUDBG_NUM_ULPTX_ASIC_READ 128

#define CUDBG_ULPTX_LA_REV 1

struct cudbg_ulptx_la {
u32 rdptr[CUDBG_NUM_ULPTX];
u32 wrptr[CUDBG_NUM_ULPTX];
u32 rddata[CUDBG_NUM_ULPTX];
u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ];
u32 rdptr_asic[CUDBG_NUM_ULPTX_ASIC_READ];
u32 rddata_asic[CUDBG_NUM_ULPTX_ASIC_READ][CUDBG_NUM_ULPTX_ASIC];
};

#define CUDBG_CHAC_PBT_ADDR 0x2800
@@ -349,6 +349,11 @@ int cudbg_fill_meminfo(struct adapter *padap,
meminfo_buff->up_extmem2_hi = hi;

lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
meminfo_buff->free_rx_cnt +=
FREERXPAGECOUNT_G(t4_read_reg(padap,
TP_FLM_FREE_RX_CNT_A));

meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
meminfo_buff->rx_pages_data[1] =
t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
@@ -356,6 +361,11 @@ int cudbg_fill_meminfo(struct adapter *padap,

lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
meminfo_buff->free_tx_cnt +=
FREETXPAGECOUNT_G(t4_read_reg(padap,
TP_FLM_FREE_TX_CNT_A));

meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
meminfo_buff->tx_pages_data[1] =
hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
@@ -364,6 +374,8 @@ int cudbg_fill_meminfo(struct adapter *padap,
meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);

meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
meminfo_buff->p_structs_free_cnt =
FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));

for (i = 0; i < 4; i++) {
if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
@@ -1465,14 +1477,23 @@ int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_meminfo *meminfo_buff;
struct cudbg_ver_hdr *ver_hdr;
int rc;

rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo),
rc = cudbg_get_buff(pdbg_init, dbg_buff,
sizeof(struct cudbg_ver_hdr) +
sizeof(struct cudbg_meminfo),
&temp_buff);
if (rc)
return rc;

meminfo_buff = (struct cudbg_meminfo *)temp_buff.data;
ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
ver_hdr->revision = CUDBG_MEMINFO_REV;
ver_hdr->size = sizeof(struct cudbg_meminfo);

meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data +
sizeof(*ver_hdr));
rc = cudbg_fill_meminfo(padap, meminfo_buff);
if (rc) {
cudbg_err->sys_err = rc;
@@ -2586,15 +2607,24 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_ulptx_la *ulptx_la_buff;
struct cudbg_ver_hdr *ver_hdr;
u32 i, j;
int rc;

rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la),
rc = cudbg_get_buff(pdbg_init, dbg_buff,
sizeof(struct cudbg_ver_hdr) +
sizeof(struct cudbg_ulptx_la),
&temp_buff);
if (rc)
return rc;

ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
ver_hdr->revision = CUDBG_ULPTX_LA_REV;
ver_hdr->size = sizeof(struct cudbg_ulptx_la);

ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
sizeof(*ver_hdr));
for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
ULP_TX_LA_RDPTR_0_A +
@@ -2610,6 +2640,25 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
t4_read_reg(padap,
ULP_TX_LA_RDDATA_0_A + 0x10 * i);
}

for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
ulptx_la_buff->rdptr_asic[i] =
t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
ulptx_la_buff->rddata_asic[i][0] =
t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
ulptx_la_buff->rddata_asic[i][1] =
t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
ulptx_la_buff->rddata_asic[i][2] =
t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
ulptx_la_buff->rddata_asic[i][3] =
t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
ulptx_la_buff->rddata_asic[i][4] =
t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
ulptx_la_buff->rddata_asic[i][5] =
t4_read_reg(padap, PM_RX_BASE_ADDR);
}

return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
@@ -46,6 +46,7 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/rhashtable.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
@@ -319,6 +320,21 @@ struct vpd_params {
u8 na[MACADDR_LEN + 1];
};

/* Maximum resources provisioned for a PCI PF.
 */
struct pf_resources {
unsigned int nvi;		/* N virtual interfaces */
unsigned int neq;		/* N egress Qs */
unsigned int nethctrl;		/* N egress ETH or CTRL Qs */
unsigned int niqflint;		/* N ingress Qs/w free list(s) & intr */
unsigned int niq;		/* N ingress Qs */
unsigned int tc;		/* PCI-E traffic class */
unsigned int pmask;		/* port access rights mask */
unsigned int nexactf;		/* N exact MPS filters */
unsigned int r_caps;		/* read capabilities */
unsigned int wx_caps;		/* write/execute capabilities */
};

struct pci_params {
unsigned int vpd_cap_addr;
unsigned char speed;
@@ -346,6 +362,7 @@ struct adapter_params {
struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
struct pf_resources pfres;
struct pci_params pci;
struct devlog_params devlog;
enum pcie_memwin drv_memwin;
@@ -521,6 +538,15 @@ enum {
MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
};

enum {
PRIV_FLAG_PORT_TX_VM_BIT,
};

#define PRIV_FLAG_PORT_TX_VM		BIT(PRIV_FLAG_PORT_TX_VM_BIT)

#define PRIV_FLAGS_ADAP			0
#define PRIV_FLAGS_PORT			PRIV_FLAG_PORT_TX_VM

struct adapter;
struct sge_rspq;

@@ -557,6 +583,7 @@ struct port_info {
struct hwtstamp_config tstamp_config;
bool ptp_enable;
struct sched_table *sched_tbl;
u32 eth_flags;
};

struct dentry;
@@ -867,6 +894,7 @@ struct adapter {
unsigned int flags;
unsigned int adap_idx;
enum chip_type chip;
u32 eth_flags;

int msg_enable;
__be16 vxlan_port;
@@ -956,6 +984,7 @@ struct adapter {
struct chcr_stats_debug chcr_stats;

/* TC flower offload */
bool tc_flower_initialized;
struct rhashtable flower_tbl;
struct rhashtable_params flower_ht_params;
struct timer_list flower_stats_timer;
@@ -1333,7 +1362,7 @@ void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
@@ -1555,6 +1584,7 @@ int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_get_pfres(struct adapter *adapter);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
@@ -1823,4 +1853,5 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
int cxgb4_dcb_enabled(const struct net_device *dev);
#endif /* __CXGB4_H__ */
@@ -224,7 +224,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
break;
case CUDBG_MEMINFO:
len = sizeof(struct cudbg_meminfo);
len = sizeof(struct cudbg_ver_hdr) +
sizeof(struct cudbg_meminfo);
break;
case CUDBG_CIM_PIF_LA:
len = sizeof(struct cudbg_cim_pif_la);
@@ -273,7 +274,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
}
break;
case CUDBG_ULPTX_LA:
len = sizeof(struct cudbg_ulptx_la);
len = sizeof(struct cudbg_ver_hdr) +
sizeof(struct cudbg_ulptx_la);
break;
case CUDBG_UP_CIM_INDIRECT:
n = 0;
@@ -22,7 +22,7 @@

/* DCBx version control
 */
static const char * const dcb_ver_array[] = {
const char * const dcb_ver_array[] = {
"Unknown",
"DCBx-CIN",
"DCBx-CEE 1.01",
@@ -2414,6 +2414,234 @@ static const struct file_operations rss_vf_config_debugfs_fops = {
.release = seq_release_private
};

#ifdef CONFIG_CHELSIO_T4_DCB
extern char *dcb_ver_array[];

/* Data Center Briging information for each port.
 */
static int dcb_info_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;

if (v == SEQ_START_TOKEN) {
seq_puts(seq, "Data Center Bridging Information\n");
} else {
int port = (uintptr_t)v - 2;
struct net_device *dev = adap->port[port];
struct port_info *pi = netdev2pinfo(dev);
struct port_dcb_info *dcb = &pi->dcb;

seq_puts(seq, "\n");
seq_printf(seq, "Port: %d (DCB negotiated: %s)\n",
port,
cxgb4_dcb_enabled(dev) ? "yes" : "no");

if (cxgb4_dcb_enabled(dev))
seq_printf(seq, "[ DCBx Version %s ]\n",
dcb_ver_array[dcb->dcb_version]);

if (dcb->msgs) {
int i;

seq_puts(seq, "\n  Index\t\t\t  :\t");
for (i = 0; i < 8; i++)
seq_printf(seq, " %3d", i);
seq_puts(seq, "\n\n");
}

if (dcb->msgs & CXGB4_DCB_FW_PGID) {
int prio, pgid;

seq_puts(seq, "  Priority Group IDs\t  :\t");
for (prio = 0; prio < 8; prio++) {
pgid = (dcb->pgid >> 4 * (7 - prio)) & 0xf;
seq_printf(seq, " %3d", pgid);
}
seq_puts(seq, "\n");
}

if (dcb->msgs & CXGB4_DCB_FW_PGRATE) {
int pg;

seq_puts(seq, "  Priority Group BW(%)\t  :\t");
for (pg = 0; pg < 8; pg++)
seq_printf(seq, " %3d", dcb->pgrate[pg]);
seq_puts(seq, "\n");

if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
seq_puts(seq, "  TSA Algorithm\t\t  :\t");
for (pg = 0; pg < 8; pg++)
seq_printf(seq, " %3d", dcb->tsa[pg]);
seq_puts(seq, "\n");
}

seq_printf(seq, "  Max PG Traffic Classes  [%3d  ]\n",
dcb->pg_num_tcs_supported);

seq_puts(seq, "\n");
}

if (dcb->msgs & CXGB4_DCB_FW_PRIORATE) {
int prio;

seq_puts(seq, "  Priority Rate\t:\t");
for (prio = 0; prio < 8; prio++)
seq_printf(seq, " %3d", dcb->priorate[prio]);
seq_puts(seq, "\n");
}

if (dcb->msgs & CXGB4_DCB_FW_PFC) {
int prio;

seq_puts(seq, "  Priority Flow Control   :\t");
for (prio = 0; prio < 8; prio++) {
int pfcen = (dcb->pfcen >> 1 * (7 - prio))
& 0x1;
seq_printf(seq, " %3d", pfcen);
}
seq_puts(seq, "\n");

seq_printf(seq, "  Max PFC Traffic Classes [%3d  ]\n",
dcb->pfc_num_tcs_supported);

seq_puts(seq, "\n");
}

if (dcb->msgs & CXGB4_DCB_FW_APP_ID) {
int app, napps;

seq_puts(seq, "  Application Information:\n");
seq_puts(seq, "  App    Priority    Selection         Protocol\n");
seq_puts(seq, "  Index  Map         Field             ID\n");
for (app = 0, napps = 0;
app < CXGB4_MAX_DCBX_APP_SUPPORTED; app++) {
struct app_priority *ap;
static const char * const sel_names[] = {
"Ethertype",
"Socket TCP",
"Socket UDP",
"Socket All",
};
const char *sel_name;

ap = &dcb->app_priority[app];
/* skip empty slots */
if (ap->protocolid == 0)
continue;
napps++;

if (ap->sel_field < ARRAY_SIZE(sel_names))
sel_name = sel_names[ap->sel_field];
else
sel_name = "UNKNOWN";

seq_printf(seq, "  %3d    %#04x        %-10s (%d)    %#06x (%d)\n",
app,
ap->user_prio_map,
sel_name, ap->sel_field,
ap->protocolid, ap->protocolid);
}
if (napps == 0)
seq_puts(seq, "    --- None ---\n");
}
}
return 0;
}

static inline void *dcb_info_get_idx(struct adapter *adap, loff_t pos)
{
return (pos <= adap->params.nports
? (void *)((uintptr_t)pos + 1)
: NULL);
}

static void *dcb_info_start(struct seq_file *seq, loff_t *pos)
{
struct adapter *adap = seq->private;

return (*pos
? dcb_info_get_idx(adap, *pos)
: SEQ_START_TOKEN);
}

static void dcb_info_stop(struct seq_file *seq, void *v)
{
}

static void *dcb_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct adapter *adap = seq->private;

(*pos)++;
return dcb_info_get_idx(adap, *pos);
}

static const struct seq_operations dcb_info_seq_ops = {
.start = dcb_info_start,
.next = dcb_info_next,
.stop = dcb_info_stop,
.show = dcb_info_show
};

static int dcb_info_open(struct inode *inode, struct file *file)
{
int res = seq_open(file, &dcb_info_seq_ops);

if (!res) {
struct seq_file *seq = file->private_data;

seq->private = inode->i_private;
}
return res;
}

static const struct file_operations dcb_info_debugfs_fops = {
.owner = THIS_MODULE,
.open = dcb_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_CHELSIO_T4_DCB */

static int resources_show(struct seq_file *seq, void *v)
{
struct adapter *adapter = seq->private;
struct pf_resources *pfres = &adapter->params.pfres;

#define S(desc, fmt, var) \
seq_printf(seq, "%-60s " fmt "\n", \
desc " (" #var "):", pfres->var)

S("Virtual Interfaces", "%d", nvi);
S("Egress Queues", "%d", neq);
S("Ethernet Control", "%d", nethctrl);
S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
S("Ingress Queues", "%d", niq);
S("Traffic Class", "%d", tc);
S("Port Access Rights Mask", "%#x", pmask);
S("MAC Address Filters", "%d", nexactf);
S("Firmware Command Read Capabilities", "%#x", r_caps);
S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);

#undef S

return 0;
}

static int resources_open(struct inode *inode, struct file *file)
{
return single_open(file, resources_show, inode->i_private);
}

static const struct file_operations resources_debugfs_fops = {
.owner = THIS_MODULE,
.open = resources_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};

/**
 * ethqset2pinfo - return port_info of an Ethernet Queue Set
 * @adap: the adapter
@@ -2436,16 +2664,64 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
return NULL;
}

static int sge_qinfo_uld_txq_entries(const struct adapter *adap, int uld)
{
const struct sge_uld_txq_info *utxq_info = adap->sge.uld_txq_info[uld];

if (!utxq_info)
return 0;

return DIV_ROUND_UP(utxq_info->ntxq, 4);
}

static int sge_qinfo_uld_rspq_entries(const struct adapter *adap, int uld,
bool ciq)
{
const struct sge_uld_rxq_info *urxq_info = adap->sge.uld_rxq_info[uld];

if (!urxq_info)
return 0;

return ciq ? DIV_ROUND_UP(urxq_info->nciq, 4) :
DIV_ROUND_UP(urxq_info->nrxq, 4);
}

static int sge_qinfo_uld_rxq_entries(const struct adapter *adap, int uld)
{
return sge_qinfo_uld_rspq_entries(adap, uld, false);
}

static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
{
return sge_qinfo_uld_rspq_entries(adap, uld, true);
}

static int sge_qinfo_show(struct seq_file *seq, void *v)
{
int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
const struct sge_uld_txq_info *utxq_info;
const struct sge_uld_rxq_info *urxq_info;
struct adapter *adap = seq->private;
int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
int i, r = (uintptr_t)v - 1;
int ofld_idx = r - eth_entries;
int ctrl_idx = ofld_idx - ofld_entries;
int fq_idx = ctrl_idx - ctrl_entries;
int i, n, r = (uintptr_t)v - 1;
int eth_entries, ctrl_entries;
struct sge *s = &adap->sge;

eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);

mutex_lock(&uld_mutex);
if (s->uld_txq_info)
for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);

if (s->uld_rxq_info) {
for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
}
}

if (r)
seq_putc(seq, '\n');
@@ -2467,9 +2743,10 @@ do { \

if (r < eth_entries) {
int base_qset = r * 4;
const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset];
const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset];
int n = min(4, adap->sge.ethqsets - 4 * r);
const struct sge_eth_rxq *rx = &s->ethrxq[base_qset];
const struct sge_eth_txq *tx = &s->ethtxq[base_qset];

n = min(4, s->ethqsets - 4 * r);

S("QType:", "Ethernet");
S("Interface:",
@@ -2494,8 +2771,7 @@ do { \
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:",
adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
R("FL size:", fl.size - 8);
R("FL pend:", fl.pend_cred);
@@ -2520,9 +2796,196 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);

} else if (ctrl_idx < ctrl_entries) {
const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
int n = min(4, adap->params.nports - 4 * ctrl_idx);
goto unlock;
}

r -= eth_entries;
if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
const struct sge_uld_txq *tx;

utxq_info = s->uld_txq_info[CXGB4_TX_OFLD];
tx = &utxq_info->uldtxq[r * 4];
n = min(4, utxq_info->ntxq - 4 * r);

S("QType:", "OFLD-TXQ");
T("TxQ ID:", q.cntxt_id);
T("TxQ size:", q.size);
T("TxQ inuse:", q.in_use);
T("TxQ CIDX:", q.cidx);
T("TxQ PIDX:", q.pidx);

goto unlock;
}

r -= uld_txq_entries[CXGB4_TX_OFLD];
if (r < uld_rxq_entries[CXGB4_ULD_RDMA]) {
const struct sge_ofld_rxq *rx;

urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
rx = &urxq_info->uldrxq[r * 4];
n = min(4, urxq_info->nrxq - 4 * r);

S("QType:", "RDMA-CPL");
S("Interface:",
rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
R("RspQ ID:", rspq.abs_id);
R("RspQ size:", rspq.size);
R("RspQE size:", rspq.iqe_len);
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
R("FL size:", fl.size - 8);
R("FL pend:", fl.pend_cred);
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);

goto unlock;
}

r -= uld_rxq_entries[CXGB4_ULD_RDMA];
if (r < uld_ciq_entries[CXGB4_ULD_RDMA]) {
const struct sge_ofld_rxq *rx;
int ciq_idx = 0;

urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
ciq_idx = urxq_info->nrxq + (r * 4);
rx = &urxq_info->uldrxq[ciq_idx];
n = min(4, urxq_info->nciq - 4 * r);

S("QType:", "RDMA-CIQ");
S("Interface:",
rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
R("RspQ ID:", rspq.abs_id);
R("RspQ size:", rspq.size);
R("RspQE size:", rspq.iqe_len);
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);

goto unlock;
}

r -= uld_ciq_entries[CXGB4_ULD_RDMA];
if (r < uld_rxq_entries[CXGB4_ULD_ISCSI]) {
const struct sge_ofld_rxq *rx;

urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSI];
rx = &urxq_info->uldrxq[r * 4];
n = min(4, urxq_info->nrxq - 4 * r);

S("QType:", "iSCSI");
R("RspQ ID:", rspq.abs_id);
R("RspQ size:", rspq.size);
R("RspQE size:", rspq.iqe_len);
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
R("FL size:", fl.size - 8);
R("FL pend:", fl.pend_cred);
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);

goto unlock;
}

r -= uld_rxq_entries[CXGB4_ULD_ISCSI];
if (r < uld_rxq_entries[CXGB4_ULD_ISCSIT]) {
const struct sge_ofld_rxq *rx;

urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSIT];
rx = &urxq_info->uldrxq[r * 4];
n = min(4, urxq_info->nrxq - 4 * r);

S("QType:", "iSCSIT");
R("RspQ ID:", rspq.abs_id);
R("RspQ size:", rspq.size);
R("RspQE size:", rspq.iqe_len);
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
R("FL size:", fl.size - 8);
R("FL pend:", fl.pend_cred);
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);

goto unlock;
}

r -= uld_rxq_entries[CXGB4_ULD_ISCSIT];
if (r < uld_rxq_entries[CXGB4_ULD_TLS]) {
const struct sge_ofld_rxq *rx;

urxq_info = s->uld_rxq_info[CXGB4_ULD_TLS];
rx = &urxq_info->uldrxq[r * 4];
n = min(4, urxq_info->nrxq - 4 * r);

S("QType:", "TLS");
R("RspQ ID:", rspq.abs_id);
R("RspQ size:", rspq.size);
R("RspQE size:", rspq.iqe_len);
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
R("FL size:", fl.size - 8);
R("FL pend:", fl.pend_cred);
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);

goto unlock;
}

r -= uld_rxq_entries[CXGB4_ULD_TLS];
if (r < uld_txq_entries[CXGB4_TX_CRYPTO]) {
const struct sge_ofld_rxq *rx;
const struct sge_uld_txq *tx;

utxq_info = s->uld_txq_info[CXGB4_TX_CRYPTO];
urxq_info = s->uld_rxq_info[CXGB4_ULD_CRYPTO];
tx = &utxq_info->uldtxq[r * 4];
rx = &urxq_info->uldrxq[r * 4];
n = min(4, utxq_info->ntxq - 4 * r);

S("QType:", "Crypto");
T("TxQ ID:", q.cntxt_id);
T("TxQ size:", q.size);
T("TxQ inuse:", q.in_use);
T("TxQ CIDX:", q.cidx);
T("TxQ PIDX:", q.pidx);
R("RspQ ID:", rspq.abs_id);
R("RspQ size:", rspq.size);
R("RspQE size:", rspq.iqe_len);
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
R("FL size:", fl.size - 8);
R("FL pend:", fl.pend_cred);
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);

goto unlock;
}

r -= uld_txq_entries[CXGB4_TX_CRYPTO];
if (r < ctrl_entries) {
const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4];

n = min(4, adap->params.nports - 4 * r);

S("QType:", "Control");
T("TxQ ID:", q.cntxt_id);
@@ -2532,8 +2995,13 @@ do { \
T("TxQ PIDX:", q.pidx);
TL("TxQFull:", q.stops);
TL("TxQRestarts:", q.restarts);
} else if (fq_idx == 0) {
const struct sge_rspq *evtq = &adap->sge.fw_evtq;

goto unlock;
}

r -= ctrl_entries;
if (r < 1) {
const struct sge_rspq *evtq = &s->fw_evtq;

seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
@@ -2544,8 +3012,13 @@ do { \
seq_printf(seq, "%-12s %16u\n", "Intr delay:",
qtimer_val(adap, evtq));
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
adap->sge.counter_val[evtq->pktcnt_idx]);
s->counter_val[evtq->pktcnt_idx]);

goto unlock;
}

unlock:
mutex_unlock(&uld_mutex);
#undef R
#undef RL
#undef T
@@ -2559,8 +3032,21 @@ do { \

static int sge_queue_entries(const struct adapter *adap)
{
int tot_uld_entries = 0;
int i;

mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_TX_MAX; i++)
tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i);

for (i = 0; i < CXGB4_ULD_MAX; i++) {
tot_uld_entries += sge_qinfo_uld_rxq_entries(adap, i);
tot_uld_entries += sge_qinfo_uld_ciq_entries(adap, i);
}
mutex_unlock(&uld_mutex);

return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
@@ -2851,15 +3337,17 @@ static int meminfo_show(struct seq_file *seq, void *v)
mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
meminfo.up_extmem2_hi);

seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
meminfo.rx_pages_data[2]);
seq_printf(seq, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
meminfo.rx_pages_data[0], meminfo.free_rx_cnt,
meminfo.rx_pages_data[1], meminfo.rx_pages_data[2]);

seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);
seq_printf(seq, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
meminfo.tx_pages_data[0], meminfo.free_tx_cnt,
meminfo.tx_pages_data[1], meminfo.tx_pages_data[2],
meminfo.tx_pages_data[3]);

seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);
seq_printf(seq, "%u p-structs (%u free)\n\n",
meminfo.p_structs, meminfo.p_structs_free_cnt);

for (i = 0; i < 4; i++)
/* For T6 these are MAC buffer groups */
@@ -2924,6 +3412,169 @@ static const struct file_operations chcr_stats_debugfs_fops = {
.llseek = seq_lseek,
.release = single_release,
};

#define PRINT_ADAP_STATS(string, value) \
seq_printf(seq, "%-25s %-20llu\n", (string), \
(unsigned long long)(value))

#define PRINT_CH_STATS(string, value) \
do { \
seq_printf(seq, "%-25s ", (string)); \
for (i = 0; i < adap->params.arch.nchan; i++) \
seq_printf(seq, "%-20llu ", \
(unsigned long long)stats.value[i]); \
seq_printf(seq, "\n"); \
} while (0)

#define PRINT_CH_STATS2(string, value) \
do { \
seq_printf(seq, "%-25s ", (string)); \
for (i = 0; i < adap->params.arch.nchan; i++) \
seq_printf(seq, "%-20llu ", \
(unsigned long long)stats[i].value); \
seq_printf(seq, "\n"); \
} while (0)

static void show_tcp_stats(struct seq_file *seq)
{
struct adapter *adap = seq->private;
struct tp_tcp_stats v4, v6;

spin_lock(&adap->stats_lock);
t4_tp_get_tcp_stats(adap, &v4, &v6, false);
spin_unlock(&adap->stats_lock);

PRINT_ADAP_STATS("tcp_ipv4_out_rsts:", v4.tcp_out_rsts);
PRINT_ADAP_STATS("tcp_ipv4_in_segs:", v4.tcp_in_segs);
PRINT_ADAP_STATS("tcp_ipv4_out_segs:", v4.tcp_out_segs);
PRINT_ADAP_STATS("tcp_ipv4_retrans_segs:", v4.tcp_retrans_segs);
PRINT_ADAP_STATS("tcp_ipv6_out_rsts:", v6.tcp_out_rsts);
PRINT_ADAP_STATS("tcp_ipv6_in_segs:", v6.tcp_in_segs);
PRINT_ADAP_STATS("tcp_ipv6_out_segs:", v6.tcp_out_segs);
PRINT_ADAP_STATS("tcp_ipv6_retrans_segs:", v6.tcp_retrans_segs);
}

static void show_ddp_stats(struct seq_file *seq)
{
struct adapter *adap = seq->private;
struct tp_usm_stats stats;

spin_lock(&adap->stats_lock);
t4_get_usm_stats(adap, &stats, false);
spin_unlock(&adap->stats_lock);

PRINT_ADAP_STATS("usm_ddp_frames:", stats.frames);
PRINT_ADAP_STATS("usm_ddp_octets:", stats.octets);
PRINT_ADAP_STATS("usm_ddp_drops:", stats.drops);
}

static void show_rdma_stats(struct seq_file *seq)
{
struct adapter *adap = seq->private;
struct tp_rdma_stats stats;

spin_lock(&adap->stats_lock);
t4_tp_get_rdma_stats(adap, &stats, false);
spin_unlock(&adap->stats_lock);

PRINT_ADAP_STATS("rdma_no_rqe_mod_defer:", stats.rqe_dfr_mod);
PRINT_ADAP_STATS("rdma_no_rqe_pkt_defer:", stats.rqe_dfr_pkt);
}

static void show_tp_err_adapter_stats(struct seq_file *seq)
{
struct adapter *adap = seq->private;
struct tp_err_stats stats;

spin_lock(&adap->stats_lock);
t4_tp_get_err_stats(adap, &stats, false);
spin_unlock(&adap->stats_lock);

PRINT_ADAP_STATS("tp_err_ofld_no_neigh:", stats.ofld_no_neigh);
PRINT_ADAP_STATS("tp_err_ofld_cong_defer:", stats.ofld_cong_defer);
}

static void show_cpl_stats(struct seq_file *seq)
{
struct adapter *adap = seq->private;
struct tp_cpl_stats stats;
u8 i;

spin_lock(&adap->stats_lock);
t4_tp_get_cpl_stats(adap, &stats, false);
spin_unlock(&adap->stats_lock);

PRINT_CH_STATS("tp_cpl_requests:", req);
PRINT_CH_STATS("tp_cpl_responses:", rsp);
}

static void show_tp_err_channel_stats(struct seq_file *seq)
{
struct adapter *adap = seq->private;
struct tp_err_stats stats;
u8 i;

spin_lock(&adap->stats_lock);
t4_tp_get_err_stats(adap, &stats, false);
spin_unlock(&adap->stats_lock);

PRINT_CH_STATS("tp_mac_in_errs:", mac_in_errs);
PRINT_CH_STATS("tp_hdr_in_errs:", hdr_in_errs);
PRINT_CH_STATS("tp_tcp_in_errs:", tcp_in_errs);
PRINT_CH_STATS("tp_tcp6_in_errs:", tcp6_in_errs);
PRINT_CH_STATS("tp_tnl_cong_drops:", tnl_cong_drops);
PRINT_CH_STATS("tp_tnl_tx_drops:", tnl_tx_drops);
PRINT_CH_STATS("tp_ofld_vlan_drops:", ofld_vlan_drops);
PRINT_CH_STATS("tp_ofld_chan_drops:", ofld_chan_drops);
}

static void show_fcoe_stats(struct seq_file *seq)
{
struct adapter *adap = seq->private;
struct tp_fcoe_stats stats[NCHAN];
u8 i;

spin_lock(&adap->stats_lock);
for (i = 0; i < adap->params.arch.nchan; i++)
t4_get_fcoe_stats(adap, i, &stats[i], false);
spin_unlock(&adap->stats_lock);

PRINT_CH_STATS2("fcoe_octets_ddp", octets_ddp);
PRINT_CH_STATS2("fcoe_frames_ddp", frames_ddp);
PRINT_CH_STATS2("fcoe_frames_drop", frames_drop);
}

#undef PRINT_CH_STATS2
#undef PRINT_CH_STATS
#undef PRINT_ADAP_STATS

static int tp_stats_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;

seq_puts(seq, "\n--------Adapter Stats--------\n");
show_tcp_stats(seq);
show_ddp_stats(seq);
show_rdma_stats(seq);
show_tp_err_adapter_stats(seq);

seq_puts(seq, "\n-------- Channel Stats --------\n");
if (adap->params.arch.nchan == NCHAN)
seq_printf(seq, "%-25s %-20s %-20s %-20s %-20s\n",
" ", "channel 0", "channel 1",
"channel 2", "channel 3");
else
seq_printf(seq, "%-25s %-20s %-20s\n",
" ", "channel 0", "channel 1");
show_cpl_stats(seq);
show_tp_err_channel_stats(seq);
show_fcoe_stats(seq);

return 0;
}

DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats);

/* Add an array of Debug FS files.
 */
void add_debugfs_files(struct adapter *adap,
@@ -2973,6 +3624,10 @@ int t4_setup_debugfs(struct adapter *adap)
{ "rss_key", &rss_key_debugfs_fops, 0400, 0 },
{ "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 },
{ "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 },
{ "resources", &resources_debugfs_fops, 0400, 0 },
#ifdef CONFIG_CHELSIO_T4_DCB
{ "dcb_info", &dcb_info_debugfs_fops, 0400, 0 },
#endif
{ "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 },
{ "ibq_tp0", &cim_ibq_fops, 0400, 0 },
{ "ibq_tp1", &cim_ibq_fops, 0400, 1 },
@@ -2999,6 +3654,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "blocked_fl", &blocked_fl_fops, 0600, 0 },
{ "meminfo", &meminfo_fops, 0400, 0 },
{ "crypto", &chcr_stats_debugfs_fops, 0400, 0 },
{ "tp_stats", &tp_stats_debugfs_fops, 0400, 0 },
};

/* Debug FS nodes common to all T5 and later adapters.
@@ -115,42 +115,10 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
"db_drop                ",
"db_full                ",
"db_empty               ",
"tcp_ipv4_out_rsts      ",
"tcp_ipv4_in_segs       ",
"tcp_ipv4_out_segs      ",
"tcp_ipv4_retrans_segs  ",
"tcp_ipv6_out_rsts      ",
"tcp_ipv6_in_segs       ",
"tcp_ipv6_out_segs      ",
"tcp_ipv6_retrans_segs  ",
"usm_ddp_frames         ",
"usm_ddp_octets         ",
"usm_ddp_drops          ",
"rdma_no_rqe_mod_defer  ",
"rdma_no_rqe_pkt_defer  ",
"tp_err_ofld_no_neigh   ",
"tp_err_ofld_cong_defer ",
"write_coal_success     ",
"write_coal_fail        ",
};

static char channel_stats_strings[][ETH_GSTRING_LEN] = {
"--------Channel--------- ",
"tp_cpl_requests        ",
"tp_cpl_responses       ",
"tp_mac_in_errs         ",
"tp_hdr_in_errs         ",
"tp_tcp_in_errs         ",
"tp_tcp6_in_errs        ",
"tp_tnl_cong_drops      ",
"tp_tnl_tx_drops        ",
"tp_ofld_vlan_drops     ",
"tp_ofld_chan_drops     ",
"fcoe_octets_ddp        ",
"fcoe_frames_ddp        ",
"fcoe_frames_drop       ",
};

static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"-------Loopback----------- ",
"octets_ok              ",
@@ -177,14 +145,19 @@ static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"bg3_frames_trunc       ",
};

static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
[PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
};

static int get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(stats_strings) +
ARRAY_SIZE(adapter_stats_strings) +
ARRAY_SIZE(channel_stats_strings) +
ARRAY_SIZE(loopback_stats_strings);
case ETH_SS_PRIV_FLAGS:
return ARRAY_SIZE(cxgb4_priv_flags_strings);
default:
return -EOPNOTSUPP;
}
@@ -235,6 +208,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
FW_HDR_FW_VER_MINOR_G(exprom_vers),
FW_HDR_FW_VER_MICRO_G(exprom_vers),
FW_HDR_FW_VER_BUILD_G(exprom_vers));
info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -245,11 +219,11 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
memcpy(data, adapter_stats_strings,
sizeof(adapter_stats_strings));
data += sizeof(adapter_stats_strings);
memcpy(data, channel_stats_strings,
sizeof(channel_stats_strings));
data += sizeof(channel_stats_strings);
memcpy(data, loopback_stats_strings,
sizeof(loopback_stats_strings));
} else if (stringset == ETH_SS_PRIV_FLAGS) {
memcpy(data, cxgb4_priv_flags_strings,
sizeof(cxgb4_priv_flags_strings));
}
}
@@ -270,41 +244,10 @@ struct adapter_stats {
u64 db_drop;
u64 db_full;
u64 db_empty;
u64 tcp_v4_out_rsts;
u64 tcp_v4_in_segs;
u64 tcp_v4_out_segs;
u64 tcp_v4_retrans_segs;
u64 tcp_v6_out_rsts;
u64 tcp_v6_in_segs;
u64 tcp_v6_out_segs;
u64 tcp_v6_retrans_segs;
u64 frames;
u64 octets;
u64 drops;
u64 rqe_dfr_mod;
u64 rqe_dfr_pkt;
u64 ofld_no_neigh;
u64 ofld_cong_defer;
u64 wc_success;
u64 wc_fail;
};

struct channel_stats {
u64 cpl_req;
u64 cpl_rsp;
u64 mac_in_errs;
u64 hdr_in_errs;
u64 tcp_in_errs;
u64 tcp6_in_errs;
u64 tnl_cong_drops;
u64 tnl_tx_drops;
u64 ofld_vlan_drops;
u64 ofld_chan_drops;
u64 octets_ddp;
u64 frames_ddp;
u64 frames_drop;
};

static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
@@ -327,45 +270,14 @@ static void collect_sge_port_stats(const struct adapter *adap,

static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
{
struct tp_tcp_stats v4, v6;
struct tp_rdma_stats rdma_stats;
struct tp_err_stats err_stats;
struct tp_usm_stats usm_stats;
u64 val1, val2;

memset(s, 0, sizeof(*s));

spin_lock(&adap->stats_lock);
t4_tp_get_tcp_stats(adap, &v4, &v6, false);
t4_tp_get_rdma_stats(adap, &rdma_stats, false);
t4_get_usm_stats(adap, &usm_stats, false);
t4_tp_get_err_stats(adap, &err_stats, false);
spin_unlock(&adap->stats_lock);

s->db_drop = adap->db_stats.db_drop;
s->db_full = adap->db_stats.db_full;
s->db_empty = adap->db_stats.db_empty;

s->tcp_v4_out_rsts = v4.tcp_out_rsts;
s->tcp_v4_in_segs = v4.tcp_in_segs;
s->tcp_v4_out_segs = v4.tcp_out_segs;
s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
s->tcp_v6_out_rsts = v6.tcp_out_rsts;
s->tcp_v6_in_segs = v6.tcp_in_segs;
s->tcp_v6_out_segs = v6.tcp_out_segs;
s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;

if (is_offload(adap)) {
s->frames = usm_stats.frames;
s->octets = usm_stats.octets;
s->drops = usm_stats.drops;
s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
}

s->ofld_no_neigh = err_stats.ofld_no_neigh;
s->ofld_cong_defer = err_stats.ofld_cong_defer;

if (!is_t4(adap->params.chip)) {
int v;

@@ -379,36 +291,6 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
}
}

static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
u8 i)
{
struct tp_cpl_stats cpl_stats;
struct tp_err_stats err_stats;
struct tp_fcoe_stats fcoe_stats;

memset(s, 0, sizeof(*s));

spin_lock(&adap->stats_lock);
t4_tp_get_cpl_stats(adap, &cpl_stats, false);
t4_tp_get_err_stats(adap, &err_stats, false);
t4_get_fcoe_stats(adap, i, &fcoe_stats, false);
spin_unlock(&adap->stats_lock);

s->cpl_req = cpl_stats.req[i];
s->cpl_rsp = cpl_stats.rsp[i];
s->mac_in_errs = err_stats.mac_in_errs[i];
s->hdr_in_errs = err_stats.hdr_in_errs[i];
s->tcp_in_errs = err_stats.tcp_in_errs[i];
s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
s->octets_ddp = fcoe_stats.octets_ddp;
s->frames_ddp = fcoe_stats.frames_ddp;
s->frames_drop = fcoe_stats.frames_drop;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
@@ -428,11 +310,6 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
collect_adapter_stats(adapter, (struct adapter_stats *)data);
data += sizeof(struct adapter_stats) / sizeof(u64);

*data++ = (u64)pi->port_id;
collect_channel_stats(adapter, (struct channel_stats *)data,
pi->port_id);
data += sizeof(struct channel_stats) / sizeof(u64);

*data++ = (u64)pi->port_id;
memset(&s, 0, sizeof(s));
t4_get_lb_stats(adapter, pi->port_id, &s);
@@ -751,13 +628,10 @@ static int get_link_ksettings(struct net_device *dev,
fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
link_ksettings->link_modes.lp_advertising);

if (netif_carrier_ok(dev)) {
base->speed = pi->link_cfg.speed;
base->duplex = DUPLEX_FULL;
} else {
base->speed = SPEED_UNKNOWN;
base->duplex = DUPLEX_UNKNOWN;
}
base->speed = (netif_carrier_ok(dev)
? pi->link_cfg.speed
: SPEED_UNKNOWN);
base->duplex = DUPLEX_FULL;

if (pi->link_cfg.fc & PAUSE_RX) {
if (pi->link_cfg.fc & PAUSE_TX) {
@@ -1499,6 +1373,36 @@ static int cxgb4_get_module_eeprom(struct net_device *dev,
offset, len, &data[eprom->len - len]);
}

static u32 cxgb4_get_priv_flags(struct net_device *netdev)
{
struct port_info *pi = netdev_priv(netdev);
struct adapter *adapter = pi->adapter;

return (adapter->eth_flags | pi->eth_flags);
}

/**
 *	set_flags - set/unset specified flags if passed in new_flags
 *	@cur_flags: pointer to current flags
 *	@new_flags: new incoming flags
 *	@flags: set of flags to set/unset
 */
static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
{
*cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
}

static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
{
struct port_info *pi = netdev_priv(netdev);
struct adapter *adapter = pi->adapter;

set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);

return 0;
}

static const struct ethtool_ops cxgb_ethtool_ops = {
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
@@ -1535,6 +1439,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_dump_data = get_dump_data,
.get_module_info = cxgb4_get_module_info,
.get_module_eeprom = cxgb4_get_module_eeprom,
.get_priv_flags = cxgb4_get_priv_flags,
.set_priv_flags = cxgb4_set_priv_flags,
};

void cxgb4_set_ethtool_ops(struct net_device *netdev)
@@ -267,7 +267,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
}
}

static int cxgb4_dcb_enabled(const struct net_device *dev)
int cxgb4_dcb_enabled(const struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);

@@ -554,10 +554,9 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,

dev = q->adap->port[q->adap->chan_map[port]];
dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
? !!(pcmd->u.info.dcbxdis_pkd &
FW_PORT_CMD_DCBXDIS_F)
: !!(pcmd->u.info32.lstatus32_to_cbllen32 &
FW_PORT_CMD_DCBXDIS32_F));
? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
: !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
& FW_PORT_CMD_DCBXDIS32_F));
state_input = (dcbxdis
? CXGB4_DCB_INPUT_FW_DISABLED
: CXGB4_DCB_INPUT_FW_ENABLED);
@@ -924,12 +923,14 @@ static int setup_sge_queues(struct adapter *adap)
QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
return 0;
freeout:
dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
t4_free_sge_resources(adap);
return err;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
int txq;

@@ -971,7 +972,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}

return fallback(dev, skb) % dev->real_num_tx_queues;
return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
@@ -3016,7 +3017,7 @@ static int cxgb_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
pi, dev);
pi, dev, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
return 0;
@@ -3219,7 +3220,7 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev,
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open             = cxgb_open,
.ndo_stop             = cxgb_close,
.ndo_start_xmit       = t4_eth_xmit,
.ndo_start_xmit       = t4_start_xmit,
.ndo_select_queue     = cxgb_select_queue,
.ndo_get_stats64      = cxgb_get_stats,
.ndo_set_rx_mode      = cxgb_set_rxmode,
@@ -3538,6 +3539,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
u32 v;
int ret;

/* Now that we've successfully configured and initialized the adapter
 * can ask the Firmware what resources it has provisioned for us.
 */
ret = t4_get_pfres(adap);
if (ret) {
dev_err(adap->pdev_dev,
"Unable to retrieve resource provisioning information\n");
return ret;
}

/* get device capabilities */
memset(c, 0, sizeof(*c));
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
@@ -4172,32 +4183,6 @@ static int adap_init0(struct adapter *adap)
goto bye;
}

/*
 * Grab VPD parameters.  This should be done after we establish a
 * connection to the firmware since some of the VPD parameters
 * (notably the Core Clock frequency) are retrieved via requests to
 * the firmware.  On the other hand, we need these fairly early on
 * so we do this right after getting ahold of the firmware.
 */
ret = t4_get_vpd_params(adap, &adap->params.vpd);
if (ret < 0)
goto bye;

/*
 * Find out what ports are available to us.  Note that we need to do
 * this before calling adap_init0_no_config() since it needs nports
 * and portvec ...
 */
v =
FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
if (ret < 0)
goto bye;

adap->params.nports = hweight32(port_vec);
adap->params.portvec = port_vec;

/* If the firmware is initialized already, emit a simply note to that
 * effect. Otherwise, it's time to try initializing the adapter.
 */
@@ -4248,6 +4233,45 @@ static int adap_init0(struct adapter *adap)
}
}

/* Now that we've successfully configured and initialized the adapter
 * (or found it already initialized), we can ask the Firmware what
 * resources it has provisioned for us.
 */
ret = t4_get_pfres(adap);
if (ret) {
dev_err(adap->pdev_dev,
"Unable to retrieve resource provisioning information\n");
goto bye;
}

/* Grab VPD parameters.  This should be done after we establish a
 * connection to the firmware since some of the VPD parameters
 * (notably the Core Clock frequency) are retrieved via requests to
 * the firmware.  On the other hand, we need these fairly early on
 * so we do this right after getting ahold of the firmware.
 *
 * We need to do this after initializing the adapter because someone
 * could have FLASHed a new VPD which won't be read by the firmware
 * until we do the RESET ...
 */
ret = t4_get_vpd_params(adap, &adap->params.vpd);
if (ret < 0)
goto bye;

/* Find out what ports are available to us.  Note that we need to do
 * this before calling adap_init0_no_config() since it needs nports
 * and portvec ...
 */
v =
FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
if (ret < 0)
goto bye;

adap->params.nports = hweight32(port_vec);
adap->params.portvec = port_vec;

/* Give the SGE code a chance to pull in anything that it needs ...
 * Note that this must be called after we retrieve our VPD parameters
 * in order to know how to convert core ticks to seconds, etc.
@@ -4799,10 +4823,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
|
||||
* of ports we found and the number of available CPUs. Most settings can be
|
||||
* modified by the admin prior to actual use.
|
||||
*/
|
||||
static void cfg_queues(struct adapter *adap)
|
||||
static int cfg_queues(struct adapter *adap)
|
||||
{
|
||||
struct sge *s = &adap->sge;
|
||||
int i = 0, n10g = 0, qidx = 0;
|
||||
int i, n10g = 0, qidx = 0;
|
||||
int niqflint, neq, avail_eth_qsets;
|
||||
int max_eth_qsets = 32;
|
||||
#ifndef CONFIG_CHELSIO_T4_DCB
|
||||
int q10g = 0;
|
||||
#endif
|
||||
@@ -4814,16 +4840,46 @@ static void cfg_queues(struct adapter *adap)
|
||||
adap->params.crypto = 0;
|
||||
}
|
||||
|
||||
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
|
||||
/* Calculate the number of Ethernet Queue Sets available based on
|
||||
* resources provisioned for us. We always have an Asynchronous
|
||||
* Firmware Event Ingress Queue. If we're operating in MSI or Legacy
|
||||
* IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
|
||||
* Ingress Queue. Meanwhile, we need two Egress Queues for each
|
||||
* Queue Set: one for the Free List and one for the Ethernet TX Queue.
|
||||
*
|
||||
* Note that we should also take into account all of the various
|
||||
* Offload Queues. But, in any situation where we're operating in
|
||||
* a Resource Constrained Provisioning environment, doing any Offload
|
||||
* at all is problematic ...
|
||||
*/
|
||||
niqflint = adap->params.pfres.niqflint - 1;
|
||||
if (!(adap->flags & USING_MSIX))
|
||||
niqflint--;
|
||||
neq = adap->params.pfres.neq / 2;
|
||||
avail_eth_qsets = min(niqflint, neq);
|
||||
|
||||
if (avail_eth_qsets > max_eth_qsets)
|
||||
avail_eth_qsets = max_eth_qsets;
|
||||
|
||||
if (avail_eth_qsets < adap->params.nports) {
|
||||
dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
|
||||
avail_eth_qsets, adap->params.nports);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
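A standalone sketch of the budgeting arithmetic above, with invented provisioning numbers: one ingress queue is reserved for the asynchronous firmware event queue (plus one more when MSI-X is not in use), and each Ethernet Queue Set costs two egress queues (free list + TX queue):

        #include <stdio.h>

        int main(void)
        {
                int niqflint = 66;              /* hypothetical pfres.niqflint */
                int neq = 128;                  /* hypothetical pfres.neq */
                int using_msix = 1;
                int max_eth_qsets = 32;
                int avail_eth_qsets;

                niqflint -= 1;                  /* firmware event ingress queue */
                if (!using_msix)
                        niqflint -= 1;          /* forwarded-interrupt ingress queue */
                avail_eth_qsets = neq / 2;      /* two egress queues per Queue Set */
                if (niqflint < avail_eth_qsets)
                        avail_eth_qsets = niqflint;
                if (avail_eth_qsets > max_eth_qsets)
                        avail_eth_qsets = max_eth_qsets;

                printf("avail_eth_qsets = %d\n", avail_eth_qsets);     /* 32 */
                return 0;
        }
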
	/* Count the number of 10Gb/s or better ports */
	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);

#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	if (adap->params.nports * 8 > avail_eth_qsets) {
		dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
			avail_eth_qsets, adap->params.nports * 8);
		return -ENOMEM;
	}

	for_each_port(adap, i) {
@@ -4839,7 +4895,7 @@ static void cfg_queues(struct adapter *adap)
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
		q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

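A standalone sketch of the q10g split above with invented numbers: 10G-class ports share what is left after each slower port takes one Queue Set, capped by a stand-in constant for netif_get_num_default_rss_queues():

        #include <stdio.h>

        int main(void)
        {
                int avail_eth_qsets = 32;       /* hypothetical budget */
                int nports = 4, n10g = 2;       /* hypothetical port mix */
                int default_rss = 8;            /* stand-in for netif_get_num_default_rss_queues() */
                int q10g = 0;

                if (n10g)
                        q10g = (avail_eth_qsets - (nports - n10g)) / n10g;
                if (q10g > default_rss)
                        q10g = default_rss;

                printf("q10g = %d\n", q10g);    /* (32 - 2) / 2 = 15, capped to 8 */
                return 0;
        }
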
@@ -4890,6 +4946,8 @@ static void cfg_queues(struct adapter *adap)

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 512, 64);

	return 0;
}

/*
@@ -5086,17 +5144,9 @@ static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
		bufp += sprintf(bufp, "100M/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
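The bufp += sprintf(bufp, ...) idiom in print_port_info() appends to a buffer by advancing the write pointer by each call's return value (the number of characters written). A minimal standalone illustration:

        #include <stdio.h>

        int main(void)
        {
                char buf[80];
                char *bufp = buf;

                bufp += sprintf(bufp, "100M/");
                bufp += sprintf(bufp, "1G/");
                bufp += sprintf(bufp, "10G");
                printf("%s\n", buf);    /* 100M/1G/10G */
                return 0;
        }
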
@@ -5600,6 +5650,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
		cxgb4_dcb_version_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}
@@ -5630,10 +5681,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
		}
	}

	if (!(adapter->flags & FW_OK))
		goto fw_attach_fail;

	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);
	err = cfg_queues(adapter);
	if (err)
		goto out_free_dev;

	adapter->smt = t4_init_smt();
	if (!adapter->smt) {
@@ -5705,7 +5761,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
		u32 hash_base, hash_reg;

		if (chip <= CHELSIO_T5) {
		if (chip_ver <= CHELSIO_T5) {
			hash_reg = LE_DB_TID_HASHBASE_A;
			hash_base = t4_read_reg(adapter, hash_reg);
			adapter->tids.hash_base = hash_base / 4;
@@ -5740,6 +5796,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
		goto out_free_dev;
	}

fw_attach_fail:
	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only

@@ -874,6 +874,9 @@ int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	if (adap->tc_flower_initialized)
		return -EEXIST;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
@@ -882,13 +885,18 @@ int cxgb4_init_tc_flower(struct adapter *adap)
	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	adap->tc_flower_initialized = true;
	return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (!adap->tc_flower_initialized)
		return;

	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
	adap->tc_flower_initialized = false;
}

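The tc_flower_initialized flag added above makes the init/cleanup pair idempotent: repeated init fails with -EEXIST and repeated cleanup is a no-op. A toy standalone version of the same guard pattern:

        #include <stdio.h>
        #include <errno.h>

        static int initialized;

        static int my_init(void)
        {
                if (initialized)
                        return -EEXIST;
                initialized = 1;
                return 0;
        }

        static void my_cleanup(void)
        {
                if (!initialized)
                        return;
                initialized = 0;
        }

        int main(void)
        {
                printf("%d %d\n", my_init(), my_init());        /* 0 -17 */
                my_cleanup();
                my_cleanup();                                   /* harmless no-op */
                return 0;
        }
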
@@ -231,6 +231,7 @@ again:
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		/* fall through */
	case L2T_STATE_VALID: /* fast-path, send the packet on */
		return t4_ofld_send(adap, skb);
	case L2T_STATE_RESOLVING:

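The /* fall through */ annotations in this series matter because GCC's -Wimplicit-fallthrough treats such a comment (or the fallthrough attribute) as an explicit opt-in and suppresses the warning. A minimal standalone illustration of the pattern:

        #include <stdio.h>

        static int classify(int state)
        {
                switch (state) {
                case 0:
                        printf("stale -> valid\n");
                        /* fall through */
                case 1:
                        return 1;       /* fast path */
                default:
                        return 0;
                }
        }

        int main(void)
        {
                printf("%d\n", classify(0));    /* prints the message, then 1 */
                return 0;
        }
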
@@ -539,6 +539,9 @@ void t4_cleanup_sched(struct adapter *adap)
		struct port_info *pi = netdev2pinfo(adap->port[j]);

		s = pi->sched_tbl;
		if (!s)
			continue;

		for (i = 0; i < s->sched_size; i++) {
			struct sched_class *e;

@@ -1288,13 +1288,13 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
}

/**
 * t4_eth_xmit - add a packet to an Ethernet Tx queue
 * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
 */
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 wr_mid, ctrl0, op;
	u64 cntrl, *end, *sgl;
@@ -1547,6 +1547,374 @@ out_free: dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Constants ... */
enum {
	/* Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Context Units bytes. Note that as far as the hardware is
	 * concerned, the free list is an Egress Queue (the host produces free
	 * buffers which the hardware consumes) and free list entries are
	 * 64-bit PCI DMA addresses.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			       sizeof(struct cpl_tx_pkt_lso_core) +
			       sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
};

/**
 * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @skb: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit completely as
 * immediate data.
 */
static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
{
	/* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
	 * which does not accommodate immediate data. We could dike out all
	 * of the support code for immediate data but that would tie our hands
	 * too much if we ever want to enhance the firmware. It would also
	 * create more differences between the PF and VF Drivers.
	 */
	return false;
}

/**
 * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
 * @skb: the packet
 *
 * Returns the number of flits needed for a TX Work Request for the
 * given Ethernet packet, including the needed WR and CPL headers.
 */
static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data. In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (t4vf_is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
				    sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter/Gather List
	 * of the skb body and fragments. We also include the flits necessary
	 * for the TX Packet Work Request and CPL. We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}

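A standalone sketch of the flit arithmetic above for the non-GSO case. The sgl_len() helper below mirrors the driver's scatter/gather length formula (pairs of 64-bit addresses and 32-bit lengths plus the ULPTX header); the header byte sizes are assumptions for illustration, not taken from the real structures:

        #include <stdio.h>

        /* Assumed to mirror the driver's sgl_len() for an n-entry SGL. */
        static unsigned int sgl_len(unsigned int n)
        {
                n--;
                return (3 * n) / 2 + (n & 1) + 2;
        }

        int main(void)
        {
                unsigned int wr_len = 32;       /* assumed fw_eth_tx_pkt_vm_wr size */
                unsigned int cpl_len = 16;      /* assumed cpl_tx_pkt_core size */
                unsigned int nfrags = 3;        /* hypothetical page fragments */

                /* SGL over linear data + frags, then WR + CPL header flits. */
                unsigned int flits = sgl_len(nfrags + 1) + (wr_len + cpl_len) / 8;

                printf("flits = %u\n", flits);  /* 7 + 6 = 13 */
                return 0;
        }
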
/**
 * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
 */
static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	const struct skb_shared_info *ssi;
	struct fw_eth_tx_pkt_vm_wr *wr;
	int qidx, credits, max_pkt_len;
	struct cpl_tx_pkt_core *cpl;
	const struct port_info *pi;
	unsigned int flits, ndesc;
	struct sge_eth_txq *txq;
	struct adapter *adapter;
	u64 cntrl, *end;
	u32 wr_mid;
	const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
				       sizeof(wr->ethmacsrc) +
				       sizeof(wr->ethtype) +
				       sizeof(wr->vlantci);

	/* The chip minimum packet length is 10 octets but the firmware
	 * command that we are using requires that we copy the Ethernet header
	 * (including the VLAN tag) into the header so we reject anything
	 * smaller than that ...
	 */
	if (unlikely(skb->len < fw_hdr_copy_len))
		goto out_free;

	/* Discard the packet if the length is greater than mtu */
	max_pkt_len = ETH_HLEN + dev->mtu;
	if (skb_vlan_tag_present(skb))
		max_pkt_len += VLAN_HLEN;
	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
		goto out_free;

	/* Figure out which TX Queue we're going to use. */
	pi = netdev_priv(dev);
	adapter = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	WARN_ON(qidx >= pi->nqsets);
	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];

	/* Take this opportunity to reclaim any TX Descriptors whose DMA
	 * transfers have completed.
	 */
	cxgb4_reclaim_completed_tx(adapter, &txq->q, true);

	/* Calculate the number of flits and TX Descriptors we're going to
	 * need along with how many TX Descriptors will be left over after
	 * we inject our Work Request.
	 */
	flits = t4vf_calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		/* Not enough room for this packet's Work Request. Stop the
		 * TX Queue and return a "busy" condition. The queue will get
		 * started later on when the firmware informs us that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		dev_err(adapter->pdev_dev,
			"%s: TX ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (!t4vf_is_eth_imm(skb) &&
	    unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
		/* We need to map the skb into PCI DMA space (because it can't
		 * be in-lined directly into the Work Request) and the mapping
		 * operation failed. Record the error and drop the packet.
		 */
		txq->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message. The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	/* Start filling in our Work Request. Note that we do _not_ handle
	 * the WR Header wrapping around the TX Descriptor Ring. If our
	 * maximum header size ever exceeds one TX Descriptor, we'll need to
	 * do something else here.
	 */
	WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
	wr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
	wr->r3[0] = cpu_to_be32(0);
	wr->r3[1] = cpu_to_be32(0);
	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
	end = (u64 *)wr + flits;

	/* If this is a Large Send Offload packet we'll put in an LSO CPL
	 * message with an encapsulated TX Packet CPL message. Otherwise we
	 * just use a TX Packet CPL message.
	 */
	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(sizeof(*lso) +
						    sizeof(*cpl)));
		/* Fill in the LSO CPL message. */
		lso->lso_ctrl =
			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
				    LSO_FIRST_SLICE_F |
				    LSO_LAST_SLICE_F |
				    LSO_IPV6_V(v6) |
				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
		lso->ipid_ofst = cpu_to_be16(0);
		lso->mss = cpu_to_be16(ssi->gso_size);
		lso->seqno_offset = cpu_to_be32(0);
		if (is_t4(adapter->params.chip))
			lso->len = cpu_to_be32(skb->len);
		else
			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));

		/* Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(lso + 1);

		if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
		else
			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);

		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			 TXPKT_IPHDR_LEN_V(l3hdr_len);
		txq->tso++;
		txq->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = (t4vf_is_eth_imm(skb)
		       ? skb->len + sizeof(*cpl)
		       : sizeof(*cpl));
		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(len));

		/* Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adapter->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			txq->tx_cso++;
		} else {
			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
		}
	}

	/* If there's a VLAN tag present, add that to the list of things to
	 * do in this Work Request.
	 */
	if (skb_vlan_tag_present(skb)) {
		txq->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	/* Fill in the TX Packet CPL message header. */
	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
				 TXPKT_INTF_V(pi->port_id) |
				 TXPKT_PF_V(0));
	cpl->pack = cpu_to_be16(0);
	cpl->len = cpu_to_be16(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	/* Fill in the body of the TX Packet CPL message with either in-lined
	 * data or a Scatter/Gather List.
	 */
	if (t4vf_is_eth_imm(skb)) {
		/* In-line the packet's data and free the skb since we don't
		 * need it any longer.
		 */
		cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
		dev_consume_skb_any(skb);
	} else {
		/* Write the skb's Scatter/Gather list into the TX Packet CPL
		 * message and retain a pointer to the skb so we can free it
		 * later when its DMA completes. (We store the skb pointer
		 * in the Software Descriptor corresponding to the last TX
		 * Descriptor used by the Work Request.)
		 *
		 * The retained skb will be freed when the corresponding TX
		 * Descriptors are reclaimed after their DMAs complete.
		 * However, this could take quite a while since, in general,
		 * the hardware is set up to be lazy about sending DMA
		 * completion notifications to us and we mostly perform TX
		 * reclaims in the transmit routine.
		 *
		 * This is good for performance but means that we rely on new
		 * TX packets arriving to run the destructors of completed
		 * packets, which open up space in their sockets' send queues.
		 * Sometimes we do not get such new packets causing TX to
		 * stall. A single UDP transmitter is a good example of this
		 * situation. We have a clean up timer that periodically
		 * reclaims completed packets but it doesn't run often enough
		 * (nor do we want it to) to prevent lengthy stalls. A
		 * solution to this problem is to run the destructor early,
		 * after the packet is queued but before it's DMAd. A con is
		 * that we lie to socket memory accounting, but the amount of
		 * extra memory is reasonable (limited by the number of TX
		 * descriptors), the packets do actually get freed quickly by
		 * new packets almost always, and for protocols like TCP that
		 * wait for acks to really free up the data the extra memory
		 * is even less. On the positive side we run the destructors
		 * on the sending CPU rather than on a potentially different
		 * completing CPU, usually a good thing.
		 *
		 * Run the destructor before telling the DMA engine about the
		 * packet to make sure it doesn't complete and get freed
		 * prematurely.
		 */
		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
		struct sge_txq *tq = &txq->q;
		int last_desc;

		/* If the Work Request header was an exact multiple of our TX
		 * Descriptor length, then it's possible that the starting SGL
		 * pointer lines up exactly with the end of our TX Descriptor
		 * ring. If that's the case, wrap around to the beginning
		 * here ...
		 */
		if (unlikely((void *)sgl == (void *)tq->stat)) {
			sgl = (void *)tq->desc;
			end = (void *)((void *)tq->desc +
				       ((void *)end - (void *)tq->stat));
		}

		cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
		skb_orphan(skb);

		last_desc = tq->pidx + ndesc - 1;
		if (last_desc >= tq->size)
			last_desc -= tq->size;
		tq->sdesc[last_desc].skb = skb;
		tq->sdesc[last_desc].sgl = sgl;
	}

	/* Advance our internal TX Queue state, tell the hardware about
	 * the new TX descriptors and return success.
	 */
	txq_advance(&txq->q, ndesc);

	cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	/* An error of some sort happened. Free the TX skb and tell the
	 * OS that we've "dealt" with the packet ...
	 */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
		return cxgb4_vf_eth_xmit(skb, dev);

	return cxgb4_eth_xmit(skb, dev);
}

/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
@@ -3044,7 +3412,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
	c.iqsize = htons(iq->size);
	c.iqaddr = cpu_to_be64(iq->phys_addr);
	if (cong >= 0)
		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
					    FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
								    : FW_IQ_IQTYPE_OFLD));

	if (fl) {
		enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);

@@ -2882,6 +2882,57 @@ int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
	return 0;
}

/**
 * t4_get_pfres - retrieve PF resource limits
 * @adapter: the adapter
 *
 * Retrieves configured resource limits and capabilities for a physical
 * function. The results are stored in @adapter->params.pfres.
 */
int t4_get_pfres(struct adapter *adapter)
{
	struct pf_resources *pfres = &adapter->params.pfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/* Execute PFVF Read command to get PF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F |
				    FW_PFVF_CMD_PFN_V(adapter->pf) |
				    FW_PFVF_CMD_VFN_V(0));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
	if (v != FW_SUCCESS)
		return v;

	/* Extract PF resource limits and return success. */
	word = be32_to_cpu(rpl.niqflint_niq);
	pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
	pfres->niq = FW_PFVF_CMD_NIQ_G(word);

	word = be32_to_cpu(rpl.type_to_neq);
	pfres->neq = FW_PFVF_CMD_NEQ_G(word);
	pfres->pmask = FW_PFVF_CMD_PMASK_G(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	pfres->tc = FW_PFVF_CMD_TC_G(word);
	pfres->nvi = FW_PFVF_CMD_NVI_G(word);
	pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
	pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
	pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);

	return 0;
}

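The FW_PFVF_CMD_*_G() accessors above all follow the same shift-and-mask shape. A standalone sketch of that pattern; the field position, width, and reply word here are invented for illustration:

        #include <stdio.h>
        #include <stdint.h>

        #define EXAMPLE_FIELD_S 20
        #define EXAMPLE_FIELD_M 0xfffU
        #define EXAMPLE_FIELD_G(x) (((x) >> EXAMPLE_FIELD_S) & EXAMPLE_FIELD_M)

        int main(void)
        {
                uint32_t word = 0x04200042;     /* made-up mailbox reply word */

                printf("field = 0x%x\n", EXAMPLE_FIELD_G(word));        /* 0x42 */
                return 0;
        }
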
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */
@@ -7453,10 +7504,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* Fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* Fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* Fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}

@@ -188,6 +188,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
	CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
	CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
	CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */
	CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */
	CH_PCI_ID_TABLE_FENTRY(0x50af), /* Custom T580-KR-SO */
	CH_PCI_ID_TABLE_FENTRY(0x50b0), /* Custom T520-CR-LOM */

	/* T6 adapters:
	 */

@@ -1502,6 +1502,25 @@
#define TP_MIB_DATA_A	0x7e54
#define TP_INT_CAUSE_A	0x7e74

#define TP_FLM_FREE_PS_CNT_A	0x7e80
#define TP_FLM_FREE_RX_CNT_A	0x7e84

#define FREEPSTRUCTCOUNT_S	0
#define FREEPSTRUCTCOUNT_M	0x1fffffU
#define FREEPSTRUCTCOUNT_G(x)	(((x) >> FREEPSTRUCTCOUNT_S) & FREEPSTRUCTCOUNT_M)

#define FREERXPAGECOUNT_S	0
#define FREERXPAGECOUNT_M	0x1fffffU
#define FREERXPAGECOUNT_V(x)	((x) << FREERXPAGECOUNT_S)
#define FREERXPAGECOUNT_G(x)	(((x) >> FREERXPAGECOUNT_S) & FREERXPAGECOUNT_M)

#define TP_FLM_FREE_TX_CNT_A	0x7e88

#define FREETXPAGECOUNT_S	0
#define FREETXPAGECOUNT_M	0x1fffffU
#define FREETXPAGECOUNT_V(x)	((x) << FREETXPAGECOUNT_S)
#define FREETXPAGECOUNT_G(x)	(((x) >> FREETXPAGECOUNT_S) & FREETXPAGECOUNT_M)

#define FLMTXFLSTEMPTY_S	30
#define FLMTXFLSTEMPTY_V(x)	((x) << FLMTXFLSTEMPTY_S)
#define FLMTXFLSTEMPTY_F	FLMTXFLSTEMPTY_V(1U)
@@ -1683,6 +1702,16 @@
#define ULP_TX_LA_RDPTR_0_A	0x8ec0
#define ULP_TX_LA_RDDATA_0_A	0x8ec4
#define ULP_TX_LA_WRPTR_0_A	0x8ec8
#define ULP_TX_ASIC_DEBUG_CTRL_A	0x8f70

#define ULP_TX_ASIC_DEBUG_0_A	0x8f74
#define ULP_TX_ASIC_DEBUG_1_A	0x8f78
#define ULP_TX_ASIC_DEBUG_2_A	0x8f7c
#define ULP_TX_ASIC_DEBUG_3_A	0x8f80
#define ULP_TX_ASIC_DEBUG_4_A	0x8f84

/* registers for module PM_RX */
#define PM_RX_BASE_ADDR 0x8fc0

#define PMRX_E_PCMD_PAR_ERROR_S	0
#define PMRX_E_PCMD_PAR_ERROR_V(x)	((x) << PMRX_E_PCMD_PAR_ERROR_S)

@@ -1472,6 +1472,12 @@ enum fw_iq_type {
	FW_IQ_TYPE_NO_FL_INT_CAP
};

enum fw_iq_iqtype {
	FW_IQ_IQTYPE_OTHER,
	FW_IQ_IQTYPE_NIC,
	FW_IQ_IQTYPE_OFLD,
};

struct fw_iq_cmd {
	__be32 op_to_vfn;
	__be32 alloc_to_len16;
@@ -1586,6 +1592,12 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_IQFLINTISCSIC_S	26
#define FW_IQ_CMD_IQFLINTISCSIC_V(x)	((x) << FW_IQ_CMD_IQFLINTISCSIC_S)

#define FW_IQ_CMD_IQTYPE_S	24
#define FW_IQ_CMD_IQTYPE_M	0x3
#define FW_IQ_CMD_IQTYPE_V(x)	((x) << FW_IQ_CMD_IQTYPE_S)
#define FW_IQ_CMD_IQTYPE_G(x)	\
	(((x) >> FW_IQ_CMD_IQTYPE_S) & FW_IQ_CMD_IQTYPE_M)

#define FW_IQ_CMD_FL0CNGCHMAP_S		20
#define FW_IQ_CMD_FL0CNGCHMAP_V(x)	((x) << FW_IQ_CMD_FL0CNGCHMAP_S)

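A standalone sketch of packing and unpacking the new IQ type field, reusing the shift and mask values exactly as defined in the diff above:

        #include <stdio.h>
        #include <stdint.h>

        #define FW_IQ_CMD_IQTYPE_S 24
        #define FW_IQ_CMD_IQTYPE_M 0x3
        #define FW_IQ_CMD_IQTYPE_V(x) ((x) << FW_IQ_CMD_IQTYPE_S)
        #define FW_IQ_CMD_IQTYPE_G(x) \
                (((x) >> FW_IQ_CMD_IQTYPE_S) & FW_IQ_CMD_IQTYPE_M)

        enum { FW_IQ_IQTYPE_OTHER, FW_IQ_IQTYPE_NIC, FW_IQ_IQTYPE_OFLD };

        int main(void)
        {
                uint32_t w = FW_IQ_CMD_IQTYPE_V(FW_IQ_IQTYPE_NIC);

                printf("word=0x%08x type=%u\n", w, FW_IQ_CMD_IQTYPE_G(w));
                return 0;
        }
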
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__

#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x13
#define T4FW_VERSION_MICRO 0x01
#define T4FW_VERSION_MINOR 0x14
#define T4FW_VERSION_MICRO 0x08
#define T4FW_VERSION_BUILD 0x00

#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00

#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x13
#define T5FW_VERSION_MICRO 0x01
#define T5FW_VERSION_MINOR 0x14
#define T5FW_VERSION_MICRO 0x08
#define T5FW_VERSION_BUILD 0x00

#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00

#define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x13
#define T6FW_VERSION_MICRO 0x01
#define T6FW_VERSION_MINOR 0x14
#define T6FW_VERSION_MICRO 0x08
#define T6FW_VERSION_BUILD 0x00

#define T6FW_MIN_VERSION_MAJOR 0x00

@@ -412,12 +412,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
		ppmax * (sizeof(struct cxgbi_ppod_data)) +
		ppod_bmap_size * sizeof(unsigned long);

	ppm = vmalloc(alloc_sz);
	ppm = vzalloc(alloc_sz);
	if (!ppm)
		goto release_ppm_pool;

	memset(ppm, 0, alloc_sz);

	ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]);

	if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) {

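vzalloc(sz) is equivalent to vmalloc(sz) followed by zeroing the allocation, which is why the explicit memset is dropped alongside this change. A userspace analogue of the same simplification, with malloc/calloc standing in for the kernel allocators:

        #include <stdlib.h>
        #include <string.h>

        int main(void)
        {
                size_t alloc_sz = 4096;
                char *a = malloc(alloc_sz);     /* old pattern: allocate ... */
                if (a)
                        memset(a, 0, alloc_sz); /* ... then zero separately */
                char *b = calloc(1, alloc_sz);  /* new pattern: one zeroing call */

                free(a);
                free(b);
                return 0;
        }
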