Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  ipv4: make default for INET_LRO consistent with help text
  net: fix skb_seq_read returning wrong offset/length for page frag data
  pkt_sched: gen_estimator: use 64 bit intermediate counters for bps
  be2net: add two new pci device ids to pci device table
  sch_teql: should not dereference skb after ndo_start_xmit()
  tcp: fix MSG_PEEK race check
  Doc: fixed descriptions on /proc/sys/net/core/* and /proc/sys/net/unix/*
  Neterion: *FIFO1_DMA_ERR set twice, should 2nd be *FIFO2_DMA_ERR?
  mv643xx_eth: fix PPC DMA breakage
  bonding: fix link down handling in 802.3ad mode
  bridge: fix initial packet flood if !STP
  bridge: relay bridge multicast pkgs if !STP
  NET: Meth: Fix unsafe mix of irq and non-irq spinlocks.
  mlx4_en: Fix not deleted napi structures
  ipconfig: handle case of delayed DHCP server
  netpoll: don't dereference NULL dev from np
  wimax/i2400m: fix device crash: fix optimization in _roq_queue_update_ws
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1266,13 +1266,22 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
 sctp_wmem - vector of 3 INTEGERs: min, default, max
         See tcp_wmem for a description.
 
-UNDOCUMENTED:
 
 /proc/sys/net/core/*
-        dev_weight FIXME
+dev_weight - INTEGER
+        The maximum number of packets that kernel can handle on a NAPI
+        interrupt, it's a Per-CPU variable.
+
+        Default: 64
 
 /proc/sys/net/unix/*
-        max_dgram_qlen FIXME
+max_dgram_qlen - INTEGER
+        The maximum length of dgram socket receive queue
+
+        Default: 10
+
+
+UNDOCUMENTED:
 
 /proc/sys/net/irda/*
         fast_poll_increase FIXME
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -35,8 +35,22 @@
 #define DRV_VER                 "2.0.348"
 #define DRV_NAME                "be2net"
 #define BE_NAME                 "ServerEngines BladeEngine2 10Gbps NIC"
+#define OC_NAME                 "Emulex OneConnect 10Gbps NIC"
 #define DRV_DESC                BE_NAME "Driver"
 
+#define BE_VENDOR_ID            0x19a2
+#define BE_DEVICE_ID1           0x211
+#define OC_DEVICE_ID1           0x700
+#define OC_DEVICE_ID2           0x701
+
+static inline char *nic_name(struct pci_dev *pdev)
+{
+        if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2)
+                return OC_NAME;
+        else
+                return BE_NAME;
+}
+
 /* Number of bytes of an RX frame that are copied to skb->data */
 #define BE_HDR_LEN              64
 #define BE_MAX_JUMBO_FRAME_SIZE 9018
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -28,10 +28,10 @@ static unsigned int rx_frag_size = 2048;
 module_param(rx_frag_size, uint, S_IRUGO);
 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
 
-#define BE_VENDOR_ID            0x19a2
-#define BE2_DEVICE_ID_1         0x0211
 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
-        { PCI_DEVICE(BE_VENDOR_ID, BE2_DEVICE_ID_1) },
+        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
         { 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -1859,7 +1859,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
         if (status != 0)
                 goto stats_clean;
 
-        dev_info(&pdev->dev, BE_NAME " port %d\n", adapter->port_num);
+        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
         return 0;
 
 stats_clean:
@@ -1873,7 +1873,7 @@ rel_reg:
 disable_dev:
         pci_disable_device(pdev);
 do_none:
-        dev_warn(&pdev->dev, BE_NAME " initialization failed\n");
+        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
         return status;
 }
 
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1465,6 +1465,12 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
         return best;
 }
 
+static int agg_device_up(const struct aggregator *agg)
+{
+        return (netif_running(agg->slave->dev) &&
+                netif_carrier_ok(agg->slave->dev));
+}
+
 /**
  * ad_agg_selection_logic - select an aggregation group for a team
  * @aggregator: the aggregator we're looking at
@@ -1496,14 +1502,13 @@ static void ad_agg_selection_logic(struct aggregator *agg)
         struct port *port;
 
         origin = agg;
-
         active = __get_active_agg(agg);
-        best = active;
+        best = (active && agg_device_up(active)) ? active : NULL;
 
         do {
                 agg->is_active = 0;
 
-                if (agg->num_of_ports)
+                if (agg->num_of_ports && agg_device_up(agg))
                         best = ad_agg_selection_test(best, agg);
 
         } while ((agg = __get_next_agg(agg)));
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -127,11 +127,11 @@ static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg)
 static int mdio_probe(struct meth_private *priv)
 {
         int i;
-        unsigned long p2, p3;
+        unsigned long p2, p3, flags;
         /* check if phy is detected already */
         if(priv->phy_addr>=0&&priv->phy_addr<32)
                 return 0;
-        spin_lock(&priv->meth_lock);
+        spin_lock_irqsave(&priv->meth_lock, flags);
         for (i=0;i<32;++i){
                 priv->phy_addr=i;
                 p2=mdio_read(priv,2);
@@ -157,7 +157,7 @@ static int mdio_probe(struct meth_private *priv)
                         break;
                 }
         }
-        spin_unlock(&priv->meth_lock);
+        spin_unlock_irqrestore(&priv->meth_lock, flags);
         if(priv->phy_addr<32) {
                 return 0;
         }
@@ -373,14 +373,14 @@ static int meth_release(struct net_device *dev)
 static void meth_rx(struct net_device* dev, unsigned long int_status)
 {
         struct sk_buff *skb;
-        unsigned long status;
+        unsigned long status, flags;
         struct meth_private *priv = netdev_priv(dev);
         unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;
 
-        spin_lock(&priv->meth_lock);
+        spin_lock_irqsave(&priv->meth_lock, flags);
         priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
         mace->eth.dma_ctrl = priv->dma_ctrl;
-        spin_unlock(&priv->meth_lock);
+        spin_unlock_irqrestore(&priv->meth_lock, flags);
 
         if (int_status & METH_INT_RX_UNDERFLOW) {
                 fifo_rptr = (fifo_rptr - 1) & 0x0f;
@@ -452,12 +452,12 @@ static void meth_rx(struct net_device* dev, unsigned long int_status)
                 mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
                 ADVANCE_RX_PTR(priv->rx_write);
         }
-        spin_lock(&priv->meth_lock);
+        spin_lock_irqsave(&priv->meth_lock, flags);
         /* In case there was underflow, and Rx DMA was disabled */
         priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
         mace->eth.dma_ctrl = priv->dma_ctrl;
         mace->eth.int_stat = METH_INT_RX_THRESHOLD;
-        spin_unlock(&priv->meth_lock);
+        spin_unlock_irqrestore(&priv->meth_lock, flags);
 }
 
 static int meth_tx_full(struct net_device *dev)
@@ -470,11 +470,11 @@ static int meth_tx_full(struct net_device *dev)
 static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
 {
         struct meth_private *priv = netdev_priv(dev);
-        unsigned long status;
+        unsigned long status, flags;
         struct sk_buff *skb;
         unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16;
 
-        spin_lock(&priv->meth_lock);
+        spin_lock_irqsave(&priv->meth_lock, flags);
 
         /* Stop DMA notification */
         priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
@@ -527,12 +527,13 @@ static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
         }
 
         mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
-        spin_unlock(&priv->meth_lock);
+        spin_unlock_irqrestore(&priv->meth_lock, flags);
 }
 
 static void meth_error(struct net_device* dev, unsigned status)
 {
         struct meth_private *priv = netdev_priv(dev);
+        unsigned long flags;
 
         printk(KERN_WARNING "meth: error status: 0x%08x\n",status);
         /* check for errors too... */
@@ -547,7 +548,7 @@ static void meth_error(struct net_device* dev, unsigned status)
                 printk(KERN_WARNING "meth: Rx overflow\n");
         if (status & (METH_INT_RX_UNDERFLOW)) {
                 printk(KERN_WARNING "meth: Rx underflow\n");
-                spin_lock(&priv->meth_lock);
+                spin_lock_irqsave(&priv->meth_lock, flags);
                 mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
                 /* more underflow interrupts will be delivered,
                  * effectively throwing us into an infinite loop.
@@ -555,7 +556,7 @@ static void meth_error(struct net_device* dev, unsigned status)
                 priv->dma_ctrl &= ~METH_DMA_RX_EN;
                 mace->eth.dma_ctrl = priv->dma_ctrl;
                 DPRINTK("Disabled meth Rx DMA temporarily\n");
-                spin_unlock(&priv->meth_lock);
+                spin_unlock_irqrestore(&priv->meth_lock, flags);
         }
         mace->eth.int_stat = METH_INT_ERROR;
 }
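Note: the meth change above is a pattern worth calling out. meth_lock is also taken from the driver's interrupt handler, so every acquisition outside hard-IRQ context must use the _irqsave/_irqrestore variants; otherwise the IRQ can arrive on the same CPU while the lock is held and deadlock on it. A minimal sketch of the rule, with hypothetical names (my_lock, my_irq_handler) rather than the driver's own, assuming the usual kernel headers:

    static DEFINE_SPINLOCK(my_lock);        /* shared with the IRQ handler */

    static void process_context_path(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&my_lock, flags);     /* IRQs off, prior state saved */
            /* ... touch state shared with the interrupt handler ... */
            spin_unlock_irqrestore(&my_lock, flags);
    }

    static irqreturn_t my_irq_handler(int irq, void *dev_id)
    {
            spin_lock(&my_lock);    /* hard-IRQ context: IRQs already off */
            /* ... */
            spin_unlock(&my_lock);
            return IRQ_HANDLED;
    }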
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -125,8 +125,10 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 
         if (cq->is_tx)
                 del_timer(&cq->timer);
-        else
+        else {
                 napi_disable(&cq->napi);
+                netif_napi_del(&cq->napi);
+        }
 
         mlx4_cq_free(mdev->dev, &cq->mcq);
 }
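The one-line mlx4_en fix encodes a general rule: every netif_napi_add() must be paired with netif_napi_del() before the net_device is torn down, or the napi structure stays linked on dev->napi_list and a later re-add corrupts the list. A sketch of the pairing under hypothetical names (my_rxq, my_poll), not the mlx4 code:

    struct my_rxq {                         /* hypothetical container */
            struct napi_struct napi;
    };

    static int my_poll(struct napi_struct *napi, int budget)
    {
            /* ... reap completions, then napi_complete() ... */
            return 0;
    }

    static void my_rxq_setup(struct net_device *dev, struct my_rxq *rxq)
    {
            netif_napi_add(dev, &rxq->napi, my_poll, 64); /* links into dev->napi_list */
            napi_enable(&rxq->napi);
    }

    static void my_rxq_teardown(struct my_rxq *rxq)
    {
            napi_disable(&rxq->napi);       /* wait for any running poll to finish */
            netif_napi_del(&rxq->napi);     /* unlink, so the netdev can be freed */
    }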
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -569,7 +569,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
                 if (rxq->rx_curr_desc == rxq->rx_ring_size)
                         rxq->rx_curr_desc = 0;
 
-                dma_unmap_single(NULL, rx_desc->buf_ptr,
+                dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
                                  rx_desc->buf_size, DMA_FROM_DEVICE);
                 rxq->rx_desc_count--;
                 rx++;
@@ -678,8 +678,9 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
 
                 rx_desc = rxq->rx_desc_area + rx;
 
-                rx_desc->buf_ptr = dma_map_single(NULL, skb->data,
-                                                  mp->skb_size, DMA_FROM_DEVICE);
+                rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
+                                                  skb->data, mp->skb_size,
+                                                  DMA_FROM_DEVICE);
                 rx_desc->buf_size = mp->skb_size;
                 rxq->rx_skb[rx] = skb;
                 wmb();
@@ -718,6 +719,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
 
 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
+        struct mv643xx_eth_private *mp = txq_to_mp(txq);
         int nr_frags = skb_shinfo(skb)->nr_frags;
         int frag;
 
@@ -746,10 +748,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 
                 desc->l4i_chk = 0;
                 desc->byte_cnt = this_frag->size;
-                desc->buf_ptr = dma_map_page(NULL, this_frag->page,
-                                             this_frag->page_offset,
-                                             this_frag->size,
-                                             DMA_TO_DEVICE);
+                desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
+                                             this_frag->page,
+                                             this_frag->page_offset,
+                                             this_frag->size, DMA_TO_DEVICE);
         }
 }
 
@@ -826,7 +828,8 @@ no_csum:
 
         desc->l4i_chk = l4i_chk;
         desc->byte_cnt = length;
-        desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+        desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
+                                       length, DMA_TO_DEVICE);
 
         __skb_queue_tail(&txq->tx_skb, skb);
 
@@ -956,10 +959,10 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                 }
 
                 if (cmd_sts & TX_FIRST_DESC) {
-                        dma_unmap_single(NULL, desc->buf_ptr,
+                        dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
                                          desc->byte_cnt, DMA_TO_DEVICE);
                 } else {
-                        dma_unmap_page(NULL, desc->buf_ptr,
+                        dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
                                        desc->byte_cnt, DMA_TO_DEVICE);
                 }
 
@@ -1894,8 +1897,8 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
                                                 mp->rx_desc_sram_size);
                 rxq->rx_desc_dma = mp->rx_desc_sram_addr;
         } else {
-                rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
-                                                       &rxq->rx_desc_dma,
+                rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+                                                       size, &rxq->rx_desc_dma,
                                                        GFP_KERNEL);
         }
 
@@ -1947,7 +1950,7 @@ out_free:
         if (index == 0 && size <= mp->rx_desc_sram_size)
                 iounmap(rxq->rx_desc_area);
         else
-                dma_free_coherent(NULL, size,
+                dma_free_coherent(mp->dev->dev.parent, size,
                                   rxq->rx_desc_area,
                                   rxq->rx_desc_dma);
 
@@ -1979,7 +1982,7 @@ static void rxq_deinit(struct rx_queue *rxq)
             rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
                 iounmap(rxq->rx_desc_area);
         else
-                dma_free_coherent(NULL, rxq->rx_desc_area_size,
+                dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
                                   rxq->rx_desc_area, rxq->rx_desc_dma);
 
         kfree(rxq->rx_skb);
@@ -2007,8 +2010,8 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
                                                 mp->tx_desc_sram_size);
                 txq->tx_desc_dma = mp->tx_desc_sram_addr;
         } else {
-                txq->tx_desc_area = dma_alloc_coherent(NULL, size,
-                                                       &txq->tx_desc_dma,
+                txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+                                                       size, &txq->tx_desc_dma,
                                                        GFP_KERNEL);
         }
 
@@ -2053,7 +2056,7 @@ static void txq_deinit(struct tx_queue *txq)
             txq->tx_desc_area_size <= mp->tx_desc_sram_size)
                 iounmap(txq->tx_desc_area);
         else
-                dma_free_coherent(NULL, txq->tx_desc_area_size,
+                dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
                                   txq->tx_desc_area, txq->tx_desc_dma);
 }
 
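The mv643xx_eth breakage came from passing NULL as the struct device to the DMA API. On powerpc the dma_ops hang off the device, so a NULL device either oopses or picks the wrong operations; the fix threads the real device (mp->dev->dev.parent) through every map, unmap and coherent alloc/free. A condensed sketch of the rule, with "dev" standing in for that real device (not driver code; map and unmap are shown back to back only for brevity):

    static int my_rx_map(struct device *dev, struct sk_buff *skb, size_t len)
    {
            dma_addr_t busaddr;

            busaddr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, busaddr))
                    return -ENOMEM;
            /* ... point the descriptor at busaddr; hardware owns the buffer ... */
            dma_unmap_single(dev, busaddr, len, DMA_FROM_DEVICE); /* same dev */
            return 0;
    }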
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -115,7 +115,7 @@ enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
-                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR), 0, 32),
+                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
                 &vp_reg->kdfcctl_errors_mask);
 
         __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -819,10 +819,9 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
                 roq_data = (struct i2400m_roq_data *) &skb->cb;
                 i2400m_net_erx(i2400m, skb, roq_data->cs);
         }
-        else {
+        else
                 __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
-                __i2400m_roq_update_ws(i2400m, roq, sn + 1);
-        }
+        __i2400m_roq_update_ws(i2400m, roq, sn + 1);
         i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
                            old_ws, len, sn, nsn, roq->ws);
 }
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -134,6 +134,10 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
                 if (skb->protocol == htons(ETH_P_PAUSE))
                         goto drop;
 
+                /* If STP is turned off, then forward */
+                if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
+                        goto forward;
+
                 if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
                             NULL, br_handle_local_finish))
                         return NULL;    /* frame consumed by filter */
@@ -141,6 +145,7 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
                         return skb;     /* continue processing */
         }
 
+forward:
         switch (p->state) {
         case BR_STATE_FORWARDING:
                 rhook = rcu_dereference(br_should_route_hook);
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -297,6 +297,9 @@ void br_topology_change_detection(struct net_bridge *br)
 {
         int isroot = br_is_root_bridge(br);
 
+        if (br->stp_enabled != BR_KERNEL_STP)
+                return;
+
         pr_info("%s: topology change detected, %s\n", br->dev->name,
                 isroot ? "propagating" : "sending tcn bpdu");
 
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,9 +66,9 @@
 
    NOTES.
 
-   * The stored value for avbps is scaled by 2^5, so that maximal
-     rate is ~1Gbit, avpps is scaled by 2^10.
-
+   * avbps is scaled by 2^5, avpps is scaled by 2^10.
+   * both values are reported as 32 bit unsigned values. bps can
+     overflow for fast links : max speed being 34360Mbit/sec
    * Minimal interval is HZ/4=250msec (it is the greatest common divisor
      for HZ=100 and HZ=1024 8)), maximal interval
      is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
@@ -86,9 +86,9 @@ struct gen_estimator
         spinlock_t      *stats_lock;
         int             ewma_log;
         u64             last_bytes;
+        u64             avbps;
         u32             last_packets;
         u32             avpps;
-        u32             avbps;
         struct rcu_head e_rcu;
         struct rb_node  node;
 };
@@ -115,6 +115,7 @@ static void est_timer(unsigned long arg)
         rcu_read_lock();
         list_for_each_entry_rcu(e, &elist[idx].list, list) {
                 u64 nbytes;
+                u64 brate;
                 u32 npackets;
                 u32 rate;
 
@@ -125,9 +126,9 @@ static void est_timer(unsigned long arg)
 
                 nbytes = e->bstats->bytes;
                 npackets = e->bstats->packets;
-                rate = (nbytes - e->last_bytes)<<(7 - idx);
+                brate = (nbytes - e->last_bytes)<<(7 - idx);
                 e->last_bytes = nbytes;
-                e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
+                e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log;
                 e->rate_est->bps = (e->avbps+0xF)>>5;
 
                 rate = (npackets - e->last_packets)<<(12 - idx);
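The estimator change is about intermediate-width arithmetic: the byte-rate delta is shifted left by up to 7 bits and the stored average carries an extra 2^5 scaling, so a u32 intermediate wraps long before the 32-bit reported limit of ~34360 Mbit/s. The fix keeps the intermediate and the average in u64 and does the EWMA step in signed 64-bit before scaling back down. A self-contained sketch of the fixed update step, with stand-in names rather than the kernel's struct gen_estimator fields:

    /* One timer tick of the byte-rate EWMA; returns the value reported
     * to userspace. "idx" and "ewma_log" are as in the kernel code. */
    static u32 est_update(u64 *avbps, u64 *last_bytes, u64 nbytes,
                          int idx, int ewma_log)
    {
            u64 brate = (nbytes - *last_bytes) << (7 - idx); /* scaled by 2^5 */

            *last_bytes = nbytes;
            *avbps += ((s64)(brate - *avbps)) >> ewma_log;   /* 64-bit EWMA */
            return (u32)((*avbps + 0xF) >> 5);               /* bytes/sec */
    }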
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -175,9 +175,13 @@ static void service_arp_queue(struct netpoll_info *npi)
 void netpoll_poll(struct netpoll *np)
 {
         struct net_device *dev = np->dev;
-        const struct net_device_ops *ops = dev->netdev_ops;
+        const struct net_device_ops *ops;
 
-        if (!dev || !netif_running(dev) || !ops->ndo_poll_controller)
+        if (!dev || !netif_running(dev))
+                return;
+
+        ops = dev->netdev_ops;
+        if (!ops->ndo_poll_controller)
                 return;
 
         /* Process pending work on NIC */
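The netpoll bug is easy to miss in review because the dereference hides in a declaration initializer, which runs before any check. A sketch of the shape (stand-in names, not the kernel code):

    /* Buggy: the initializer dereferences dev before the NULL test. */
    struct net_device *dev = np->dev;
    const struct net_device_ops *ops = dev->netdev_ops;  /* crash if dev == NULL */

    if (!dev || !netif_running(dev) || !ops->ndo_poll_controller)
            return;

    /* Fixed: validate the base pointer first, then derive from it. */
    if (!dev || !netif_running(dev))
            return;
    ops = dev->netdev_ops;
    if (!ops->ndo_poll_controller)
            return;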
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2288,7 +2288,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 next_skb:
         block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
-        if (abs_offset < block_limit) {
+        if (abs_offset < block_limit && !st->frag_data) {
                 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
                 return block_limit - abs_offset;
         }
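The added !st->frag_data test keeps the iterator from handing back linear-head bytes while page-fragment data is still mapped, which previously produced a wrong offset/length pair. For context, a minimal usage sketch of the sequential-read API this fixes; it walks an skb's bytes, linear head and paged frags alike, without linearizing the skb (not kernel code, just an illustration):

    static void walk_skb_bytes(struct sk_buff *skb)
    {
            struct skb_seq_state st;
            const u8 *data;
            unsigned int consumed = 0, len;

            skb_prepare_seq_read(skb, 0, skb->len, &st);
            while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                    /* "len" valid bytes are readable at "data" */
                    consumed += len;
            }
            skb_abort_seq_read(&st);  /* releases any still-mapped fragment */
    }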
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -408,7 +408,7 @@ config INET_XFRM_MODE_BEET
 
 config INET_LRO
         bool "Large Receive Offload (ipv4/tcp)"
-
+        default y
         ---help---
           Support for Large Receive Offload (ipv4/tcp).
 
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -139,6 +139,8 @@ __be32 ic_servaddr = NONE; /* Boot server IP address */
 __be32 root_server_addr = NONE; /* Address of NFS server */
 u8 root_server_path[256] = { 0, };      /* Path to mount as root */
 
+u32 ic_dev_xid;         /* Device under configuration */
+
 /* vendor class identifier */
 static char vendor_class_identifier[253] __initdata;
 
@@ -932,6 +934,13 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
                 goto drop_unlock;
         }
 
+        /* Is it a reply for the device we are configuring? */
+        if (b->xid != ic_dev_xid) {
+                if (net_ratelimit())
+                        printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n");
+                goto drop_unlock;
+        }
+
         /* Parse extensions */
         if (ext_len >= 4 &&
             !memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */
@@ -1115,6 +1124,9 @@ static int __init ic_dynamic(void)
         get_random_bytes(&timeout, sizeof(timeout));
         timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM);
         for (;;) {
+                /* Track the device we are configuring */
+                ic_dev_xid = d->xid;
+
 #ifdef IPCONFIG_BOOTP
                 if (do_bootp && (d->able & IC_BOOTP))
                         ic_bootp_send_if(d, jiffies - start_jiffies);
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1321,6 +1321,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
         struct task_struct *user_recv = NULL;
         int copied_early = 0;
         struct sk_buff *skb;
+        u32 urg_hole = 0;
 
         lock_sock(sk);
 
@@ -1532,7 +1533,8 @@ do_prequeue:
                                 }
                         }
                 }
-                if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
+                if ((flags & MSG_PEEK) &&
+                    (peek_seq - copied - urg_hole != tp->copied_seq)) {
                         if (net_ratelimit())
                                 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
                                        current->comm, task_pid_nr(current));
@@ -1553,6 +1555,7 @@ do_prequeue:
                         if (!urg_offset) {
                                 if (!sock_flag(sk, SOCK_URGINLINE)) {
                                         ++*seq;
+                                        urg_hole++;
                                         offset++;
                                         used--;
                                         if (!used)
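The MSG_PEEK race check works because peeking must not advance the socket's copied_seq; the local peek_seq, however, legitimately runs ahead by the bytes handed to the caller plus one sequence number per urgent byte skipped when SOCK_URGINLINE is off. Only a discrepancy beyond that means another thread consumed data concurrently. A sketch of the invariant with stand-in names (warn_peek_race is a hypothetical helper):

    /* While peeking: peek_seq == copied_seq_at_entry + copied + urg_hole.
     * "copied" counts bytes delivered this call; "urg_hole" counts urgent
     * bytes skipped inline. Anything else is a genuine application race. */
    if ((flags & MSG_PEEK) &&
        (peek_seq - copied - urg_hole != tp->copied_seq))
            warn_peek_race();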
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -303,6 +303,8 @@ restart:
                 switch (teql_resolve(skb, skb_res, slave)) {
                 case 0:
                         if (__netif_tx_trylock(slave_txq)) {
+                                unsigned int length = qdisc_pkt_len(skb);
+
                                 if (!netif_tx_queue_stopped(slave_txq) &&
                                     !netif_tx_queue_frozen(slave_txq) &&
                                     slave_ops->ndo_start_xmit(skb, slave) == 0) {
@@ -310,8 +312,7 @@ restart:
                                         master->slaves = NEXT_SLAVE(q);
                                         netif_wake_queue(dev);
                                         master->stats.tx_packets++;
-                                        master->stats.tx_bytes +=
-                                                qdisc_pkt_len(skb);
+                                        master->stats.tx_bytes += length;
                                         return 0;
                                 }
                                 __netif_tx_unlock(slave_txq);
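The sch_teql fix follows the skb ownership rule: once ndo_start_xmit() accepts an skb, the driver owns it and may free it immediately, so anything needed for accounting must be read out beforehand. A condensed sketch of the pattern with stand-in names ("stats" is a hypothetical counters struct):

    unsigned int length = qdisc_pkt_len(skb);  /* read while skb is still ours */

    if (slave_ops->ndo_start_xmit(skb, slave) == 0) {
            stats->tx_packets++;
            stats->tx_bytes += length;         /* NOT qdisc_pkt_len(skb) here */
    }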