Merge commit 'v3.6-rc5' into next

* commit 'v3.6-rc5': (1098 commits)
  Linux 3.6-rc5
  HID: tpkbd: work even if the new Lenovo Keyboard driver is not configured
  Remove user-triggerable BUG from mpol_to_str
  xen/pciback: Fix proper FLR steps.
  uml: fix compile error in deliver_alarm()
  dj: memory scribble in logi_dj
  Fix order of arguments to compat_put_time[spec|val]
  xen: Use correct masking in xen_swiotlb_alloc_coherent.
  xen: fix logical error in tlb flushing
  xen/p2m: Fix one-off error in checking the P2M tree directory.
  powerpc: Don't use __put_user() in patch_instruction
  powerpc: Make sure IPI handlers see data written by IPI senders
  powerpc: Restore correct DSCR in context switch
  powerpc: Fix DSCR inheritance in copy_thread()
  powerpc: Keep thread.dscr and thread.dscr_inherit in sync
  powerpc: Update DSCR on all CPUs when writing sysfs dscr_default
  powerpc/powernv: Always go into nap mode when CPU is offline
  powerpc: Give hypervisor decrementer interrupts their own handler
  powerpc/vphn: Fix arch_update_cpu_topology() return value
  ARM: gemini: fix the gemini build
  ...

Conflicts:
	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
	drivers/rapidio/devices/tsi721.c
@@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
  **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-	u32 ctrl, ctrl_ext, eecd;
+	u32 ctrl, ctrl_ext, eecd, tctl;
 	s32 ret_val;
 
 	/*
@@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 	ew32(IMC, 0xffffffff);
 
 	ew32(RCTL, 0);
-	ew32(TCTL, E1000_TCTL_PSP);
+	tctl = er32(TCTL);
+	tctl &= ~E1000_TCTL_EN;
+	ew32(TCTL, tctl);
 	e1e_flush();
 
 	usleep_range(10000, 20000);
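The hunk above replaces a blind TCTL write with a read-modify-write that clears only the transmit-enable bit and keeps the rest of the register intact. A minimal standalone sketch of that pattern (the register value and bit positions below are invented stand-ins, not the real E1000_TCTL_* definitions or MMIO accessors):

#include <stdio.h>
#include <stdint.h>

#define TCTL_EN  (1u << 1)   /* stand-in for E1000_TCTL_EN */
#define TCTL_PSP (1u << 3)   /* stand-in for E1000_TCTL_PSP */

int main(void)
{
	uint32_t tctl = TCTL_EN | TCTL_PSP | 0x00FF0000u; /* pretend value read back from the register */

	/* Clear only the transmit-enable bit; PSP and the other configured
	 * fields survive, unlike an overwrite of the whole register. */
	tctl &= ~TCTL_EN;

	printf("TCTL after masking: 0x%08X\n", (unsigned int)tctl);
	return 0;
}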
@@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 			 * auto-negotiation in the TXCW register and disable
 			 * forced link in the Device Control register in an
 			 * attempt to auto-negotiate with our link partner.
-			 * If the partner code word is null, stop forcing
-			 * and restart auto negotiation.
 			 */
-			if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
+			if (rxcw & E1000_RXCW_C) {
 				/* Enable autoneg, and unforce link up */
 				ew32(TXCW, mac->txcw);
 				ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -310,6 +310,7 @@ struct e1000_adapter {
 	 */
 	struct e1000_ring *tx_ring /* One per active queue */
 						____cacheline_aligned_in_smp;
+	u32 tx_fifo_limit;
 
 	struct napi_struct napi;
 
@@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
 	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
 }
 
+static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
+				 struct e1000_buffer *bi)
+{
+	int i;
+	struct e1000_ps_page *ps_page;
+
+	for (i = 0; i < adapter->rx_ps_pages; i++) {
+		ps_page = &bi->ps_pages[i];
+
+		if (ps_page->page) {
+			pr_info("packet dump for ps_page %d:\n", i);
+			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+				       16, 1, page_address(ps_page->page),
+				       PAGE_SIZE, true);
+		}
+	}
+}
+
 /*
  * e1000e_dump - Print registers, Tx-ring and Rx-ring
  */
@@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
 			(unsigned long long)buffer_info->time_stamp,
 			buffer_info->skb, next_desc);
 
-		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+		if (netif_msg_pktdata(adapter) && buffer_info->skb)
 			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
-				       16, 1, phys_to_virt(buffer_info->dma),
-				       buffer_info->length, true);
+				       16, 1, buffer_info->skb->data,
+				       buffer_info->skb->len, true);
 	}
 
 	/* Print Rx Ring Summary */
@@ -381,10 +399,8 @@ rx_ring_summary:
 				buffer_info->skb, next_desc);
 
 			if (netif_msg_pktdata(adapter))
-				print_hex_dump(KERN_INFO, "",
-					       DUMP_PREFIX_ADDRESS, 16, 1,
-					       phys_to_virt(buffer_info->dma),
-					       adapter->rx_ps_bsize0, true);
+				e1000e_dump_ps_pages(adapter,
+						     buffer_info);
 		}
 	}
 	break;
@@ -444,12 +460,12 @@ rx_ring_summary:
 				(unsigned long long)buffer_info->dma,
 				buffer_info->skb, next_desc);
 
-			if (netif_msg_pktdata(adapter))
+			if (netif_msg_pktdata(adapter) &&
+			    buffer_info->skb)
 				print_hex_dump(KERN_INFO, "",
 					       DUMP_PREFIX_ADDRESS, 16,
 					       1,
-					       phys_to_virt
-					       (buffer_info->dma),
+					       buffer_info->skb->data,
 					       adapter->rx_buffer_len,
 					       true);
 		}
@@ -3500,6 +3516,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		break;
 	}
 
+	/*
+	 * Alignment of Tx data is on an arbitrary byte boundary with the
+	 * maximum size per Tx descriptor limited only to the transmit
+	 * allocation of the packet buffer minus 96 bytes with an upper
+	 * limit of 24KB due to receive synchronization limitations.
+	 */
+	adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+				       24 << 10);
+
 	/*
 	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
 	 * fit in receive buffer.
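For reference, the arithmetic behind the new tx_fifo_limit assignment above, as a hedged userspace sketch (the PBA register value is a made-up example, not something read from hardware):

#include <stdio.h>

int main(void)
{
	/* Hypothetical PBA value: upper 16 bits = Tx packet buffer allocation
	 * in KB (0x14 = 20 KB here), lower 16 bits = Rx share. */
	unsigned int pba = 0x0014000c;
	unsigned int tx_alloc = ((pba >> 16) << 10) - 96;	/* Tx bytes minus 96 */
	unsigned int cap = 24 << 10;				/* 24 KB upper limit */
	unsigned int tx_fifo_limit = tx_alloc < cap ? tx_alloc : cap;

	printf("tx_fifo_limit = %u bytes\n", tx_fifo_limit);	/* 20384 */
	return 0;
}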
@@ -4769,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
 	return 1;
 }
 
-#define E1000_MAX_PER_TXD 8192
-#define E1000_MAX_TXD_PWR 12
-
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
 			unsigned int first, unsigned int max_per_txd,
-			unsigned int nr_frags, unsigned int mss)
+			unsigned int nr_frags)
 {
 	struct e1000_adapter *adapter = tx_ring->adapter;
 	struct pci_dev *pdev = adapter->pdev;
@@ -5007,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 
 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 {
+	BUG_ON(size > tx_ring->count);
+
 	if (e1000_desc_unused(tx_ring) >= size)
 		return 0;
 	return __e1000_maybe_stop_tx(tx_ring, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				    struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	unsigned int first;
-	unsigned int max_per_txd = E1000_MAX_PER_TXD;
-	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
 	unsigned int len = skb_headlen(skb);
 	unsigned int nr_frags;
@@ -5040,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	}
 
 	mss = skb_shinfo(skb)->gso_size;
-	/*
-	 * The controller does a simple calculation to
-	 * make sure there is enough room in the FIFO before
-	 * initiating the DMA for each buffer. The calc is:
-	 * 4 = ceil(buffer len/mss). To make sure we don't
-	 * overrun the FIFO, adjust the max buffer len if mss
-	 * drops.
-	 */
 	if (mss) {
 		u8 hdr_len;
-		max_per_txd = min(mss << 2, max_per_txd);
-		max_txd_pwr = fls(max_per_txd) - 1;
 
 		/*
 		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5081,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		count++;
 	count++;
 
-	count += TXD_USE_COUNT(len, max_txd_pwr);
+	count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	for (f = 0; f < nr_frags; f++)
-		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
-				       max_txd_pwr);
+		count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+				      adapter->tx_fifo_limit);
 
 	if (adapter->hw.mac.tx_pkt_filtering)
 		e1000_transfer_dhcp_info(adapter, skb);
@@ -5128,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
 
 	/* if count is 0 then mapping error has occurred */
-	count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+	count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+			     nr_frags);
 	if (count) {
 		skb_tx_timestamp(skb);
 
 		netdev_sent_queue(netdev, skb->len);
 		e1000_tx_queue(tx_ring, tx_flags, count);
 		/* Make sure there is space in the ring for the next send. */
-		e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
-
+		e1000_maybe_stop_tx(tx_ring,
+				    (MAX_SKB_FRAGS *
+				     DIV_ROUND_UP(PAGE_SIZE,
+						  adapter->tx_fifo_limit) + 2));
 	} else {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
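The two hunks above replace the TXD_USE_COUNT power-of-two estimate with DIV_ROUND_UP against tx_fifo_limit, both for counting descriptors per frame and for the queue-stop reservation. A hedged standalone sketch of both expressions (the buffer lengths and the limit are assumed example values, not taken from a real skb):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* same helper as <linux/kernel.h> */
#define MAX_SKB_FRAGS 17				/* typical value for 4 KB pages */
#define PAGE_SIZE 4096

int main(void)
{
	unsigned int tx_fifo_limit = 20384;	/* example value from the reset sketch */
	unsigned int head_len = 1514;		/* hypothetical linear part of an skb */
	unsigned int frag_len[2] = { 4096, 2048 }; /* hypothetical page fragments */
	unsigned int count = 0, f;

	/* Descriptors needed for this frame, mirroring the xmit path above. */
	count += DIV_ROUND_UP(head_len, tx_fifo_limit);
	for (f = 0; f < 2; f++)
		count += DIV_ROUND_UP(frag_len[f], tx_fifo_limit);

	/* Worst-case reservation used as the new queue-stop threshold. */
	unsigned int reserve = MAX_SKB_FRAGS *
			       DIV_ROUND_UP(PAGE_SIZE, tx_fifo_limit) + 2;

	printf("descriptors for this frame: %u\n", count);		/* 3 */
	printf("descriptors reserved before stopping: %u\n", reserve);	/* 19 */
	return 0;
}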
@@ -6300,8 +6314,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	adapter->hw.phy.autoneg_advertised = 0x2f;
 
 	/* ring size defaults */
-	adapter->rx_ring->count = 256;
-	adapter->tx_ring->count = 256;
+	adapter->rx_ring->count = E1000_DEFAULT_RXD;
+	adapter->tx_ring->count = E1000_DEFAULT_TXD;
 
 	/*
 	 * Initial Wake on LAN setting - If APM wake is enabled in
@@ -254,6 +254,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		 */
 		size += NVM_WORD_SIZE_BASE_SHIFT;
 
+		/*
+		 * Check for invalid size
+		 */
+		if ((hw->mac.type == e1000_82576) && (size > 15)) {
+			pr_notice("The NVM size is not valid, defaulting to 32K\n");
+			size = 15;
+		}
+
 		nvm->word_size = 1 << size;
 		if (hw->mac.type < e1000_i210) {
 			nvm->opcode_bits = 8;
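For clarity on the clamp being moved ahead of the word_size assignment above: word_size is 2^size 16-bit words, so capping size at 15 yields the 32K words mentioned in the message. A small hedged illustration:

#include <stdio.h>

int main(void)
{
	unsigned int size = 17;	/* hypothetical out-of-range exponent */

	if (size > 15) {	/* same clamp as the 82576 check */
		printf("The NVM size is not valid, defaulting to 32K\n");
		size = 15;
	}

	/* word_size is in 16-bit words: 1 << 15 = 32768, i.e. "32K" */
	printf("word_size = %u words\n", 1u << size);
	return 0;
}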
@@ -281,14 +289,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		} else
 			nvm->type = e1000_nvm_flash_hw;
 
-		/*
-		 * Check for invalid size
-		 */
-		if ((hw->mac.type == e1000_82576) && (size > 15)) {
-			pr_notice("The NVM size is not valid, defaulting to 32K\n");
-			size = 15;
-		}
-
 		/* NVM Function Pointers */
 		switch (hw->mac.type) {
 		case e1000_82580:
@@ -156,8 +156,12 @@
 			       : (0x0E018 + ((_n) * 0x40)))
 #define E1000_TXDCTL(_n)  ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
 			       : (0x0E028 + ((_n) * 0x40)))
-#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
-#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
+#define E1000_RXCTL(_n)   ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+			   (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n)	E1000_RXCTL(_n)
+#define E1000_TXCTL(_n)   ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+			   (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n)	E1000_TXCTL(_n)
 #define E1000_TDWBAL(_n)  ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
 			       : (0x0E038 + ((_n) * 0x40)))
 #define E1000_TDWBAH(_n)  ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
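The rewritten macros above select a register block by queue index: queues 0-3 keep the legacy 0x02814/0x03814 bases with a 0x100 stride, while higher queues move to the 0x0C014/0x0E014 bases with a 0x40 stride. A hedged sketch that just evaluates the new E1000_RXCTL() form (copied from the hunk) for a few indices:

#include <stdio.h>

/* Same shape as the new definition in the hunk above. */
#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
			 (0x0C014 + ((_n) * 0x40)))

int main(void)
{
	int n;

	for (n = 0; n < 6; n++)	/* first six queue indices, illustrative only */
		printf("E1000_RXCTL(%d) = 0x%05X\n", n, E1000_RXCTL(n));
	return 0;
}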
@@ -209,8 +209,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	/* When SoL/IDER sessions are active, autoneg/speed/duplex
 	 * cannot be changed */
 	if (igb_check_reset_block(hw)) {
-		dev_err(&adapter->pdev->dev, "Cannot change link "
-			"characteristics when SoL/IDER is active.\n");
+		dev_err(&adapter->pdev->dev,
+			"Cannot change link characteristics when SoL/IDER is active.\n");
 		return -EINVAL;
 	}
 
@@ -1089,8 +1089,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 		wr32(reg, (_test[pat] & write));
 		val = rd32(reg) & mask;
 		if (val != (_test[pat] & write & mask)) {
-			dev_err(&adapter->pdev->dev, "pattern test reg %04X "
-				"failed: got 0x%08X expected 0x%08X\n",
+			dev_err(&adapter->pdev->dev,
+				"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
 				reg, val, (_test[pat] & write & mask));
 			*data = reg;
 			return 1;
@@ -1108,8 +1108,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
 	wr32(reg, write & mask);
 	val = rd32(reg);
 	if ((write & mask) != (val & mask)) {
-		dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
-			" got 0x%08X expected 0x%08X\n", reg,
+		dev_err(&adapter->pdev->dev,
+			"set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
 			(val & mask), (write & mask));
 		*data = reg;
 		return 1;
@@ -1171,8 +1171,9 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
 	wr32(E1000_STATUS, toggle);
 	after = rd32(E1000_STATUS) & toggle;
 	if (value != after) {
-		dev_err(&adapter->pdev->dev, "failed STATUS register test "
-			"got: 0x%08X expected: 0x%08X\n", after, value);
+		dev_err(&adapter->pdev->dev,
+			"failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+			after, value);
 		*data = 1;
 		return 1;
 	}
@@ -1497,6 +1498,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 		break;
 	}
 
+	/* add small delay to avoid loopback test failure */
+	msleep(50);
+
 	/* force 1000, set loopback */
 	igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
 
@@ -1777,16 +1781,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
 	 * sessions are active */
 	if (igb_check_reset_block(&adapter->hw)) {
 		dev_err(&adapter->pdev->dev,
-			"Cannot do PHY loopback test "
-			"when SoL/IDER is active.\n");
+			"Cannot do PHY loopback test when SoL/IDER is active.\n");
 		*data = 0;
 		goto out;
 	}
 	if ((adapter->hw.mac.type == e1000_i210)
-		|| (adapter->hw.mac.type == e1000_i210)) {
+		|| (adapter->hw.mac.type == e1000_i211)) {
 		dev_err(&adapter->pdev->dev,
-			"Loopback test not supported "
-			"on this part at this time.\n");
+			"Loopback test not supported on this part at this time.\n");
 		*data = 0;
 		goto out;
 	}
@@ -462,10 +462,10 @@ static void igb_dump(struct igb_adapter *adapter)
 				(u64)buffer_info->time_stamp,
 				buffer_info->skb, next_desc);
 
-			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+			if (netif_msg_pktdata(adapter) && buffer_info->skb)
 				print_hex_dump(KERN_INFO, "",
 					DUMP_PREFIX_ADDRESS,
-					16, 1, phys_to_virt(buffer_info->dma),
+					16, 1, buffer_info->skb->data,
 					buffer_info->length, true);
 		}
 	}
@@ -547,18 +547,17 @@ rx_ring_summary:
 				(u64)buffer_info->dma,
 				buffer_info->skb, next_desc);
 
-			if (netif_msg_pktdata(adapter)) {
+			if (netif_msg_pktdata(adapter) &&
+			    buffer_info->dma && buffer_info->skb) {
 				print_hex_dump(KERN_INFO, "",
-					DUMP_PREFIX_ADDRESS,
-					16, 1,
-					phys_to_virt(buffer_info->dma),
-					IGB_RX_HDR_LEN, true);
+					  DUMP_PREFIX_ADDRESS,
+					  16, 1, buffer_info->skb->data,
+					  IGB_RX_HDR_LEN, true);
 				print_hex_dump(KERN_INFO, "",
 					DUMP_PREFIX_ADDRESS,
 					16, 1,
-					phys_to_virt(
-					  buffer_info->page_dma +
-					  buffer_info->page_offset),
+					page_address(buffer_info->page) +
+						buffer_info->page_offset,
 					PAGE_SIZE/2, true);
 			}
 		}
@@ -804,12 +804,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
 		   link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
 		/* Set KX4/KX/KR support according to speed requested */
 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
-		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
 				autoc |= IXGBE_AUTOC_KX4_SUPP;
 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
 			    (hw->phy.smart_speed_active == false))
 				autoc |= IXGBE_AUTOC_KR_SUPP;
+		}
 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 			autoc |= IXGBE_AUTOC_KX_SUPP;
 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&