Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next

Pull networking changes from David Miller:
 "Noteworthy changes this time around:

   1) Multicast rejoin support for team driver, from Jiri Pirko.

   2) Centralize and simplify TCP RTT measurement handling in order to
      reduce the impact of bad RTO seeding from SYN/ACKs.  Also, when
      both timestamps and local RTT measurements are available prefer
      the latter because there are broken middleware devices which
      scramble the timestamp.

      From Yuchung Cheng.

   3) Add TCP_NOTSENT_LOWAT socket option to limit the amount of kernel
      memory consumed to queue up unsent user data.  From Eric Dumazet.

   4) Add a "physical port ID" abstraction for network devices, from
      Jiri Pirko.

   5) Add a "suppress" operation to influence fib_rules lookups, from
      Stefan Tomanek.

   6) Add a networking development FAQ, from Paul Gortmaker.

   7) Extend the information provided by tcp_probe and add ipv6 support,
      from Daniel Borkmann.

   8) Use RCU locking more extensively in openvswitch data paths, from
      Pravin B Shelar.

   9) Add SCTP support to openvswitch, from Joe Stringer.

  10) Add EF10 chip support to SFC driver, from Ben Hutchings.

  11) Add new SYNPROXY netfilter target, from Patrick McHardy.

  12) Compute a rate approximation for sending in TCP sockets, and use
      this to more intelligently coalesce TSO frames.  Furthermore, add
      a new packet scheduler which takes advantage of this estimate when
      available.  From Eric Dumazet.

  13) Allow AF_PACKET fanouts with random selection, from Daniel
      Borkmann.

  14) Add ipv6 support to vxlan driver, from Cong Wang"

Resolved conflicts as per discussion.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1218 commits)
  openvswitch: Fix alignment of struct sw_flow_key.
  netfilter: Fix build errors with xt_socket.c
  tcp: Add missing braces to do_tcp_setsockopt
  caif: Add missing braces to multiline if in cfctrl_linkup_request
  bnx2x: Add missing braces in bnx2x:bnx2x_link_initialize
  vxlan: Fix kernel panic on device delete.
  net: mvneta: implement ->ndo_do_ioctl() to support PHY ioctls
  net: mvneta: properly disable HW PHY polling and ensure adjust_link() works
  icplus: Use netif_running to determine device state
  ethernet/arc/arc_emac: Fix huge delays in large file copies
  tuntap: orphan frags before trying to set tx timestamp
  tuntap: purge socket error queue on detach
  qlcnic: use standard NAPI weights
  ipv6:introduce function to find route for redirect
  bnx2x: VF RSS support - VF side
  bnx2x: VF RSS support - PF side
  vxlan: Notify drivers for listening UDP port changes
  net: usbnet: update addr_assign_type if appropriate
  driver/net: enic: update enic maintainers and driver
  driver/net: enic: Exposing symbols for Cisco's low latency driver
  ...
Merged by Linus Torvalds, 2013-09-05 14:54:29 -07:00
974 changed files with 56899 additions and 24089 deletions


@@ -325,6 +325,7 @@
 <title>functions/definitions</title>
 !Finclude/net/mac80211.h ieee80211_rx_status
 !Finclude/net/mac80211.h mac80211_rx_flags
+!Finclude/net/mac80211.h mac80211_tx_info_flags
 !Finclude/net/mac80211.h mac80211_tx_control_flags
 !Finclude/net/mac80211.h mac80211_rate_control_flags
 !Finclude/net/mac80211.h ieee80211_tx_rate


@@ -0,0 +1,49 @@
Micrel KSZ9021 Gigabit Ethernet PHY

Some boards require special tuning values, particularly when it comes to
clock delays. You can specify clock delay values by adding
micrel-specific properties to an Ethernet OF device node.

All skew control options are specified in picoseconds. The minimum
value is 0, and the maximum value is 3000.

Optional properties:
 - rxc-skew-ps : Skew control of RXC pad
 - rxdv-skew-ps : Skew control of RX CTL pad
 - txc-skew-ps : Skew control of TXC pad
 - txen-skew-ps : Skew control of TX_CTL pad
 - rxd0-skew-ps : Skew control of RX data 0 pad
 - rxd1-skew-ps : Skew control of RX data 1 pad
 - rxd2-skew-ps : Skew control of RX data 2 pad
 - rxd3-skew-ps : Skew control of RX data 3 pad
 - txd0-skew-ps : Skew control of TX data 0 pad
 - txd1-skew-ps : Skew control of TX data 1 pad
 - txd2-skew-ps : Skew control of TX data 2 pad
 - txd3-skew-ps : Skew control of TX data 3 pad

Examples:

	/* Attach to an Ethernet device with autodetected PHY */
	&enet {
		rxc-skew-ps = <3000>;
		rxdv-skew-ps = <0>;
		txc-skew-ps = <3000>;
		txen-skew-ps = <0>;
		status = "okay";
	};

	/* Attach to an explicitly-specified PHY */
	mdio {
		phy0: ethernet-phy@0 {
			rxc-skew-ps = <3000>;
			rxdv-skew-ps = <0>;
			txc-skew-ps = <3000>;
			txen-skew-ps = <0>;
			reg = <0>;
		};
	};
	ethernet@70000 {
		status = "okay";
		phy = <&phy0>;
		phy-mode = "rgmii-id";
	};


@@ -0,0 +1,21 @@
MOXA ART Ethernet Controller

Required properties:

- compatible : Must be "moxa,moxart-mac"
- reg : Should contain register location and length
- interrupts : Should contain the mac interrupt number

Example:

	mac0: mac@90900000 {
		compatible = "moxa,moxart-mac";
		reg = <0x90900000 0x100>;
		interrupts = <25 0>;
	};

	mac1: mac@92000000 {
		compatible = "moxa,moxart-mac";
		reg = <0x92000000 0x100>;
		interrupts = <27 0>;
	};


@@ -22,6 +22,11 @@ Required properties:
 - snps,pbl		Programmable Burst Length
 - snps,fixed-burst	Program the DMA to use the fixed burst mode
 - snps,mixed-burst	Program the DMA to use the mixed burst mode
+- snps,force_thresh_dma_mode	Force DMA to use the threshold mode for
+				both tx and rx
+- snps,force_sf_dma_mode	Force DMA to use the Store and Forward
+				mode for both tx and rx. This flag is
+				ignored if force_thresh_dma_mode is set.

 Optional properties:
 - mac-address: 6 bytes, mac address


@@ -124,6 +124,8 @@ multiqueue.txt
 	- HOWTO for multiqueue network device support.
 netconsole.txt
 	- The network console module netconsole.ko: configuration and notes.
+netdev-FAQ.txt
+	- FAQ describing how to submit net changes to netdev mailing list.
 netdev-features.txt
 	- Network interface features API description.
 netdevices.txt


@@ -1,7 +1,7 @@
 Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
 ==============================================================
-November 15, 2005
+March 15, 2011

 Contents
 ========
@@ -122,7 +122,7 @@ Additional Configurations
   NOTE: This setting is not saved across reboots.

-Ethtool
+ethtool
 -------
 The driver utilizes the ethtool interface for driver configuration and


@@ -1,8 +1,8 @@
-Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters
-===============================================================
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================

 Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.

 Contents
 ========
@@ -420,15 +420,15 @@ Additional Configurations
 - The maximum MTU setting for Jumbo Frames is 16110. This value coincides
   with the maximum Jumbo Frames size of 16128.

-- Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or
-  loss of link.
+- Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
+  poor performance or loss of link.

 - Adapters based on the Intel(R) 82542 and 82573V/E controller do not
   support Jumbo Frames. These correspond to the following product names:
   Intel(R) PRO/1000 Gigabit Server Adapter
   Intel(R) PRO/1000 PM Network Connection

-Ethtool
+ethtool
 -------
 The driver utilizes the ethtool interface for driver configuration and
 diagnostics, as well as displaying statistical information. The ethtool


@@ -1,8 +1,8 @@
-Linux* Driver for Intel(R) Network Connection
-=============================================
+Linux* Driver for Intel(R) Ethernet Network Connection
+======================================================

 Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.

 Contents
 ========
@@ -259,13 +259,16 @@ Additional Configurations
 - The maximum MTU setting for Jumbo Frames is 9216. This value coincides
   with the maximum Jumbo Frames size of 9234 bytes.

-- Using Jumbo Frames at 10 or 100 Mbps is not supported and may result in
+- Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
   poor performance or loss of link.

 - Some adapters limit Jumbo Frames sized packets to a maximum of
   4096 bytes and some adapters do not support Jumbo Frames.

-Ethtool
+- Jumbo Frames cannot be configured on an 82579-based Network device, if
+  MACSec is enabled on the system.
+
+ethtool
 -------
 The driver utilizes the ethtool interface for driver configuration and
 diagnostics, as well as displaying statistical information. We
@@ -273,6 +276,9 @@ Additional Configurations
 http://ftp.kernel.org/pub/software/network/ethtool/

+NOTE: When validating enable/disable tests on some parts (82578, for example)
+you need to add a few seconds between tests when working with ethtool.
+
 Speed and Duplex
 ----------------
 Speed and Duplex are configured through the ethtool* utility. For


@@ -1,8 +1,8 @@
-Linux* Base Driver for Intel(R) Network Connection
-==================================================
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================

 Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.

 Contents
 ========
@@ -36,6 +36,53 @@ Default Value: 0
 This parameter adds support for SR-IOV. It causes the driver to spawn up to
 max_vfs worth of virtual functions.

+QueuePairs
+----------
+Valid Range: 0-1
+Default Value: 1 (TX and RX will be paired onto one interrupt vector)
+
+If set to 0, when MSI-X is enabled, the TX and RX will attempt to occupy
+separate vectors.
+
+This option can be overridden to 1 if there are not sufficient interrupts
+available. This can occur if any combination of RSS, VMDQ, and max_vfs
+results in more than 4 queues being used.
+
+Node
+----
+Valid Range: 0-n
+Default Value: -1 (off)
+
+0 - n: where n is the number of the NUMA node that should be used to
+  allocate memory for this adapter port.
+-1: uses the driver default of allocating memory on whichever processor is
+  running insmod/modprobe.
+
+The Node parameter will allow you to pick which NUMA node you want to have
+the adapter allocate memory from. All driver structures, in-memory queues,
+and receive buffers will be allocated on the node specified. This parameter
+is only useful when interrupt affinity is specified; otherwise, some portion
+of the time the interrupt could run on a different core than the one the
+memory was allocated on, causing slower memory access and impacting
+throughput, CPU, or both.
+
+EEE
+---
+Valid Range: 0-1
+Default Value: 1 (enabled)
+
+A link between two EEE-compliant devices will result in periodic bursts of
+data followed by long periods where the link is in an idle state. This Low
+Power Idle (LPI) state is supported in both 1Gbps and 100Mbps link speeds.
+
+NOTE: EEE support requires autonegotiation.
+
+DMAC
+----
+Valid Range: 0-1
+Default Value: 1 (enabled)
+Enables or disables the DMA Coalescing feature.
+
 Additional Configurations
 =========================
@@ -55,10 +102,10 @@ Additional Configurations
 - The maximum MTU setting for Jumbo Frames is 9216. This value coincides
   with the maximum Jumbo Frames size of 9234 bytes.

-- Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or
-  loss of link.
+- Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
+  poor performance or loss of link.

-Ethtool
+ethtool
 -------
 The driver utilizes the ethtool interface for driver configuration and
 diagnostics, as well as displaying statistical information. The latest
@@ -106,6 +153,14 @@ Additional Configurations
   Where n=the VF that attempted to do the spoofing.

+Setting MAC Address, VLAN and Rate Limit Using IProute2 Tool
+------------------------------------------------------------
+
+You can set a MAC address of a Virtual Function (VF), a default VLAN and the
+rate limit using the IProute2 tool. Download the latest version of the
+iproute2 tool from Sourceforge if your version does not have all the
+features you require.
+
 Support
 =======


@@ -1,8 +1,8 @@
-Linux* Base Driver for Intel(R) Network Connection
-==================================================
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================

 Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.

 Contents
 ========
@@ -55,7 +55,7 @@ networking link on the left to search for your adapter:
 Additional Configurations
 =========================

-Ethtool
+ethtool
 -------
 The driver utilizes the ethtool interface for driver configuration and
 diagnostics, as well as displaying statistical information. The ethtool


@@ -440,6 +440,10 @@ tcp_syncookies - BOOLEAN
 	SYN flood warnings in logs not being really flooded, your server
 	is seriously misconfigured.

+	If you want to test what effect syncookies have on your
+	network connections you can set this knob to 2 to enable
+	unconditional generation of syncookies.
+
 tcp_fastopen - INTEGER
 	Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data
 	in the opening SYN packet. To use this feature, the client application
@@ -478,6 +482,15 @@ tcp_syn_retries - INTEGER
 tcp_timestamps - BOOLEAN
 	Enable timestamps as defined in RFC1323.

+tcp_min_tso_segs - INTEGER
+	Minimal number of segments per TSO frame.
+	Since linux-3.12, TCP does an automatic sizing of TSO frames,
+	depending on flow rate, instead of filling 64KB packets.
+	For specific usages, it's possible to force TCP to build big
+	TSO frames. Note that the TCP stack might split too-big TSO
+	packets if the available window is too small.
+	Default: 2
+
 tcp_tso_win_divisor - INTEGER
 	This allows control over what percentage of the congestion window
 	can be consumed by a single TSO frame.
@@ -516,6 +529,19 @@ tcp_wmem - vector of 3 INTEGERs: min, default, max
 	this value is ignored.
 	Default: between 64K and 4MB, depending on RAM size.

+tcp_notsent_lowat - UNSIGNED INTEGER
+	A TCP socket can control the amount of unsent bytes in its write
+	queue, thanks to the TCP_NOTSENT_LOWAT socket option.
+	poll()/select()/epoll() reports POLLOUT events if the amount of
+	unsent bytes is below a per socket value, and if the write queue
+	is not full. sendmsg() will also not add new buffers if the limit
+	is hit.
+
+	This global variable controls the amount of unsent data for
+	sockets not using TCP_NOTSENT_LOWAT. For these sockets, a change
+	to the global variable has immediate effect.
+
+	Default: UINT_MAX (0xFFFFFFFF)
+
 tcp_workaround_signed_windows - BOOLEAN
 	If set, assume no receipt of a window scaling option means the
 	remote TCP is broken and treats the window as a signed quantity.
@@ -1022,7 +1048,15 @@ disable_policy - BOOLEAN
 disable_xfrm - BOOLEAN
 	Disable IPSEC encryption on this interface, whatever the policy

+igmpv2_unsolicited_report_interval - INTEGER
+	The interval in milliseconds in which the next unsolicited
+	IGMPv1 or IGMPv2 report retransmit will take place.
+	Default: 10000 (10 seconds)
+
+igmpv3_unsolicited_report_interval - INTEGER
+	The interval in milliseconds in which the next unsolicited
+	IGMPv3 report retransmit will take place.
+	Default: 1000 (1 second)
+
 tag - INTEGER
 	Allows you to write a number, which can be used as required.
@@ -1314,6 +1348,27 @@ ndisc_notify - BOOLEAN
 	1 - Generate unsolicited neighbour advertisements when device is brought
 	    up or hardware address changes.

+mldv1_unsolicited_report_interval - INTEGER
+	The interval in milliseconds in which the next unsolicited
+	MLDv1 report retransmit will take place.
+	Default: 10000 (10 seconds)
+
+mldv2_unsolicited_report_interval - INTEGER
+	The interval in milliseconds in which the next unsolicited
+	MLDv2 report retransmit will take place.
+	Default: 1000 (1 second)
+
+force_mld_version - INTEGER
+	0 - (default) No enforcement of an MLD version, MLDv1 fallback allowed
+	1 - Enforce to use MLD version 1
+	2 - Enforce to use MLD version 2
+
+suppress_frag_ndisc - INTEGER
+	Control RFC 6980 (Security Implications of IPv6 Fragmentation
+	with IPv6 Neighbor Discovery) behavior:
+	1 - (default) discard fragmented neighbor discovery packets
+	0 - allow fragmented neighbor discovery packets
+
 icmp/*:
 ratelimit - INTEGER
 	Limit the maximal rates for sending ICMPv6 packets.
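
The tcp_notsent_lowat knob above is the global companion to the new
per-socket TCP_NOTSENT_LOWAT option from item 3 of this pull. A minimal
userspace sketch, not taken from the patch set itself -- the helper name
and the 128KB threshold are arbitrary:

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <poll.h>
	#include <sys/socket.h>

	#ifndef TCP_NOTSENT_LOWAT
	#define TCP_NOTSENT_LOWAT 25	/* from linux/tcp.h; older libcs lack it */
	#endif

	/* Cap unsent-queue buildup on one connected socket, then wait
	 * until more data can usefully be written. */
	int limit_unsent(int fd)
	{
		unsigned int lowat = 128 * 1024;	/* bytes of unsent data allowed */
		struct pollfd pfd = { .fd = fd, .events = POLLOUT };

		if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
			       &lowat, sizeof(lowat)) < 0)
			return -1;

		/* POLLOUT now fires only once unsent bytes drop below
		 * lowat and the write queue is not full. */
		return poll(&pfd, 1, -1);
	}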


@@ -1,7 +1,7 @@
-Linux Base Driver for 10 Gigabit Intel(R) Network Connection
-=============================================================
+Linux Base Driver for 10 Gigabit Intel(R) Ethernet Network Connection
+=====================================================================

-October 9, 2007
+March 14, 2011

 Contents
@@ -274,9 +274,9 @@ Additional Configurations
 -------------------------------------------------
 Configuring a network driver to load properly when the system is started is
 distribution dependent. Typically, the configuration process involves adding
-an alias line to files in /etc/modprobe.d/ as well as editing other system
-startup scripts and/or configuration files. Many popular Linux distributions
-ship with tools to make these changes for you. To learn the proper way to
+an alias line to /etc/modprobe.conf as well as editing other system startup
+scripts and/or configuration files. Many popular Linux distributions ship
+with tools to make these changes for you. To learn the proper way to
 configure a network device for your system, refer to your distribution
 documentation. If during this process you are asked for the driver or module
 name, the name for the Linux Base Driver for the Intel 10GbE Family of
@@ -306,7 +306,7 @@ Additional Configurations
   with the maximum Jumbo Frames size of 16128.

-Ethtool
+ethtool
 -------
 The driver utilizes the ethtool interface for driver configuration and
 diagnostics, as well as displaying statistical information. The ethtool


@@ -1,8 +1,9 @@
-Linux Base Driver for 10 Gigabit PCI Express Intel(R) Network Connection
-=========================================================================
+Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Family of
+Adapters
+=============================================================================

-Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Intel 10 Gigabit Linux driver.
+Copyright(c) 1999 - 2013 Intel Corporation.

 Contents
 ========
@@ -16,8 +17,8 @@ Contents
 Identifying Your Adapter
 ========================

-The driver in this release is compatible with 82598 and 82599-based Intel
-Network Connections.
+The driver in this release is compatible with 82598, 82599 and X540-based
+Intel Network Connections.

 For more information on how to identify your adapter, go to the Adapter &
 Driver ID Guide at:
@@ -72,7 +73,7 @@ cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
 Laser turns off for SFP+ when ifconfig down
 -------------------------------------------
 "ifconfig down" turns off the laser for 82599-based SFP+ fiber adapters.
-"ifconfig up" turns on the later.
+"ifconfig up" turns on the laser.

 82598-BASED ADAPTERS
@@ -118,6 +119,93 @@ NOTE: For 82598 backplane cards entering 1 gig mode, flow control default
 behavior is changed to off. Flow control in 1 gig mode on these devices can
 lead to Tx hangs.

+Intel(R) Ethernet Flow Director
+-------------------------------
+Supports advanced filters that direct receive packets by their flows to
+different queues. Enables tight control on routing a flow in the platform.
+Matches flows and CPU cores for flow affinity. Supports multiple parameters
+for flexible flow classification and load balancing.
+
+Flow Director is enabled only if the kernel is multiple TX queue capable.
+
+An included script (set_irq_affinity.sh) automates setting the IRQ to CPU
+affinity.
+
+You can verify that the driver is using Flow Director by looking at the
+counters in ethtool: fdir_miss and fdir_match.
+
+Other ethtool Commands:
+To enable Flow Director
+	ethtool -K ethX ntuple on
+To add a filter
+	Use the -U switch, e.g., ethtool -U ethX flow-type tcp4 src-ip 0x178000a
+	action 1
+To see the list of filters currently present:
+	ethtool -u ethX
+
+Perfect Filter: Perfect filter is an interface to load the filter table that
+funnels all flow into queue_0 unless an alternative queue is specified using
+"action". In that case, any flow that matches the filter criteria will be
+directed to the appropriate queue.
+
+If the queue is defined as -1, the filter will drop matching packets.
+
+To account for filter matches and misses, there are two stats in ethtool:
+fdir_match and fdir_miss. In addition, rx_queue_N_packets shows the number of
+packets processed by the Nth queue.
+
+NOTE: Receive Packet Steering (RPS) and Receive Flow Steering (RFS) are not
+compatible with Flow Director. If Flow Director is enabled, these will be
+disabled.
+
+The following three parameters impact Flow Director.
+
+FdirMode
+--------
+Valid Range: 0-2 (0=off, 1=ATR, 2=Perfect filter mode)
+Default Value: 1
+
+Flow Director filtering modes.
+
+FdirPballoc
+-----------
+Valid Range: 0-2 (0=64k, 1=128k, 2=256k)
+Default Value: 0
+
+Flow Director allocated packet buffer size.
+
+AtrSampleRate
+--------------
+Valid Range: 1-100
+Default Value: 20
+
+Software ATR Tx packet sample rate. For example, when set to 20, every 20th
+packet is sampled to see if it will create a new flow.
+
+Node
+----
+Valid Range: 0-n
+Default Value: 1 (off)
+
+  0 - n: where n is the number of NUMA nodes (i.e. 0 - 3) currently online in
+  your system
+  1: turns this option off
+
+The Node parameter will allow you to pick which NUMA node you want to have
+the adapter allocate memory on.
+
+max_vfs
+-------
+Valid Range: 1-63
+Default Value: 0
+
+If the value is greater than 0 it will also force the VMDq parameter to be 1
+or more.
+
+This parameter adds support for SR-IOV. It causes the driver to spawn up to
+max_vfs worth of virtual functions.
+
 Additional Configurations
 =========================
@@ -221,9 +309,10 @@ http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf
 Known Issues
 ============

-Enabling SR-IOV in a 32-bit Microsoft* Windows* Server 2008 Guest OS using
-Intel (R) 82576-based GbE or Intel (R) 82599-based 10GbE controller under KVM
------------------------------------------------------------------------------
+Enabling SR-IOV in a 32-bit or 64-bit Microsoft* Windows* Server 2008/R2
+Guest OS using Intel (R) 82576-based GbE or Intel (R) 82599-based 10GbE
+controller under KVM
+------------------------------------------------------------------------

 KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
 includes traditional PCIe devices, as well as SR-IOV-capable devices using
 Intel 82576-based and 82599-based controllers.


@@ -1,8 +1,8 @@
-Linux* Base Driver for Intel(R) Network Connection
-==================================================
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================

 Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.

 Contents
 ========


@@ -0,0 +1,224 @@
Information you need to know about netdev
-----------------------------------------
Q: What is netdev?
A: It is a mailing list for all network related linux stuff. This includes
anything found under net/ (i.e. core code like IPv6) and drivers/net
(i.e. hardware specific drivers) in the linux source tree.
Note that some subsystems (e.g. wireless drivers) which have a high volume
of traffic have their own specific mailing lists.
The netdev list is managed (like many other linux mailing lists) through
VGER ( http://vger.kernel.org/ ) and archives can be found below:
http://marc.info/?l=linux-netdev
http://www.spinics.net/lists/netdev/
Aside from subsystems like that mentioned above, all network related linux
development (i.e. RFC, review, comments, etc) takes place on netdev.
Q: How do the changes posted to netdev make their way into linux?
A: There are always two trees (git repositories) in play. Both are driven
by David Miller, the main network maintainer. There is the "net" tree,
and the "net-next" tree. As you can probably guess from the names, the
net tree is for fixes to existing code already in the mainline tree from
Linus, and net-next is where the new code goes for the future release.
You can find the trees here:
http://git.kernel.org/?p=linux/kernel/git/davem/net.git
http://git.kernel.org/?p=linux/kernel/git/davem/net-next.git
Q: How often do changes from these trees make it to the mainline Linus tree?
A: To understand this, you need to know a bit of background information
on the cadence of linux development. Each new release starts off with
a two week "merge window" where the main maintainers feed their new
stuff to Linus for merging into the mainline tree. After the two weeks,
the merge window is closed, and it is called/tagged "-rc1". No new
features get mainlined after this -- only fixes to the rc1 content
are expected. After roughly a week of collecting fixes to the rc1
content, rc2 is released. This repeats on a roughly weekly basis
until rc7 (typically; sometimes rc6 if things are quiet, or rc8 if
things are in a state of churn), and a week after the last vX.Y-rcN
was done, the official "vX.Y" is released.
Relating that to netdev: At the beginning of the 2 week merge window,
the net-next tree will be closed - no new changes/features. The
accumulated new content of the past ~10 weeks will be passed onto
mainline/Linus via a pull request for vX.Y -- at the same time,
the "net" tree will start accumulating fixes for this pulled content
relating to vX.Y
An announcement indicating when net-next has been closed is usually
sent to netdev, but knowing the above, you can predict that in advance.
IMPORTANT: Do not send new net-next content to netdev during the
period during which net-next tree is closed.
Shortly after the two weeks have passed, (and vX.Y-rc1 is released) the
tree for net-next reopens to collect content for the next (vX.Y+1) release.
If you aren't subscribed to netdev and/or are simply unsure if net-next
has re-opened yet, simply check the net-next git repository link above for
any new networking related commits.
The "net" tree continues to collect fixes for the vX.Y content, and
is fed back to Linus at regular (~weekly) intervals. Meaning that the
focus for "net" is on stablilization and bugfixes.
Finally, the vX.Y gets released, and the whole cycle starts over.
Q: So where are we now in this cycle?
A: Load the mainline (Linus) page here:
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
and note the top of the "tags" section. If it is rc1, it is early
in the dev cycle. If it was tagged rc7 a week ago, then a release
is probably imminent.
Q: How do I indicate which tree (net vs. net-next) my patch should be in?
A: Firstly, think whether you have a bug fix or new "next-like" content.
Then once decided, assuming that you use git, use the prefix flag, i.e.
git format-patch --subject-prefix='PATCH net-next' start..finish
Use "net" instead of "net-next" (always lower case) in the above for
bug-fix net content. If you don't use git, then note the only magic in
the above is just the subject text of the outgoing e-mail, and you can
manually change it yourself with whatever MUA you are comfortable with.
Q: I sent a patch and I'm wondering what happened to it. How can I tell
whether it got merged?
A: Start by looking at the main patchworks queue for netdev:
http://patchwork.ozlabs.org/project/netdev/list/
The "State" field will tell you exactly where things are at with
your patch.
Q: The above only says "Under Review". How can I find out more?
A: Generally speaking, the patches get triaged quickly (in less than 48h).
So be patient. Asking the maintainer for status updates on your
patch is a good way to ensure your patch is ignored or pushed to
the bottom of the priority list.
Q: How can I tell what patches are queued up for backporting to the
various stable releases?
A: Normally Greg Kroah-Hartman collects stable commits himself, but
for networking, Dave collects up patches he deems critical for the
networking subsystem, and then hands them off to Greg.
There is a patchworks queue that you can see here:
http://patchwork.ozlabs.org/bundle/davem/stable/?state=*
It contains the patches which Dave has selected, but not yet handed
off to Greg. If Greg already has the patch, then it will be here:
http://git.kernel.org/cgit/linux/kernel/git/stable/stable-queue.git
A quick way to find whether the patch is in this stable-queue is
to simply clone the repo, and then git grep the mainline commit ID, e.g.
stable-queue$ git grep -l 284041ef21fdf2e
releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
stable/stable-queue$
Q: I see a network patch and I think it should be backported to stable.
Should I request it via "stable@vger.kernel.org" like the references in
the kernel's Documentation/stable_kernel_rules.txt file say?
A: No, not for networking. Check the stable queues as per above 1st to see
if it is already queued. If not, then send a mail to netdev, listing
the upstream commit ID and why you think it should be a stable candidate.
Before you jump to go do the above, do note that the normal stable rules
in Documentation/stable_kernel_rules.txt still apply. So you need to
explicitly indicate why it is a critical fix and exactly what users are
impacted. In addition, you need to convince yourself that you _really_
think it has been overlooked, vs. having been considered and rejected.
Generally speaking, the longer it has had a chance to "soak" in mainline,
the better the odds that it is an OK candidate for stable. So scrambling
to request a commit be added the day after it appears should be avoided.
Q: I have created a network patch and I think it should be backported to
stable. Should I add a "Cc: stable@vger.kernel.org" like the references
in the kernel's Documentation/ directory say?
A: No. See above answer. In short, if you think it really belongs in
stable, then ensure you write a decent commit log that describes who
gets impacted by the bugfix and how it manifests itself, and when the
bug was introduced. If you do that properly, then the commit will
get handled appropriately and most likely get put in the patchworks
stable queue if it really warrants it.
If you think there is some valid information relating to it being in
stable that does _not_ belong in the commit log, then use the three
dash marker line as described in Documentation/SubmittingPatches to
temporarily embed that information into the patch that you send.
Q: Someone said that the comment style and coding convention is different
for the networking content. Is this true?
A: Yes, in a largely trivial way. Instead of this:
/*
* foobar blah blah blah
* another line of text
*/
it is requested that you make it look like this:
/* foobar blah blah blah
* another line of text
*/
Q: I am working in existing code that has the former comment style and not the
latter. Should I submit new code in the former style or the latter?
A: Make it the latter style, so that eventually all code in the domain of
netdev is of this format.
Q: I found a bug that might have possible security implications or similar.
Should I mail the main netdev maintainer off-list?
A: No. The current netdev maintainer has consistently requested that people
use the mailing lists and not reach out directly. If you aren't OK with
that, then perhaps consider mailing "security@kernel.org" or reading about
http://oss-security.openwall.org/wiki/mailing-lists/distros
as possible alternative mechanisms.
Q: What level of testing is expected before I submit my change?
A: If your changes are against net-next, the expectation is that you
have tested by layering your changes on top of net-next. Ideally you
will have done run-time testing specific to your change, but at a
minimum, your changes should survive an "allyesconfig" and an
"allmodconfig" build without new warnings or failures.
Q: Any other tips to help ensure my net/net-next patch gets OK'd?
A: Attention to detail. Re-read your own work as if you were the
reviewer. You can start with using checkpatch.pl, perhaps even
with the "--strict" flag. But do not be mindlessly robotic in
doing so. If your change is a bug-fix, make sure your commit log
indicates the end-user visible symptom, the underlying reason as
to why it happens, and then if necessary, explain why the fix proposed
is the best way to get things done. Don't mangle whitespace, and as
is common, don't mis-indent function arguments that span multiple lines.
If it is your 1st patch, mail it to yourself so you can test apply
it to an unpatched tree to confirm infrastructure didn't mangle it.
Finally, go back and read Documentation/SubmittingPatches to be
sure you are not repeating some common mistake documented there.


@@ -91,6 +91,46 @@ Often we ellipsize arguments not important to the discussion, e.g.:
 in_port(1), eth(...), eth_type(0x0800), ipv4(...), tcp(...)

+Wildcarded flow key format
+--------------------------
+
+A wildcarded flow is described with two sequences of Netlink attributes
+passed over the Netlink socket: a flow key, exactly as described above, and
+an optional corresponding flow mask.
+
+A wildcarded flow can represent a group of exact-match flows. Each '1' bit
+in the mask specifies an exact match with the corresponding bit in the flow
+key. A '0' bit specifies a don't-care bit, which will match either a '1' or
+'0' bit of an incoming packet. Using wildcarded flows can improve the flow
+setup rate by reducing the number of new flows that need to be processed by
+the user space program.
+
+Support for the mask Netlink attribute is optional for both the kernel and
+user space program. The kernel can ignore the mask attribute, installing an
+exact match flow, or reduce the number of don't-care bits in the kernel to
+less than what was specified by the user space program. In this case,
+variations in bits that the kernel does not implement will simply result in
+additional flow setups. The kernel module will also work with user space
+programs that neither support nor supply flow mask attributes.
+
+Since the kernel may ignore or modify wildcard bits, it can be difficult for
+the userspace program to know exactly what matches are installed. There are
+two possible approaches: reactively install flows as they miss the kernel
+flow table (and therefore not attempt to determine wildcard changes at all)
+or use the kernel's response messages to determine the installed wildcards.
+
+When interacting with userspace, the kernel should maintain the match portion
+of the key exactly as originally installed. This provides a handle to
+identify the flow for all future operations. However, when reporting the
+mask of an installed flow, the mask should include any restrictions imposed
+by the kernel.
+
+The behavior when using overlapping wildcarded flows is undefined. It is the
+responsibility of the user space program to ensure that any incoming packet
+can match at most one flow, wildcarded or not. The current implementation
+performs best-effort detection of overlapping wildcarded flows and may
+reject some but not all of them. However, this behavior may change in future
+versions.
+
 Basic rule for evolving flow keys
 ---------------------------------
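
For intuition, the mask semantics added above reduce to a simple bitwise
test. An illustrative sketch only -- it collapses the flow key to a single
integer, which is not how the kernel represents it:

	#include <stdbool.h>
	#include <stdint.h>

	/* A '1' bit in the mask demands an exact match with the flow key;
	 * a '0' bit is don't-care and matches anything. */
	static bool wildcard_match(uint64_t pkt, uint64_t key, uint64_t mask)
	{
		return ((pkt ^ key) & mask) == 0;
	}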


@@ -543,6 +543,14 @@ TPACKET_V2 --> TPACKET_V3:
 In the AF_PACKET fanout mode, packet reception can be load balanced among
 processes. This also works in combination with mmap(2) on packet sockets.

+Currently implemented fanout policies are:
+
+  - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash
+  - PACKET_FANOUT_LB: schedule to socket by round-robin
+  - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
+  - PACKET_FANOUT_RND: schedule to socket by random selection
+  - PACKET_FANOUT_ROLLOVER: if one socket is full, rollover to another
+
 Minimal example code by David S. Miller (try things like "./test eth0 hash",
 "./test eth0 lb", etc.):


@@ -19,7 +19,6 @@ of SCTP that is RFC 2960 compliant and provides a programming interface
 referred to as the UDP-style API of the Sockets Extensions for SCTP, as
 proposed in IETF Internet-Drafts.

-
 Caveats:

 -lksctp can be built statically or as a module. However, be aware that
@@ -33,6 +32,4 @@ For more information, please visit the lksctp project website:
 http://www.sf.net/projects/lksctp

 Or contact the lksctp developers through the mailing list:
-   <lksctp-developers@lists.sourceforge.net>
+   <linux-sctp@vger.kernel.org>


@@ -123,6 +123,7 @@ struct plat_stmmacenet_data {
 	int bugged_jumbo;
 	int pmt;
 	int force_sf_dma_mode;
+	int force_thresh_dma_mode;
 	int riwt_off;
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	void (*bus_setup)(void __iomem *ioaddr);
@@ -159,6 +160,8 @@ Where:
 o pmt: core has the embedded power module (optional).
 o force_sf_dma_mode: force DMA to use the Store and Forward mode
   instead of the Threshold.
+o force_thresh_dma_mode: force DMA to use the Threshold mode rather than
+  the Store and Forward mode.
 o riwt_off: force to disable the RX watchdog feature and switch to NAPI mode.
 o fix_mac_speed: this callback is used for modifying some syscfg registers
   (on ST SoCs) according to the link speed negotiated by the


@@ -2,9 +2,8 @@ Transparent proxy support
 =========================

 This feature adds Linux 2.2-like transparent proxy support to current kernels.
-To use it, enable NETFILTER_TPROXY, the socket match and the TPROXY target in
-your kernel config. You will need policy routing too, so be sure to enable that
-as well.
+To use it, enable the socket match and the TPROXY target in your kernel config.
+You will need policy routing too, so be sure to enable that as well.

 1. Making non-local sockets work


@@ -50,6 +50,19 @@ The maximum number of packets that kernel can handle on a NAPI interrupt,
 it's a Per-CPU variable.
 Default: 64

+default_qdisc
+--------------
+
+The default queuing discipline to use for network devices. This allows
+overriding the default of pfifo_fast with an alternative. Since the default
+queuing discipline is created without any additional parameters, it is best
+suited to queuing disciplines that work well without configuration, like
+stochastic fair queue (sfq), CoDel (codel) or fair queue CoDel (fq_codel).
+Don't use queuing disciplines like Hierarchical Token Bucket or Deficit
+Round Robin which require setting up classes and bandwidths.
+Default: pfifo_fast
+
 busy_read
 ----------------
 Low latency busy poll timeout for socket reads. (needs CONFIG_NET_RX_BUSY_POLL)
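
The fq scheduler from item 12 of the pull request is a natural value for
the default_qdisc knob above. A hedged sketch of setting it
programmatically, equivalent to "sysctl -w net.core.default_qdisc=fq";
the helper name is ours:

	#include <stdio.h>

	static int set_default_qdisc(const char *name)
	{
		FILE *f = fopen("/proc/sys/net/core/default_qdisc", "w");

		if (!f)
			return -1;
		fprintf(f, "%s\n", name);
		return fclose(f);	/* 0 on success */
	}

Only qdiscs created after the write pick up the new default; existing
devices keep their current qdisc until it is changed with tc.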


@@ -2108,7 +2108,8 @@ F: drivers/usb/chipidea/
 CISCO VIC ETHERNET NIC DRIVER
 M:	Christian Benvenuti <benve@cisco.com>
-M:	Roopa Prabhu <roprabhu@cisco.com>
+M:	Sujith Sankar <ssujith@cisco.com>
+M:	Govindarajulu Varadarajan <govindarajulu90@gmail.com>
 M:	Neel Patel <neepatel@cisco.com>
 M:	Nishank Trivedi <nistrive@cisco.com>
 S:	Supported
@@ -4404,7 +4405,7 @@ F: drivers/net/wireless/iwlegacy/
 INTEL WIRELESS WIFI LINK (iwlwifi)
 M:	Johannes Berg <johannes.berg@intel.com>
-M:	Wey-Yi Guy <wey-yi.w.guy@intel.com>
+M:	Emmanuel Grumbach <emmanuel.grumbach@intel.com>
 M:	Intel Linux Wireless <ilw@linux.intel.com>
 L:	linux-wireless@vger.kernel.org
 W:	http://intellinuxwireless.org
@@ -5831,7 +5832,7 @@ M: Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
 M:	Samuel Ortiz <sameo@linux.intel.com>
 L:	linux-wireless@vger.kernel.org
 L:	linux-nfc@lists.01.org (moderated for non-subscribers)
-S:	Maintained
+S:	Supported
 F:	net/nfc/
 F:	include/net/nfc/
 F:	include/uapi/linux/nfc.h
@@ -7274,6 +7275,7 @@ W: http://lksctp.sourceforge.net
 S:	Maintained
 F:	Documentation/networking/sctp.txt
 F:	include/linux/sctp.h
+F:	include/uapi/linux/sctp.h
 F:	include/net/sctp/
 F:	net/sctp/
@@ -8022,6 +8024,12 @@ F: arch/m68k/sun3*/
 F:	arch/m68k/include/asm/sun3*
 F:	drivers/net/ethernet/i825xx/sun3*

+SUNDANCE NETWORK DRIVER
+M:	Denis Kirjanov <kda@linux-powerpc.org>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/dlink/sundance.c
+
 SUPERH
 M:	Paul Mundt <lethal@linux-sh.org>
 L:	linux-sh@vger.kernel.org


@@ -81,6 +81,14 @@
 			macb1: ethernet@f802c000 {
 				phy-mode = "rmii";
+
+				#address-cells = <1>;
+				#size-cells = <0>;
+				phy0: ethernet-phy@1 {
+					interrupt-parent = <&pioE>;
+					interrupts = <30 IRQ_TYPE_EDGE_FALLING>;
+					reg = <1>;
+				};
 			};

 			pinctrl@fffff200 {


@@ -73,9 +73,6 @@ static struct pxa2xx_spi_chip mcp251x_chip_info4 = {
 static struct mcp251x_platform_data mcp251x_info = {
 	.oscillator_frequency = 16E6,
-	.board_specific_setup = NULL,
-	.power_enable = NULL,
-	.transceiver_enable = NULL
 };

 static struct spi_board_info mcp251x_board_info[] = {


@@ -29,6 +29,8 @@
 #include <linux/i2c/pca953x.h>
 #include <linux/apm-emulation.h>
 #include <linux/can/platform/mcp251x.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>

 #include <asm/mach-types.h>
 #include <asm/suspend.h>
@@ -391,33 +393,34 @@ static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = {
 };

 /* CAN bus on SPI */
-static int zeus_mcp2515_setup(struct spi_device *sdev)
-{
-	int err;
-
-	err = gpio_request(ZEUS_CAN_SHDN_GPIO, "CAN shutdown");
-	if (err)
-		return err;
-
-	err = gpio_direction_output(ZEUS_CAN_SHDN_GPIO, 1);
-	if (err) {
-		gpio_free(ZEUS_CAN_SHDN_GPIO);
-		return err;
-	}
-
-	return 0;
-}
-
-static int zeus_mcp2515_transceiver_enable(int enable)
-{
-	gpio_set_value(ZEUS_CAN_SHDN_GPIO, !enable);
-	return 0;
-}
+static struct regulator_consumer_supply can_regulator_consumer =
+	REGULATOR_SUPPLY("vdd", "spi3.0");
+
+static struct regulator_init_data can_regulator_init_data = {
+	.constraints	= {
+		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
+	},
+	.consumer_supplies	= &can_regulator_consumer,
+	.num_consumer_supplies	= 1,
+};
+
+static struct fixed_voltage_config can_regulator_pdata = {
+	.supply_name	= "CAN_SHDN",
+	.microvolts	= 3300000,
+	.gpio		= ZEUS_CAN_SHDN_GPIO,
+	.init_data	= &can_regulator_init_data,
+};
+
+static struct platform_device can_regulator_device = {
+	.name	= "reg-fixed-volage",
+	.id	= -1,
+	.dev	= {
+		.platform_data	= &can_regulator_pdata,
+	},
+};

 static struct mcp251x_platform_data zeus_mcp2515_pdata = {
 	.oscillator_frequency	= 16*1000*1000,
-	.board_specific_setup	= zeus_mcp2515_setup,
-	.power_enable		= zeus_mcp2515_transceiver_enable,
 };

 static struct spi_board_info zeus_spi_board_info[] = {
@@ -516,6 +519,7 @@ static struct platform_device *zeus_devices[] __initdata = {
 	&zeus_leds_device,
 	&zeus_pcmcia_device,
 	&zeus_max6369_device,
+	&can_regulator_device,
 };

 /* AC'97 */


@@ -358,7 +358,6 @@ static struct platform_device usbhsf_device = {
 static struct sh_eth_plat_data sh_eth_platdata = {
 	.phy			= 0x00, /* LAN8710A */
 	.edmac_endian		= EDMAC_LITTLE_ENDIAN,
-	.register_type		= SH_ETH_REG_GIGABIT,
 	.phy_interface		= PHY_INTERFACE_MODE_MII,
 };


@@ -91,7 +91,6 @@ static struct sh_mobile_sdhi_info sdhi0_info = {
 static struct sh_eth_plat_data ether_platform_data __initdata = {
 	.phy		= 0x01,
 	.edmac_endian	= EDMAC_LITTLE_ENDIAN,
-	.register_type	= SH_ETH_REG_FAST_RCAR,
 	.phy_interface	= PHY_INTERFACE_MODE_RMII,
 	/*
 	 * Although the LINK signal is available on the board, it's connected to


@@ -80,7 +80,6 @@ static struct resource sh_eth_resources[] = {
 static struct sh_eth_plat_data sh7763_eth_pdata = {
 	.phy		= 0,
 	.edmac_endian	= EDMAC_LITTLE_ENDIAN,
-	.register_type	= SH_ETH_REG_GIGABIT,
 	.phy_interface	= PHY_INTERFACE_MODE_MII,
 };


@@ -77,7 +77,6 @@ static struct resource sh_eth0_resources[] = {
 static struct sh_eth_plat_data sh7757_eth0_pdata = {
 	.phy = 1,
 	.edmac_endian = EDMAC_LITTLE_ENDIAN,
-	.register_type = SH_ETH_REG_FAST_SH4,
 	.set_mdio_gate = sh7757_eth_set_mdio_gate,
 };
@@ -106,7 +105,6 @@ static struct resource sh_eth1_resources[] = {
 static struct sh_eth_plat_data sh7757_eth1_pdata = {
 	.phy = 1,
 	.edmac_endian = EDMAC_LITTLE_ENDIAN,
-	.register_type = SH_ETH_REG_FAST_SH4,
 	.set_mdio_gate = sh7757_eth_set_mdio_gate,
 };
@@ -151,7 +149,6 @@ static struct resource sh_eth_giga0_resources[] = {
 static struct sh_eth_plat_data sh7757_eth_giga0_pdata = {
 	.phy = 18,
 	.edmac_endian = EDMAC_LITTLE_ENDIAN,
-	.register_type = SH_ETH_REG_GIGABIT,
 	.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
 	.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
 };
@@ -186,7 +183,6 @@ static struct resource sh_eth_giga1_resources[] = {
 static struct sh_eth_plat_data sh7757_eth_giga1_pdata = {
 	.phy = 19,
 	.edmac_endian = EDMAC_LITTLE_ENDIAN,
-	.register_type = SH_ETH_REG_GIGABIT,
 	.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
 	.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
 };


@@ -159,7 +159,6 @@ static struct resource sh_eth_resources[] = {
 static struct sh_eth_plat_data sh_eth_plat = {
 	.phy = 0x1f, /* SMSC LAN8700 */
 	.edmac_endian = EDMAC_LITTLE_ENDIAN,
-	.register_type = SH_ETH_REG_FAST_SH4,
 	.phy_interface = PHY_INTERFACE_MODE_MII,
 	.ether_link_active_low = 1
 };


@@ -365,7 +365,7 @@ static struct platform_device keysc_device = {
 static struct resource sh_eth_resources[] = {
 	[0] = {
 		.start = SH_ETH_ADDR,
-		.end   = SH_ETH_ADDR + 0x1FC,
+		.end   = SH_ETH_ADDR + 0x1FC - 1,
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = {
@@ -377,6 +377,7 @@ static struct resource sh_eth_resources[] = {
 static struct sh_eth_plat_data sh_eth_plat = {
 	.phy = 0x1f, /* SMSC LAN8187 */
 	.edmac_endian = EDMAC_LITTLE_ENDIAN,
+	.phy_interface = PHY_INTERFACE_MODE_MII,
 };

 static struct platform_device sh_eth_device = {


@@ -88,7 +88,6 @@ static struct resource sh_eth_resources[] = {
 static struct sh_eth_plat_data sh7763_eth_pdata = {
 	.phy = 1,
 	.edmac_endian = EDMAC_LITTLE_ENDIAN,
-	.register_type = SH_ETH_REG_GIGABIT,
 	.phy_interface = PHY_INTERFACE_MODE_MII,
 };


@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/serial.h>
 #include <linux/serial_sci.h>
+#include <linux/sh_eth.h>
 #include <linux/sh_timer.h>
 #include <linux/io.h>
@@ -110,10 +111,16 @@ static struct platform_device scif2_device = {
 	},
 };

+static struct sh_eth_plat_data eth_platform_data = {
+	.phy		= 1,
+	.edmac_endian	= EDMAC_LITTLE_ENDIAN,
+	.phy_interface	= PHY_INTERFACE_MODE_MII,
+};
+
 static struct resource eth_resources[] = {
 	[0] = {
 		.start = 0xfb000000,
-		.end = 0xfb0001c8,
+		.end = 0xfb0001c7,
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = {
@@ -127,7 +134,7 @@ static struct platform_device eth_device = {
 	.name = "sh7619-ether",
 	.id = -1,
 	.dev = {
-		.platform_data = (void *)1,
+		.platform_data = &eth_platform_data,
 	},
 	.num_resources = ARRAY_SIZE(eth_resources),
 	.resource = eth_resources,


@@ -387,6 +387,27 @@ int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac)
 EXPORT_SYMBOL(gxio_mpipe_link_close_aux);
+struct link_set_attr_aux_param {
+	int mac;
+	uint32_t attr;
+	int64_t val;
+};
+
+int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
+				 uint32_t attr, int64_t val)
+{
+	struct link_set_attr_aux_param temp;
+	struct link_set_attr_aux_param *params = &temp;
+
+	params->mac = mac;
+	params->attr = attr;
+	params->val = val;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_LINK_SET_ATTR_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_link_set_attr_aux);
 struct get_timestamp_aux_param {
 	uint64_t sec;
@@ -454,6 +475,51 @@ int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
 EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux);
+struct adjust_timestamp_freq_param {
+	int32_t ppb;
+};
+
+int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
+				     int32_t ppb)
+{
+	struct adjust_timestamp_freq_param temp;
+	struct adjust_timestamp_freq_param *params = &temp;
+
+	params->ppb = ppb;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params),
+			     GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq);
+
+struct config_edma_ring_blks_param {
+	unsigned int ering;
+	unsigned int max_blks;
+	unsigned int min_snf_blks;
+	unsigned int db;
+};
+
+int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context,
+				     unsigned int ering, unsigned int max_blks,
+				     unsigned int min_snf_blks, unsigned int db)
+{
+	struct config_edma_ring_blks_param temp;
+	struct config_edma_ring_blks_param *params = &temp;
+
+	params->ering = ering;
+	params->max_blks = max_blks;
+	params->min_snf_blks = min_snf_blks;
+	params->db = db;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params),
+			     GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks);
 struct arm_pollfd_param {
 	union iorpc_pollfd pollfd;
 };


@@ -16,6 +16,24 @@
 #include "gxio/iorpc_mpipe_info.h"
+struct instance_aux_param {
+	_gxio_mpipe_link_name_t name;
+};
+
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+				 _gxio_mpipe_link_name_t name)
+{
+	struct instance_aux_param temp;
+	struct instance_aux_param *params = &temp;
+
+	params->name = name;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_INFO_OP_INSTANCE_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_info_instance_aux);
 struct enumerate_aux_param {
 	_gxio_mpipe_link_name_t name;
 	_gxio_mpipe_link_mac_t mac;


@@ -36,8 +36,14 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 	int fd;
 	int i;
+	if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
+		return -EINVAL;
+
 	snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
 	fd = hv_dev_open((HV_VirtAddr) file, 0);
+
+	context->fd = fd;
+
 	if (fd < 0) {
 		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
 			return fd;
@@ -45,8 +51,6 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 			return -ENODEV;
 	}
-	context->fd = fd;
-
 	/* Map in the MMIO space. */
 	context->mmio_cfg_base = (void __force *)
 		iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
@@ -64,12 +68,15 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 	for (i = 0; i < 8; i++)
 		context->__stacks.stacks[i] = 255;
+	context->instance = mpipe_index;
+
 	return 0;
 fast_failed:
 	iounmap((void __force __iomem *)(context->mmio_cfg_base));
 cfg_failed:
 	hv_dev_close(context->fd);
+	context->fd = -1;
 	return -ENODEV;
 }
@@ -383,7 +390,7 @@ EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
 int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
 			   gxio_mpipe_context_t *context,
-			   unsigned int edma_ring_id,
+			   unsigned int ering,
 			   unsigned int channel,
 			   void *mem, unsigned int mem_size,
 			   unsigned int mem_flags)
@@ -394,7 +401,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
 	/* Offset used to read number of completed commands. */
 	MPIPE_EDMA_POST_REGION_ADDR_t offset;
-	int result = gxio_mpipe_init_edma_ring(context, edma_ring_id, channel,
+	int result = gxio_mpipe_init_edma_ring(context, ering, channel,
 					       mem, mem_size, mem_flags);
 	if (result < 0)
 		return result;
@@ -405,7 +412,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
 	offset.region =
 		MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
 		MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
-	offset.ring = edma_ring_id;
+	offset.ring = ering;
 	__gxio_dma_queue_init(&equeue->dma_queue,
 			      context->mmio_fast_base + offset.word,
@@ -413,6 +420,9 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
 	equeue->edescs = mem;
 	equeue->mask_num_entries = num_entries - 1;
 	equeue->log2_num_entries = __builtin_ctz(num_entries);
+	equeue->context = context;
+	equeue->ering = ering;
+	equeue->channel = channel;
 	return 0;
 }
@@ -493,6 +503,20 @@ static gxio_mpipe_context_t *_gxio_get_link_context(void)
 	return contextp;
 }
+int gxio_mpipe_link_instance(const char *link_name)
+{
+	_gxio_mpipe_link_name_t name;
+	gxio_mpipe_context_t *context = _gxio_get_link_context();
+
+	if (!context)
+		return GXIO_ERR_NO_DEVICE;
+
+	strncpy(name.name, link_name, sizeof(name.name));
+	name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
+
+	return gxio_mpipe_info_instance_aux(context, name);
+}
+
 int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
 {
 	int rv;
@@ -543,3 +567,12 @@ int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
 }
 EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
+
+int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
+			     int64_t val)
+{
+	return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr,
+					    val);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_link_set_attr);
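Taken together with the header changes below, the new multi-instance flow is: translate a link name to an mPIPE instance, initialize a context for that instance, then open the link against that context. Roughly (the link name "gbe0" and the bare error returns are illustrative):

    gxio_mpipe_context_t context;
    gxio_mpipe_link_t link;
    int instance, rc;

    /* Which mPIPE shim is this link wired to? */
    instance = gxio_mpipe_link_instance("gbe0");
    if (instance < 0)
    	return instance;		/* e.g. GXIO_ERR_NO_DEVICE */

    rc = gxio_mpipe_init(&context, instance);
    if (rc < 0)
    	return rc;			/* -EINVAL if instance is out of range */

    rc = gxio_mpipe_link_open(&link, &context, "gbe0", 0);
    if (rc < 0)
    	return rc;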


@@ -44,10 +44,13 @@
 #define GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1210)
 #define GXIO_MPIPE_OP_LINK_OPEN_AUX    IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1211)
 #define GXIO_MPIPE_OP_LINK_CLOSE_AUX   IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212)
+#define GXIO_MPIPE_OP_LINK_SET_ATTR_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1213)
-#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121e)
-#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121f)
-#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1220)
+#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121e)
+#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121f)
+#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1220)
+#define GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1221)
+#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1222)
 #define GXIO_MPIPE_OP_ARM_POLLFD       IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000)
 #define GXIO_MPIPE_OP_CLOSE_POLLFD     IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001)
 #define GXIO_MPIPE_OP_GET_MMIO_BASE    IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
@@ -114,6 +117,8 @@ int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
 int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac);
+int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
+				 uint32_t attr, int64_t val);
 int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
 				 uint64_t * nsec, uint64_t * cycles);
@@ -124,6 +129,9 @@ int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
 int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
 				    int64_t nsec);
+int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
+				     int32_t ppb);
+
 int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
 int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);


@@ -27,11 +27,15 @@
 #include <asm/pgtable.h>
+#define GXIO_MPIPE_INFO_OP_INSTANCE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1250)
 #define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251)
 #define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
 #define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
+
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+				 _gxio_mpipe_link_name_t name);
 int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
 				  unsigned int idx,
 				  _gxio_mpipe_link_name_t * name,


@@ -220,6 +220,13 @@ typedef MPIPE_PDESC_t gxio_mpipe_idesc_t;
  */
 typedef MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t;
+
+/*
+ * Max # of mpipe instances. 2 currently.
+ */
+#define GXIO_MPIPE_INSTANCE_MAX  HV_MPIPE_INSTANCE_MAX
+
+#define NR_MPIPE_MAX  GXIO_MPIPE_INSTANCE_MAX
 /* Get the "va" field from an "idesc".
  *
  * This is the address at which the ingress hardware copied the first
@@ -311,6 +318,9 @@ typedef struct {
 	/* File descriptor for calling up to Linux (and thus the HV). */
 	int fd;
+	/* Corresponding mpipe instance #. */
+	int instance;
+
 	/* The VA at which configuration registers are mapped. */
 	char *mmio_cfg_base;
@@ -810,7 +820,7 @@ extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
 /* Initialize an eDMA ring, using the given memory and size.
  *
  * @param context An initialized mPIPE context.
- * @param ring The eDMA ring index.
+ * @param ering The eDMA ring index.
  * @param channel The channel to use.  This must be one of the channels
  * associated with the context's set of open links.
  * @param mem A physically contiguous region of memory to be filled
@@ -823,10 +833,37 @@ extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
 * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
 */
 extern int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
-				     unsigned int ring, unsigned int channel,
+				     unsigned int ering, unsigned int channel,
 				     void *mem, size_t mem_size,
 				     unsigned int mem_flags);
+
+/* Set the "max_blks", "min_snf_blks", and "db" fields of
+ * ::MPIPE_EDMA_RG_INIT_DAT_THRESH_t for a given edma ring.
+ *
+ * The global pool of dynamic blocks will be automatically adjusted.
+ *
+ * This function should not be called after any egress has been done
+ * on the edma ring.
+ *
+ * Most applications should just use gxio_mpipe_equeue_set_snf_size().
+ *
+ * @param context An initialized mPIPE context.
+ * @param ering The eDMA ring index.
+ * @param max_blks The number of blocks to dedicate to the ring
+ * (normally min_snf_blks + 1).  Must be greater than min_snf_blocks.
+ * @param min_snf_blks The number of blocks which must be stored
+ * prior to starting to send the packet (normally 12).
+ * @param db Whether to allow use of dynamic blocks by the ring
+ * (normally 1).
+ *
+ * @return 0 on success, negative on error.
+ */
+extern int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context,
+					    unsigned int ering,
+					    unsigned int max_blks,
+					    unsigned int min_snf_blks,
+					    unsigned int db);
+
 /*****************************************************************
  *                    Classifier Program                         *
  *****************************************************************/
@@ -1288,15 +1325,39 @@ typedef struct {
 	/* The log2() of the number of entries. */
 	unsigned long log2_num_entries;
+
+	/* The context. */
+	gxio_mpipe_context_t *context;
+
+	/* The ering. */
+	unsigned int ering;
+
+	/* The channel. */
+	unsigned int channel;
+
 } gxio_mpipe_equeue_t;
 /* Initialize an "equeue".
  *
- * Takes the equeue plus the same args as gxio_mpipe_init_edma_ring().
+ * This function uses gxio_mpipe_init_edma_ring() to initialize the
+ * underlying edma_ring using the provided arguments.
+ *
+ * @param equeue An egress queue to be initialized.
+ * @param context An initialized mPIPE context.
+ * @param ering The eDMA ring index.
+ * @param channel The channel to use.  This must be one of the channels
+ * associated with the context's set of open links.
+ * @param mem A physically contiguous region of memory to be filled
+ * with a ring of ::gxio_mpipe_edesc_t structures.
+ * @param mem_size Number of bytes in the ring.  Must be 512, 2048,
+ * 8192 or 65536, times 16 (i.e. sizeof(gxio_mpipe_edesc_t)).
+ * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
+ *
+ * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_EDMA_RING or
+ * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
  */
 extern int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
 				  gxio_mpipe_context_t *context,
-				  unsigned int edma_ring_id,
+				  unsigned int ering,
 				  unsigned int channel,
 				  void *mem, unsigned int mem_size,
 				  unsigned int mem_flags);
@@ -1494,6 +1555,37 @@ static inline int gxio_mpipe_equeue_is_complete(gxio_mpipe_equeue_t *equeue,
 						  completion_slot, update);
 }
+
+/* Set the snf (store and forward) size for an equeue.
+ *
+ * The snf size for an equeue defaults to 1536, and encodes the size
+ * of the largest packet for which egress is guaranteed to avoid
+ * transmission underruns and/or corrupt checksums under heavy load.
+ *
+ * The snf size affects a global resource pool which cannot support,
+ * for example, all 24 equeues each requesting an snf size of 8K.
+ *
+ * To ensure that jumbo packets can be egressed properly, the snf size
+ * should be set to the size of the largest possible packet, which
+ * will usually be limited by the size of the app's largest buffer.
+ *
+ * This is a convenience wrapper around
+ * gxio_mpipe_config_edma_ring_blks().
+ *
+ * This function should not be called after any egress has been done
+ * on the equeue.
+ *
+ * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
+ * @param size The snf size, in bytes.
+ * @return Zero on success, negative error otherwise.
+ */
+static inline int gxio_mpipe_equeue_set_snf_size(gxio_mpipe_equeue_t *equeue,
+						 size_t size)
+{
+	int blks = (size + 127) / 128;
+
+	return gxio_mpipe_config_edma_ring_blks(equeue->context, equeue->ering,
+						blks + 1, blks, 1);
+}
+
 /*****************************************************************
  *                      Link Management                          *
  *****************************************************************/
@@ -1634,6 +1726,24 @@ typedef struct {
 	uint8_t mac;
 } gxio_mpipe_link_t;
+
+/* Translate a link name to the instance number of the mPIPE shim which is
+ * connected to that link.  This call does not verify whether the link is
+ * currently available, and does not reserve any link resources;
+ * gxio_mpipe_link_open() must be called to perform those functions.
+ *
+ * Typically applications will call this function to translate a link name
+ * to an mPIPE instance number; call gxio_mpipe_init(), passing it that
+ * instance number, to initialize the mPIPE shim; and then call
+ * gxio_mpipe_link_open(), passing it the same link name plus the mPIPE
+ * context, to configure the link.
+ *
+ * @param link_name Name of the link; see @ref gxio_mpipe_link_names.
+ * @return The mPIPE instance number which is associated with the named
+ * link, or a negative error code (::GXIO_ERR_NO_DEVICE) if the link does
+ * not exist.
+ */
+extern int gxio_mpipe_link_instance(const char *link_name);
+
 /* Retrieve one of this system's legal link names, and its MAC address.
  *
  * @param index Link name index.  If a system supports N legal link names,
@@ -1697,6 +1807,17 @@ static inline int gxio_mpipe_link_channel(gxio_mpipe_link_t *link)
 	return link->channel;
 }
+
+/* Set a link attribute.
+ *
+ * @param link A properly initialized link state object.
+ * @param attr An attribute from the set of @ref gxio_mpipe_link_attrs.
+ * @param val New value of the attribute.
+ * @return 0 if the attribute was successfully set, or a negative error
+ * code.
+ */
+extern int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
+				    int64_t val);
+
 ///////////////////////////////////////////////////////////////////
 //                             Timestamp                         //
 ///////////////////////////////////////////////////////////////////
@@ -1733,4 +1854,18 @@ extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
 extern int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context,
 				       int64_t delta);
+
+/** Adjust the mPIPE timestamp clock frequency.
+ *
+ * @param context An initialized mPIPE context.
+ * @param ppb A 32-bit signed PPB (Parts Per Billion) value to adjust.
+ * The absolute value of ppb must be less than or equal to 1000000000.
+ * Values less than about 30000 will generally cause a GXIO_ERR_INVAL
+ * return due to the granularity of the hardware that converts reference
+ * clock cycles into seconds and nanoseconds.
+ * @return If the call was successful, zero; otherwise, a negative error
+ * code.
+ */
+extern int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t* context,
+					    int32_t ppb);
+
 #endif /* !_GXIO_MPIPE_H_ */
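Following the comment above, a driver that wants underrun-free jumbo egress would raise the store-and-forward size right after initializing the equeue and before the first egress. A minimal sketch (the 9000-byte size and the surrounding variables are illustrative):

    rc = gxio_mpipe_equeue_init(&equeue, &context, ering, channel,
    			    mem, mem_size, 0);
    if (rc == 0) {
    	/* Internally: blks = (9000 + 127) / 128, then
    	 * gxio_mpipe_config_edma_ring_blks(ctx, ering, blks + 1, blks, 1).
    	 */
    	rc = gxio_mpipe_equeue_set_snf_size(&equeue, 9000);
    }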


@@ -23,6 +23,9 @@
 #include <arch/mpipe_constants.h>
+/** Number of mPIPE instances supported */
+#define HV_MPIPE_INSTANCE_MAX  (2)
+
 /** Number of buffer stacks (32). */
 #define HV_MPIPE_NUM_BUFFER_STACKS \
   (MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH)


@@ -1088,15 +1088,8 @@ static int he_start(struct atm_dev *dev)
 	for (i = 0; i < 6; ++i)
 		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
-	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
-		he_dev->prod_id,
-		he_dev->media & 0x40 ? "SM" : "MM",
-		dev->esi[0],
-		dev->esi[1],
-		dev->esi[2],
-		dev->esi[3],
-		dev->esi[4],
-		dev->esi[5]);
+	hprintk("%s%s, %pM\n", he_dev->prod_id,
+		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
 	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
 				ATM_OC12_PCR : ATM_OC3_PCR;
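The replacement relies on the kernel's %pM printk extension, which formats the 6-byte MAC address behind a pointer as colon-separated hex, so one argument replaces six %x arguments:

    u8 esi[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

    pr_info("ESI %pM\n", esi);	/* prints "ESI 00:10:18:aa:bb:cc" */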


@@ -153,7 +153,6 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
 static void which_list(ns_dev * card, struct sk_buff *skb);
 #endif
 static void ns_poll(unsigned long arg);
-static int ns_parse_mac(char *mac, unsigned char *esi);
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
 		       unsigned long addr);
 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
@@ -779,7 +778,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
 		return error;
 	}
-	if (ns_parse_mac(mac[i], card->atmdev->esi)) {
+	if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
 		nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
 				   card->atmdev->esi, 6);
 		if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
@@ -2802,29 +2801,6 @@ static void ns_poll(unsigned long arg)
 	PRINTK("nicstar: Leaving ns_poll().\n");
 }
-static int ns_parse_mac(char *mac, unsigned char *esi)
-{
-	int i, j;
-	short byte1, byte0;
-
-	if (mac == NULL || esi == NULL)
-		return -1;
-	j = 0;
-	for (i = 0; i < 6; i++) {
-		if ((byte1 = hex_to_bin(mac[j++])) < 0)
-			return -1;
-		if ((byte0 = hex_to_bin(mac[j++])) < 0)
-			return -1;
-		esi[i] = (unsigned char)(byte1 * 16 + byte0);
-		if (i < 5) {
-			if (mac[j++] != ':')
-				return -1;
-		}
-	}
-	return 0;
-}
-
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
 		       unsigned long addr)
 {
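mac_pton() parses a colon-separated "xx:xx:xx:xx:xx:xx" string into a 6-byte buffer and returns nonzero only when the whole string parses, which is exactly what the removed ns_parse_mac() hand-rolled. Note the negation in the converted condition: the EEPROM fallback must still run when the module parameter is absent or malformed. In isolation (mac_string is a placeholder):

    u8 esi[ETH_ALEN];

    if (!mac_pton(mac_string, esi))
    	pr_warn("bad MAC \"%s\", reading EEPROM instead\n", mac_string);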


@@ -35,8 +35,14 @@ config BCMA_DRIVER_PCI_HOSTMODE
 	  PCI core hostmode operation (external PCI bus).
 config BCMA_HOST_SOC
-	bool
-	depends on BCMA_DRIVER_MIPS
+	bool "Support for BCMA in a SoC"
+	depends on BCMA
+	help
+	  Host interface for a Broadcom AIX bus directly mapped into
+	  the memory. This only works with the Broadcom SoCs from the
+	  BCM47XX line.
+
+	  If unsure, say N
 config BCMA_DRIVER_MIPS
 	bool "BCMA Broadcom MIPS core driver"


@@ -31,7 +31,7 @@ static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
 	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
 }
-static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
+static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy)
 {
 	u32 v;
 	int i;
@@ -55,7 +55,7 @@ static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
 	}
 }
-static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
+static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
 {
 	int max_retries = 10;
 	u16 ret = 0;
@@ -98,7 +98,7 @@ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
 	return ret;
 }
-static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
+static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
 				 u8 address, u16 data)
 {
 	int max_retries = 10;
@@ -137,6 +137,13 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
 	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
 }
+static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
+				    u8 address, u16 data)
+{
+	bcma_pcie_mdio_write(pc, device, address, data);
+	return bcma_pcie_mdio_read(pc, device, address);
+}
+
 /**************************************************
  * Workarounds.
  **************************************************/
@@ -203,6 +210,25 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
 	}
 }
+static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up)
+{
+	u16 data;
+
+	if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
+		data = up ? 0x74 : 0x7C;
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+	} else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
+		data = up ? 0x75 : 0x7D;
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+	}
+}
+
 /**************************************************
  * Init.
 **************************************************/
@@ -262,7 +288,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
-void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
+static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
 {
 	u32 w;
@@ -274,4 +300,33 @@ void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
 	bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
 	bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
 }
-EXPORT_SYMBOL_GPL(bcma_core_pci_extend_L1timer);
+
+void bcma_core_pci_up(struct bcma_bus *bus)
+{
+	struct bcma_drv_pci *pc;
+
+	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+		return;
+
+	pc = &bus->drv_pci[0];
+
+	bcma_core_pci_power_save(pc, true);
+
+	bcma_core_pci_extend_L1timer(pc, true);
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_up);
+
+void bcma_core_pci_down(struct bcma_bus *bus)
+{
+	struct bcma_drv_pci *pc;
+
+	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+		return;
+
+	pc = &bus->drv_pci[0];
+
+	bcma_core_pci_extend_L1timer(pc, false);
+
+	bcma_core_pci_power_save(pc, false);
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_down);
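bcma_core_pci_up()/bcma_core_pci_down() give bus clients a single entry point for the PCIe power-save and ASPM L1-timer handling, which is why bcma_core_pci_extend_L1timer() can become static above. A client driver would bracket its active period roughly like this (the function names are illustrative, not taken from a specific driver):

    static int example_core_start(struct bcma_device *core)
    {
    	/* no-op unless the bus is PCI-hosted */
    	bcma_core_pci_up(core->bus);
    	/* ... enable DMA, interrupts, etc. ... */
    	return 0;
    }

    static void example_core_stop(struct bcma_device *core)
    {
    	/* ... quiesce the hardware ... */
    	bcma_core_pci_down(core->bus);
    }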


@@ -581,6 +581,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);
 int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
 {
 	struct bcma_drv_pci_host *pc_host;
+	int readrq;
 	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
 		/* This is not a device on the PCI-core bridge. */
@@ -595,6 +596,11 @@ int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
 	dev->irq = bcma_core_irq(pc_host->pdev->core);
 	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+	readrq = pcie_get_readrq(dev);
+	if (readrq > 128) {
+		pr_info("change PCIe max read request size from %i to 128\n", readrq);
+		pcie_set_readrq(dev, 128);
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);


@@ -237,7 +237,7 @@ int bcma_bus_register(struct bcma_bus *bus)
 	err = bcma_bus_scan(bus);
 	if (err) {
 		bcma_err(bus, "Failed to scan: %d\n", err);
-		return -1;
+		return err;
 	}
 	/* Early init CC core */


@@ -32,6 +32,18 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = {
 	{ BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
 	{ BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
 	{ BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
+	{ BCMA_CORE_PCIEG2, "PCIe Gen 2" },
+	{ BCMA_CORE_DMA, "DMA" },
+	{ BCMA_CORE_SDIO3, "SDIO3" },
+	{ BCMA_CORE_USB20, "USB 2.0" },
+	{ BCMA_CORE_USB30, "USB 3.0" },
+	{ BCMA_CORE_A9JTAG, "ARM Cortex A9 JTAG" },
+	{ BCMA_CORE_DDR23, "Denali DDR2/DDR3 memory controller" },
+	{ BCMA_CORE_ROM, "ROM" },
+	{ BCMA_CORE_NAND, "NAND flash controller" },
+	{ BCMA_CORE_QSPI, "SPI flash controller" },
+	{ BCMA_CORE_CHIPCOMMON_B, "Chipcommon B" },
+	{ BCMA_CORE_ARMCA9, "ARM Cortex A9 core (ihost)" },
 	{ BCMA_CORE_AMEMC, "AMEMC (DDR)" },
 	{ BCMA_CORE_ALTA, "ALTA (I2S)" },
 	{ BCMA_CORE_INVALID, "Invalid" },
@@ -201,7 +213,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
 	return ent;
 }
-static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
+static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
 				   u32 type, u8 port)
 {
 	u32 addrl, addrh, sizel, sizeh = 0;
@@ -213,7 +225,7 @@ static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
 	    ((ent & SCAN_ADDR_TYPE) != type) ||
 	    (((ent & SCAN_ADDR_PORT) >> SCAN_ADDR_PORT_SHIFT) != port)) {
 		bcma_erom_push_ent(eromptr);
-		return -EINVAL;
+		return (u32)-EINVAL;
 	}
 	addrl = ent & SCAN_ADDR_ADDR;
@@ -261,7 +273,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 			      struct bcma_device_id *match, int core_num,
 			      struct bcma_device *core)
 {
-	s32 tmp;
+	u32 tmp;
 	u8 i, j;
 	s32 cia, cib;
 	u8 ports[2], wrappers[2];
@@ -339,11 +351,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 	 * the main register space for the core
 	 */
 	tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
-	if (tmp <= 0) {
+	if (tmp == 0 || IS_ERR_VALUE(tmp)) {
 		/* Try again to see if it is a bridge */
 		tmp = bcma_erom_get_addr_desc(bus, eromptr,
 					      SCAN_ADDR_TYPE_BRIDGE, 0);
-		if (tmp <= 0) {
+		if (tmp == 0 || IS_ERR_VALUE(tmp)) {
 			return -EILSEQ;
 		} else {
 			bcma_info(bus, "Bridge found\n");
@@ -357,7 +369,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 		for (j = 0; ; j++) {
 			tmp = bcma_erom_get_addr_desc(bus, eromptr,
 						      SCAN_ADDR_TYPE_SLAVE, i);
-			if (tmp < 0) {
+			if (IS_ERR_VALUE(tmp)) {
 				/* no more entries for port _i_ */
 				/* pr_debug("erom: slave port %d "
 				 * "has %d descriptors\n", i, j); */
@@ -374,7 +386,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 		for (j = 0; ; j++) {
 			tmp = bcma_erom_get_addr_desc(bus, eromptr,
 						      SCAN_ADDR_TYPE_MWRAP, i);
-			if (tmp < 0) {
+			if (IS_ERR_VALUE(tmp)) {
 				/* no more entries for port _i_ */
 				/* pr_debug("erom: master wrapper %d "
 				 * "has %d descriptors\n", i, j); */
@@ -392,7 +404,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 		for (j = 0; ; j++) {
 			tmp = bcma_erom_get_addr_desc(bus, eromptr,
 						      SCAN_ADDR_TYPE_SWRAP, i + hack);
-			if (tmp < 0) {
+			if (IS_ERR_VALUE(tmp)) {
 				/* no more entries for port _i_ */
 				/* pr_debug("erom: master wrapper %d "
 				 * has %d descriptors\n", i, j); */
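The s32-to-u32 switch matters because EROM address descriptors are full 32-bit values: a valid address with bit 31 set looks negative to a "tmp <= 0" test and was wrongly rejected. Encoding failures as the top errno range and testing with IS_ERR_VALUE() keeps such addresses usable. The idea on an unsigned long, which is what IS_ERR_VALUE() is defined over (values are illustrative):

    #include <linux/err.h>

    unsigned long desc = 0x80000000UL;	/* valid address, bit 31 set */

    if (IS_ERR_VALUE(desc))	/* matches only the last 4095 values */
    	return -EILSEQ;		/* not taken for 0x80000000 */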


@@ -43,7 +43,7 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
-	ret = strict_strtol(buf, 10, &result);
+	ret = kstrtol(buf, 10, &result);
 	if (ret)
 		return ret;
@@ -89,7 +89,7 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
-	ret = strict_strtol(buf, 10, &result);
+	ret = kstrtol(buf, 10, &result);
 	if (ret)
 		return ret;
@@ -135,7 +135,7 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
-	ret = strict_strtol(buf, 10, &result);
+	ret = kstrtol(buf, 10, &result);
 	if (ret)
 		return ret;
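strict_strtol() was by this point a deprecated alias on its way out of the kernel; kstrtol() keeps the same contract, rejecting the input unless the whole string is a valid number in the given base and returning 0 or a negative errno:

    long result;
    int ret;

    ret = kstrtol(buf, 10, &result);	/* base 10; a trailing '\n' is tolerated */
    if (ret)
    	return ret;			/* -EINVAL or -ERANGE */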


@@ -486,7 +486,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
 		if (firmwarelen - offset < txlen)
 			txlen = firmwarelen - offset;
-		tx_blocks = (txlen + blksz_dl - 1) / blksz_dl;
+		tx_blocks = DIV_ROUND_UP(txlen, blksz_dl);
 		memcpy(fwbuf, &firmware[offset], txlen);
 	}
@@ -873,7 +873,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
 	}
 	blksz = SDIO_BLOCK_SIZE;
-	buf_block_len = (nb + blksz - 1) / blksz;
+	buf_block_len = DIV_ROUND_UP(nb, blksz);
 	sdio_claim_host(card->func);
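DIV_ROUND_UP(n, d), from linux/kernel.h, expands to exactly the ((n) + (d) - 1) / (d) ceiling-division idiom being replaced, just with the intent spelled out:

    /* e.g. a 530-byte transfer over 256-byte SDIO blocks */
    unsigned int nb = 530, blksz = 256;
    unsigned int blocks = DIV_ROUND_UP(nb, blksz);	/* (530 + 255) / 256 = 3 */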


@@ -1307,11 +1307,11 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
 		}
 		if (fifo2 & 2) {
 			hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
-			hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS +
+			hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS |
 					   HFCPCI_INTS_B2REC);
 		} else {
 			hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
-			hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS +
+			hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS |
 					   HFCPCI_INTS_B1REC);
 		}
 #ifdef REVERSE_BITORDER
@@ -1346,14 +1346,14 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
 		if (fifo2 & 2) {
 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
 			if (!tics)
-				hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+				hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
 						  HFCPCI_INTS_B2REC);
 			hc->hw.ctmt |= 2;
 			hc->hw.conn &= ~0x18;
 		} else {
 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
 			if (!tics)
-				hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+				hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
 						  HFCPCI_INTS_B1REC);
 			hc->hw.ctmt |= 1;
 			hc->hw.conn &= ~0x03;
@@ -1375,14 +1375,14 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
 		if (fifo2 & 2) {
 			hc->hw.last_bfifo_cnt[1] = 0;
 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
-			hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+			hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
 					  HFCPCI_INTS_B2REC);
 			hc->hw.ctmt &= ~2;
 			hc->hw.conn &= ~0x18;
 		} else {
 			hc->hw.last_bfifo_cnt[0] = 0;
 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
-			hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+			hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
 					  HFCPCI_INTS_B1REC);
 			hc->hw.ctmt &= ~1;
 			hc->hw.conn &= ~0x03;
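Combining interrupt-mask bits with + happens to work while each constant is a distinct single bit that is never contributed twice, but | states the intent and stays safe even when an operand is already set. The failure mode in one line (the flag value is hypothetical):

    #define INTS_A 0x04
    unsigned char m = INTS_A;

    m |= INTS_A;	/* still 0x04: OR is idempotent */
    m += INTS_A;	/* 0x08: silently became a different bit */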


@@ -143,10 +143,9 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
  */
 static inline struct port *__get_first_port(struct bonding *bond)
 {
-	if (bond->slave_cnt == 0)
-		return NULL;
-	return &(SLAVE_AD_INFO(bond->first_slave).port);
+	struct slave *first_slave = bond_first_slave(bond);
+
+	return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
 }
 /**
@@ -159,13 +158,16 @@ static inline struct port *__get_next_port(struct port *port)
 static inline struct port *__get_next_port(struct port *port)
 {
 	struct bonding *bond = __get_bond_by_port(port);
-	struct slave *slave = port->slave;
+	struct slave *slave = port->slave, *slave_next;
 	// If there's no bond for this port, or this is the last slave
-	if ((bond == NULL) || (slave->next == bond->first_slave))
+	if (bond == NULL)
+		return NULL;
+	slave_next = bond_next_slave(bond, slave);
+	if (!slave_next || bond_is_first_slave(bond, slave_next))
 		return NULL;
-	return &(SLAVE_AD_INFO(slave->next).port);
+	return &(SLAVE_AD_INFO(slave_next).port);
 }
 /**
@@ -178,12 +180,14 @@ static inline struct port *__get_next_port(struct port *port)
 static inline struct aggregator *__get_first_agg(struct port *port)
 {
 	struct bonding *bond = __get_bond_by_port(port);
+	struct slave *first_slave;
 	// If there's no bond for this port, or bond has no slaves
-	if ((bond == NULL) || (bond->slave_cnt == 0))
+	if (bond == NULL)
 		return NULL;
+	first_slave = bond_first_slave(bond);
-	return &(SLAVE_AD_INFO(bond->first_slave).aggregator);
+	return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
 }
 /**
@@ -195,14 +199,17 @@ static inline struct aggregator *__get_first_agg(struct port *port)
  */
 static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
 {
-	struct slave *slave = aggregator->slave;
+	struct slave *slave = aggregator->slave, *slave_next;
 	struct bonding *bond = bond_get_bond_by_slave(slave);
 	// If there's no bond for this aggregator, or this is the last slave
-	if ((bond == NULL) || (slave->next == bond->first_slave))
+	if (bond == NULL)
+		return NULL;
+	slave_next = bond_next_slave(bond, slave);
+	if (!slave_next || bond_is_first_slave(bond, slave_next))
 		return NULL;
-	return &(SLAVE_AD_INFO(slave->next).aggregator);
+	return &(SLAVE_AD_INFO(slave_next).aggregator);
 }
 /*
@@ -2110,7 +2117,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 	read_lock(&bond->lock);
 	//check if there are any slaves
-	if (bond->slave_cnt == 0)
+	if (list_empty(&bond->slave_list))
 		goto re_arm;
 	// check if agg_select_timer timer after initialize is timed out
@@ -2336,8 +2343,12 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 int bond_3ad_set_carrier(struct bonding *bond)
 {
 	struct aggregator *active;
+	struct slave *first_slave;
-	active = __get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator));
+	first_slave = bond_first_slave(bond);
+	if (!first_slave)
+		return 0;
+	active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
 	if (active) {
 		/* are enough slaves available to consider link up? */
 		if (active->num_of_ports < bond->params.min_links) {
@@ -2415,6 +2426,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	struct ad_info ad_info;
 	int res = 1;
+	read_lock(&bond->lock);
 	if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
 		pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
 			 dev->name);
@@ -2432,7 +2444,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
 		if (agg && (agg->aggregator_identifier == agg_id)) {
@@ -2464,6 +2476,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	}
 out:
+	read_unlock(&bond->lock);
 	if (res) {
 		/* no suitable interface, frame not sent */
 		kfree_skb(skb);
@@ -2501,18 +2514,13 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
  */
 void bond_3ad_update_lacp_rate(struct bonding *bond)
 {
-	int i;
-	struct slave *slave;
 	struct port *port = NULL;
+	struct slave *slave;
 	int lacp_fast;
-	write_lock_bh(&bond->lock);
 	lacp_fast = bond->params.lacp_fast;
-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		port = &(SLAVE_AD_INFO(slave).port);
-		if (port->slave == NULL)
-			continue;
 		__get_state_machine_lock(port);
 		if (lacp_fast)
 			port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
@@ -2520,6 +2528,4 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
 			port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT;
 		__release_state_machine_lock(port);
 	}
-	write_unlock_bh(&bond->lock);
 }
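These hunks belong to the conversion of bonding's slave bookkeeping from a hand-maintained circular list (bond->first_slave, slave->next, slave_cnt) to a standard struct list_head, which is why bond_for_each_slave() loses its index argument and emptiness becomes list_empty(). The new helpers are thin list.h wrappers; approximately (simplified from this kernel's bonding.h, so treat the exact forms as a sketch):

    #define bond_for_each_slave(bond, pos) \
    	list_for_each_entry(pos, &(bond)->slave_list, list)

    #define bond_first_slave(bond) \
    	list_first_entry_or_null(&(bond)->slave_list, struct slave, list)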


@@ -224,13 +224,12 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{ {
struct slave *slave, *least_loaded; struct slave *slave, *least_loaded;
long long max_gap; long long max_gap;
int i;
least_loaded = NULL; least_loaded = NULL;
max_gap = LLONG_MIN; max_gap = LLONG_MIN;
/* Find the slave with the largest gap */ /* Find the slave with the largest gap */
bond_for_each_slave(bond, slave, i) { bond_for_each_slave(bond, slave) {
if (SLAVE_IS_OK(slave)) { if (SLAVE_IS_OK(slave)) {
long long gap = compute_gap(slave); long long gap = compute_gap(slave);
@@ -386,11 +385,10 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
struct slave *rx_slave, *slave, *start_at; struct slave *rx_slave, *slave, *start_at;
int i = 0; int i = 0;
if (bond_info->next_rx_slave) { if (bond_info->next_rx_slave)
start_at = bond_info->next_rx_slave; start_at = bond_info->next_rx_slave;
} else { else
start_at = bond->first_slave; start_at = bond_first_slave(bond);
}
rx_slave = NULL; rx_slave = NULL;
@@ -405,7 +403,8 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
} }
if (rx_slave) { if (rx_slave) {
bond_info->next_rx_slave = rx_slave->next; slave = bond_next_slave(bond, rx_slave);
bond_info->next_rx_slave = slave;
} }
return rx_slave; return rx_slave;
@@ -513,7 +512,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
skb->dev = client_info->slave->dev; skb->dev = client_info->slave->dev;
if (client_info->tag) { if (client_info->vlan_id) {
skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id); skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
if (!skb) { if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n", pr_err("%s: Error: failed to insert VLAN tag\n",
@@ -695,10 +694,8 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
client_info->ntt = 0; client_info->ntt = 0;
} }
if (bond_vlan_used(bond)) { if (!vlan_get_tag(skb, &client_info->vlan_id))
if (!vlan_get_tag(skb, &client_info->vlan_id)) client_info->vlan_id = 0;
client_info->tag = 1;
}
if (!client_info->assigned) { if (!client_info->assigned) {
u32 prev_tbl_head = bond_info->rx_hashtbl_used_head; u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
@@ -804,7 +801,7 @@ static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
entry->used_prev = RLB_NULL_INDEX; entry->used_prev = RLB_NULL_INDEX;
entry->assigned = 0; entry->assigned = 0;
entry->slave = NULL; entry->slave = NULL;
entry->tag = 0; entry->vlan_id = 0;
} }
static void rlb_init_table_entry_src(struct rlb_client_info *entry) static void rlb_init_table_entry_src(struct rlb_client_info *entry)
{ {
@@ -961,7 +958,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]); struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
u32 next_index = bond_info->rx_hashtbl[curr_index].used_next; u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;
if (curr->tag && (curr->vlan_id == vlan_id)) if (curr->vlan_id == vlan_id)
rlb_delete_table_entry(bond, curr_index); rlb_delete_table_entry(bond, curr_index);
curr_index = next_index; curr_index = next_index;
@@ -972,58 +969,62 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
/*********************** tlb/rlb shared functions *********************/ /*********************** tlb/rlb shared functions *********************/
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
u16 vid)
{ {
struct bonding *bond = bond_get_bond_by_slave(slave);
struct learning_pkt pkt; struct learning_pkt pkt;
struct sk_buff *skb;
int size = sizeof(struct learning_pkt); int size = sizeof(struct learning_pkt);
int i; char *data;
memset(&pkt, 0, size); memset(&pkt, 0, size);
memcpy(pkt.mac_dst, mac_addr, ETH_ALEN); memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
memcpy(pkt.mac_src, mac_addr, ETH_ALEN); memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
pkt.type = cpu_to_be16(ETH_P_LOOP); pkt.type = cpu_to_be16(ETH_P_LOOP);
for (i = 0; i < MAX_LP_BURST; i++) { skb = dev_alloc_skb(size);
struct sk_buff *skb; if (!skb)
char *data; return;
skb = dev_alloc_skb(size); data = skb_put(skb, size);
memcpy(data, &pkt, size);
skb_reset_mac_header(skb);
skb->network_header = skb->mac_header + ETH_HLEN;
skb->protocol = pkt.type;
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;
if (vid) {
skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
if (!skb) { if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n",
slave->bond->dev->name);
return; return;
} }
data = skb_put(skb, size);
memcpy(data, &pkt, size);
skb_reset_mac_header(skb);
skb->network_header = skb->mac_header + ETH_HLEN;
skb->protocol = pkt.type;
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;
if (bond_vlan_used(bond)) {
struct vlan_entry *vlan;
vlan = bond_next_vlan(bond,
bond->alb_info.current_alb_vlan);
bond->alb_info.current_alb_vlan = vlan;
if (!vlan) {
kfree_skb(skb);
continue;
}
skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan->vlan_id);
if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n",
bond->dev->name);
continue;
}
}
dev_queue_xmit(skb);
} }
dev_queue_xmit(skb);
}
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
{
struct bonding *bond = bond_get_bond_by_slave(slave);
struct net_device *upper;
struct list_head *iter;
/* send untagged */
alb_send_lp_vid(slave, mac_addr, 0);
/* loop through vlans and send one packet for each */
rcu_read_lock();
netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
if (upper->priv_flags & IFF_802_1Q_VLAN)
alb_send_lp_vid(slave, mac_addr,
vlan_dev_vlan_id(upper));
}
rcu_read_unlock();
} }
static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[]) static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
@@ -1173,9 +1174,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
{ {
struct slave *tmp_slave1, *free_mac_slave = NULL; struct slave *tmp_slave1, *free_mac_slave = NULL;
struct slave *has_bond_addr = bond->curr_active_slave; struct slave *has_bond_addr = bond->curr_active_slave;
int i;
if (bond->slave_cnt == 0) { if (list_empty(&bond->slave_list)) {
/* this is the first slave */ /* this is the first slave */
return 0; return 0;
} }
@@ -1196,7 +1196,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
/* The slave's address is equal to the address of the bond. /* The slave's address is equal to the address of the bond.
* Search for a spare address in the bond for this slave. * Search for a spare address in the bond for this slave.
*/ */
bond_for_each_slave(bond, tmp_slave1, i) { bond_for_each_slave(bond, tmp_slave1) {
if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) { if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
/* no slave has tmp_slave1's perm addr /* no slave has tmp_slave1's perm addr
* as its curr addr * as its curr addr
@@ -1246,17 +1246,15 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
*/ */
static int alb_set_mac_address(struct bonding *bond, void *addr) static int alb_set_mac_address(struct bonding *bond, void *addr)
{ {
struct sockaddr sa;
struct slave *slave, *stop_at;
char tmp_addr[ETH_ALEN]; char tmp_addr[ETH_ALEN];
struct slave *slave;
struct sockaddr sa;
int res; int res;
int i;
if (bond->alb_info.rlb_enabled) { if (bond->alb_info.rlb_enabled)
return 0; return 0;
}
bond_for_each_slave(bond, slave, i) { bond_for_each_slave(bond, slave) {
/* save net_device's current hw address */ /* save net_device's current hw address */
memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN); memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
@@ -1276,8 +1274,7 @@ unwind:
sa.sa_family = bond->dev->type; sa.sa_family = bond->dev->type;
/* unwind from head to the slave that failed */ /* unwind from head to the slave that failed */
stop_at = slave; bond_for_each_slave_continue_reverse(bond, slave) {
bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN); memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
dev_set_mac_address(slave->dev, &sa); dev_set_mac_address(slave->dev, &sa);
memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN); memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
@@ -1342,6 +1339,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
/* make sure that the curr_active_slave do not change during tx /* make sure that the curr_active_slave do not change during tx
*/ */
read_lock(&bond->lock);
 	read_lock(&bond->curr_slave_lock);
 	switch (ntohs(skb->protocol)) {
@@ -1446,11 +1444,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	}
 	read_unlock(&bond->curr_slave_lock);
-	read_unlock(&bond->lock);
 
 	if (res) {
 		/* no suitable interface, frame not sent */
 		kfree_skb(skb);
 	}
 
 	return NETDEV_TX_OK;
 }
@@ -1460,11 +1459,10 @@ void bond_alb_monitor(struct work_struct *work)
 					    alb_work.work);
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	struct slave *slave;
-	int i;
 
 	read_lock(&bond->lock);
-	if (bond->slave_cnt == 0) {
+	if (list_empty(&bond->slave_list)) {
 		bond_info->tx_rebalance_counter = 0;
 		bond_info->lp_counter = 0;
 		goto re_arm;
@@ -1482,9 +1480,8 @@ void bond_alb_monitor(struct work_struct *work)
 		 */
 		read_lock(&bond->curr_slave_lock);
 
-		bond_for_each_slave(bond, slave, i) {
+		bond_for_each_slave(bond, slave)
 			alb_send_learning_packets(slave, slave->dev->dev_addr);
-		}
 
 		read_unlock(&bond->curr_slave_lock);
@@ -1496,7 +1493,7 @@ void bond_alb_monitor(struct work_struct *work)
 
 		read_lock(&bond->curr_slave_lock);
 
-		bond_for_each_slave(bond, slave, i) {
+		bond_for_each_slave(bond, slave) {
 			tlb_clear_slave(bond, slave, 1);
 			if (slave == bond->curr_active_slave) {
 				SLAVE_TLB_INFO(slave).load =
@@ -1602,9 +1599,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
  */
 void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
 {
-	if (bond->slave_cnt > 1) {
+	if (!list_empty(&bond->slave_list))
 		alb_change_hw_addr_on_detach(bond, slave);
-	}
 
 	tlb_clear_slave(bond, slave, 0);
@@ -1661,9 +1657,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 {
 	struct slave *swap_slave;
 
-	if (bond->curr_active_slave == new_slave) {
+	if (bond->curr_active_slave == new_slave)
 		return;
-	}
 
 	if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) {
 		dev_set_promiscuity(bond->curr_active_slave->dev, -1);
@@ -1672,11 +1667,10 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 	}
 
 	swap_slave = bond->curr_active_slave;
-	bond->curr_active_slave = new_slave;
+	rcu_assign_pointer(bond->curr_active_slave, new_slave);
 
-	if (!new_slave || (bond->slave_cnt == 0)) {
+	if (!new_slave || list_empty(&bond->slave_list))
 		return;
-	}
 
 	/* set the new curr_active_slave to the bonds mac address
 	 * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
@@ -1689,9 +1683,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 	 * ignored so we can mess with their MAC addresses without
 	 * fear of interference from transmit activity.
 	 */
-	if (swap_slave) {
+	if (swap_slave)
 		tlb_clear_slave(bond, swap_slave, 1);
-	}
 	tlb_clear_slave(bond, new_slave, 1);
 
 	write_unlock_bh(&bond->curr_slave_lock);
@@ -1768,11 +1761,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 {
-	if (bond->alb_info.current_alb_vlan &&
-	    (bond->alb_info.current_alb_vlan->vlan_id == vlan_id)) {
-		bond->alb_info.current_alb_vlan = NULL;
-	}
-
 	if (bond->alb_info.rlb_enabled) {
 		rlb_clear_vlan(bond, vlan_id);
 	}
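
Two conversions repeat throughout the bond_alb hunks: slave-count tests become list_empty() tests on the new slave_list, and the active-slave pointer is published with an RCU-aware store so that lockless readers can dereference it. A minimal sketch of that publish/read pairing, with hypothetical helper names (only the field and the RCU calls come from the patch):

static void bond_publish_active(struct bonding *bond, struct slave *new_slave)
{
	/* Writer, under curr_slave_lock: rcu_assign_pointer() orders all
	 * prior stores to *new_slave before the pointer becomes visible. */
	rcu_assign_pointer(bond->curr_active_slave, new_slave);
}

static void bond_report_active(struct bonding *bond)
{
	struct slave *curr;

	/* Reader: no lock taken, only an RCU read-side critical section. */
	rcu_read_lock();
	curr = rcu_dereference(bond->curr_active_slave);
	if (curr)
		pr_info("active slave: %s\n", curr->dev->name);
	rcu_read_unlock();
}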


@@ -53,7 +53,6 @@ struct slave;
 
 #define TLB_NULL_INDEX		0xffffffff
-#define MAX_LP_BURST		3
 
 /* rlb defs */
 #define RLB_HASH_TABLE_SIZE	256
@@ -126,7 +125,6 @@ struct rlb_client_info {
 	u8 assigned;		/* checking whether this entry is assigned */
 	u8 ntt;			/* flag - need to transmit client info */
 	struct slave *slave;	/* the slave assigned to this client */
-	u8 tag;			/* flag - need to tag skb */
 	unsigned short vlan_id;	/* VLAN tag associated with IP address */
 };
@@ -170,7 +168,6 @@ struct alb_bond_info {
 					 * rx traffic should be
 					 * rebalanced
 					 */
-	struct vlan_entry	*current_alb_vlan;
 };
 
 int bond_alb_initialize(struct bonding *bond, int rlb_enabled);

File diff suppressed because it is too large.


@@ -12,7 +12,6 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 	struct bonding *bond = seq->private;
 	loff_t off = 0;
 	struct slave *slave;
-	int i;
 
 	/* make sure the bond won't be taken away */
 	rcu_read_lock();
@@ -21,10 +20,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave)
 		if (++off == *pos)
 			return slave;
-	}
 
 	return NULL;
 }
@@ -36,11 +34,13 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	++*pos;
 	if (v == SEQ_START_TOKEN)
-		return bond->first_slave;
+		return bond_first_slave(bond);
 
-	slave = slave->next;
+	if (bond_is_last_slave(bond, slave))
+		return NULL;
+	slave = bond_next_slave(bond, slave);
 
-	return (slave == bond->first_slave) ? NULL : slave;
+	return slave;
 }
 
 static void bond_info_seq_stop(struct seq_file *seq, void *v)


@@ -209,12 +209,12 @@ void bond_destroy_slave_symlinks(struct net_device *master,
 static ssize_t bonding_show_slaves(struct device *d,
 				   struct device_attribute *attr, char *buf)
 {
-	struct slave *slave;
-	int i, res = 0;
 	struct bonding *bond = to_bond(d);
+	struct slave *slave;
+	int res = 0;
 
 	read_lock(&bond->lock);
-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		if (res > (PAGE_SIZE - IFNAMSIZ)) {
 			/* not enough space for another interface name */
 			if ((PAGE_SIZE - res) > 10)
@@ -227,6 +227,7 @@ static ssize_t bonding_show_slaves(struct device *d,
 	read_unlock(&bond->lock);
 	if (res)
 		buf[res-1] = '\n'; /* eat the leftover space */
+
 	return res;
 }
@@ -325,7 +326,7 @@ static ssize_t bonding_store_mode(struct device *d,
 		goto out;
 	}
 
-	if (bond->slave_cnt > 0) {
+	if (!list_empty(&bond->slave_list)) {
 		pr_err("unable to update mode of %s because it has slaves.\n",
 		       bond->dev->name);
 		ret = -EPERM;
@@ -501,20 +502,25 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
 					   struct device_attribute *attr,
 					   const char *buf, size_t count)
 {
-	int new_value;
+	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
 
-	if (bond->slave_cnt != 0) {
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	if (!list_empty(&bond->slave_list)) {
 		pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
 		       bond->dev->name);
-		return -EPERM;
+		ret = -EPERM;
+		goto out;
 	}
 
 	new_value = bond_parse_parm(buf, fail_over_mac_tbl);
 	if (new_value < 0) {
 		pr_err("%s: Ignoring invalid fail_over_mac value %s.\n",
 		       bond->dev->name, buf);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	bond->params.fail_over_mac = new_value;
@@ -522,7 +528,9 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
 		bond->dev->name, fail_over_mac_tbl[new_value].modename,
 		new_value);
 
-	return count;
+out:
+	rtnl_unlock();
+	return ret;
 }
 
 static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
@@ -661,7 +669,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
 			 &newtarget);
 		/* not to race with bond_arp_rcv */
 		write_lock_bh(&bond->lock);
-		bond_for_each_slave(bond, slave, i)
+		bond_for_each_slave(bond, slave)
 			slave->target_last_arp_rx[ind] = jiffies;
 		targets[ind] = newtarget;
 		write_unlock_bh(&bond->lock);
@@ -687,7 +695,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
 			 &newtarget);
 
 		write_lock_bh(&bond->lock);
-		bond_for_each_slave(bond, slave, i) {
+		bond_for_each_slave(bond, slave) {
 			targets_rx = slave->target_last_arp_rx;
 			j = ind;
 			for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -844,8 +852,11 @@ static ssize_t bonding_store_lacp(struct device *d,
 				  struct device_attribute *attr,
 				  const char *buf, size_t count)
 {
-	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
+	int new_value, ret = count;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
 
 	if (bond->dev->flags & IFF_UP) {
 		pr_err("%s: Unable to update LACP rate because interface is up.\n",
@@ -875,6 +886,8 @@ static ssize_t bonding_store_lacp(struct device *d,
 		ret = -EINVAL;
 	}
 out:
+	rtnl_unlock();
+
 	return ret;
 }
 
 static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
@@ -1078,10 +1091,9 @@ static ssize_t bonding_store_primary(struct device *d,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	int i;
-	struct slave *slave;
 	struct bonding *bond = to_bond(d);
 	char ifname[IFNAMSIZ];
+	struct slave *slave;
 
 	if (!rtnl_trylock())
 		return restart_syscall();
@@ -1107,7 +1119,7 @@ static ssize_t bonding_store_primary(struct device *d,
 		goto out;
 	}
 
-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
 			pr_info("%s: Setting %s as primary slave.\n",
 				bond->dev->name, slave->dev->name);
@@ -1236,16 +1248,16 @@ static ssize_t bonding_show_active_slave(struct device *d,
 					 struct device_attribute *attr,
 					 char *buf)
 {
-	struct slave *curr;
 	struct bonding *bond = to_bond(d);
+	struct slave *curr;
 	int count = 0;
 
-	read_lock(&bond->curr_slave_lock);
-	curr = bond->curr_active_slave;
-	read_unlock(&bond->curr_slave_lock);
+	rcu_read_lock();
+	curr = rcu_dereference(bond->curr_active_slave);
 
 	if (USES_PRIMARY(bond->params.mode) && curr)
 		count = sprintf(buf, "%s\n", curr->dev->name);
+	rcu_read_unlock();
 
 	return count;
 }
@@ -1253,16 +1265,14 @@ static ssize_t bonding_store_active_slave(struct device *d,
 					  struct device_attribute *attr,
 					  const char *buf, size_t count)
 {
-	int i;
-	struct slave *slave;
-	struct slave *old_active = NULL;
-	struct slave *new_active = NULL;
+	struct slave *slave, *old_active, *new_active;
 	struct bonding *bond = to_bond(d);
 	char ifname[IFNAMSIZ];
 
 	if (!rtnl_trylock())
 		return restart_syscall();
 
+	old_active = new_active = NULL;
 	block_netpoll_tx();
 	read_lock(&bond->lock);
 	write_lock_bh(&bond->curr_slave_lock);
@@ -1279,12 +1289,12 @@ static ssize_t bonding_store_active_slave(struct device *d,
 	if (!strlen(ifname) || buf[0] == '\n') {
 		pr_info("%s: Clearing current active slave.\n",
 			bond->dev->name);
-		bond->curr_active_slave = NULL;
+		rcu_assign_pointer(bond->curr_active_slave, NULL);
 		bond_select_active_slave(bond);
 		goto out;
 	}
 
-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
 			old_active = bond->curr_active_slave;
 			new_active = slave;
@@ -1295,8 +1305,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
 					bond->dev->name,
 					slave->dev->name);
 				goto out;
-			}
-			else {
+			} else {
 				if ((new_active) &&
 				    (old_active) &&
 				    (new_active->link == BOND_LINK_UP) &&
@@ -1307,8 +1316,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
 						slave->dev->name);
 					bond_change_active_slave(bond,
 								 new_active);
-				}
-				else {
+				} else {
 					pr_info("%s: Could not set %s as"
 						" active slave; either %s is"
 						" down or the link is down.\n",
@@ -1344,14 +1352,9 @@ static ssize_t bonding_show_mii_status(struct device *d,
 				       struct device_attribute *attr,
 				       char *buf)
 {
-	struct slave *curr;
 	struct bonding *bond = to_bond(d);
 
-	read_lock(&bond->curr_slave_lock);
-	curr = bond->curr_active_slave;
-	read_unlock(&bond->curr_slave_lock);
-
-	return sprintf(buf, "%s\n", curr ? "up" : "down");
+	return sprintf(buf, "%s\n", bond->curr_active_slave ? "up" : "down");
 }
 static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
@@ -1470,15 +1473,15 @@ static ssize_t bonding_show_queue_id(struct device *d,
 				     struct device_attribute *attr,
 				     char *buf)
 {
-	struct slave *slave;
-	int i, res = 0;
 	struct bonding *bond = to_bond(d);
+	struct slave *slave;
+	int res = 0;
 
 	if (!rtnl_trylock())
 		return restart_syscall();
 
 	read_lock(&bond->lock);
-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
 			/* not enough space for another interface_name:queue_id pair */
 			if ((PAGE_SIZE - res) > 10)
@@ -1493,6 +1496,7 @@ static ssize_t bonding_show_queue_id(struct device *d,
 	if (res)
 		buf[res-1] = '\n'; /* eat the leftover space */
 	rtnl_unlock();
+
 	return res;
 }
@@ -1507,7 +1511,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
 	struct slave *slave, *update_slave;
 	struct bonding *bond = to_bond(d);
 	u16 qid;
-	int i, ret = count;
+	int ret = count;
 	char *delim;
 	struct net_device *sdev = NULL;
@@ -1542,7 +1546,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
 
 	/* Search for thes slave and check for duplicate qids */
 	update_slave = NULL;
-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		if (sdev == slave->dev)
 			/*
 			 * We don't need to check the matching
@@ -1594,8 +1598,8 @@ static ssize_t bonding_store_slaves_active(struct device *d,
 					   struct device_attribute *attr,
 					   const char *buf, size_t count)
 {
-	int i, new_value, ret = count;
 	struct bonding *bond = to_bond(d);
+	int new_value, ret = count;
 	struct slave *slave;
 
 	if (sscanf(buf, "%d", &new_value) != 1) {
@@ -1618,7 +1622,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
 	}
 
 	read_lock(&bond->lock);
-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		if (!bond_is_active_slave(slave)) {
 			if (new_value)
 				slave->inactive = 0;
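
Several store handlers above converge on the same shape: grab the RTNL lock without sleeping via rtnl_trylock(), return restart_syscall() so the write is transparently retried, and route every exit through a single unlock label. Condensed into one hypothetical handler (bonding_store_example is illustrative, and the attribute-specific parsing is elided):

static ssize_t bonding_store_example(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct bonding *bond = to_bond(d);
	int new_value, ret = count;

	if (!rtnl_trylock())
		return restart_syscall();	/* syscall is retried */

	if (!list_empty(&bond->slave_list)) {
		ret = -EPERM;			/* refuse while slaves exist */
		goto out;
	}
	if (sscanf(buf, "%d", &new_value) != 1) {
		ret = -EINVAL;
		goto out;
	}
	/* ... apply new_value to bond->params ... */
out:
	rtnl_unlock();
	return ret;
}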


@@ -71,6 +71,28 @@
 		set_fs(fs);			\
 		res; })
 
+/* slave list primitives */
+#define bond_to_slave(ptr) list_entry(ptr, struct slave, list)
+
+/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
+#define bond_first_slave(bond) \
+	list_first_entry_or_null(&(bond)->slave_list, struct slave, list)
+#define bond_last_slave(bond) \
+	(list_empty(&(bond)->slave_list) ? NULL : \
+	 bond_to_slave((bond)->slave_list.prev))
+
+#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list)
+#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list)
+
+/* Since bond_first/last_slave can return NULL, these can return NULL too */
+#define bond_next_slave(bond, pos) \
+	(bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
+	 bond_to_slave((pos)->list.next))
+#define bond_prev_slave(bond, pos) \
+	(bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
+	 bond_to_slave((pos)->list.prev))
+
 /**
  * bond_for_each_slave_from - iterate the slaves list from a starting point
  * @bond: the bond holding this list.
@@ -80,37 +102,33 @@
  *
  * Caller must hold bond->lock
  */
 #define bond_for_each_slave_from(bond, pos, cnt, start)	\
-	for (cnt = 0, pos = start;			\
-	     cnt < (bond)->slave_cnt;			\
-	     cnt++, pos = (pos)->next)
+	for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
+	     cnt++, pos = bond_next_slave(bond, pos))
 
 /**
- * bond_for_each_slave_from_to - iterate the slaves list from start point to stop point
- * @bond: the bond holding this list.
- * @pos: current slave.
- * @cnt: counter for number max of moves
- * @start: start point.
- * @stop: stop point.
+ * bond_for_each_slave - iterate over all slaves
+ * @bond: the bond holding this list
+ * @pos: current slave
 *
 * Caller must hold bond->lock
 */
-#define bond_for_each_slave_from_to(bond, pos, cnt, start, stop)	\
-	for (cnt = 0, pos = start;					\
-	     ((cnt < (bond)->slave_cnt) && (pos != (stop)->next));	\
-	     cnt++, pos = (pos)->next)
+#define bond_for_each_slave(bond, pos) \
+	list_for_each_entry(pos, &(bond)->slave_list, list)
+
+/* Caller must have rcu_read_lock */
+#define bond_for_each_slave_rcu(bond, pos) \
+	list_for_each_entry_rcu(pos, &(bond)->slave_list, list)
 
 /**
- * bond_for_each_slave - iterate the slaves list from head
- * @bond: the bond holding this list.
- * @pos: current slave.
- * @cnt: counter for max number of moves
+ * bond_for_each_slave_reverse - iterate in reverse from a given position
+ * @bond: the bond holding this list
+ * @pos: slave to continue from
 *
 * Caller must hold bond->lock
 */
-#define bond_for_each_slave(bond, pos, cnt)	\
-	bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave)
+#define bond_for_each_slave_continue_reverse(bond, pos) \
+	list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 extern atomic_t netpoll_block_tx;
@@ -167,15 +185,9 @@ struct bond_parm_tbl {
 
 #define BOND_MAX_MODENAME_LEN 20
 
-struct vlan_entry {
-	struct list_head vlan_list;
-	unsigned short vlan_id;
-};
-
 struct slave {
 	struct net_device *dev; /* first - useful for panic debug */
-	struct slave *next;
-	struct slave *prev;
+	struct list_head list;
 	struct bonding *bond; /* our master */
 	int    delay;
 	unsigned long jiffies;
@@ -215,7 +227,7 @@ struct slave {
  */
 struct bonding {
 	struct   net_device *dev; /* first - useful for panic debug */
-	struct   slave *first_slave;
+	struct   list_head slave_list;
 	struct   slave *curr_active_slave;
 	struct   slave *current_arp_slave;
 	struct   slave *primary_slave;
@@ -237,7 +249,6 @@ struct bonding {
 	struct   ad_bond_info ad_info;
 	struct   alb_bond_info alb_info;
 	struct   bond_params params;
-	struct   list_head vlan_list;
 	struct   workqueue_struct *wq;
 	struct   delayed_work mii_work;
 	struct   delayed_work arp_work;
@@ -250,11 +261,6 @@ struct bonding {
 #endif /* CONFIG_DEBUG_FS */
 };
 
-static inline bool bond_vlan_used(struct bonding *bond)
-{
-	return !list_empty(&bond->vlan_list);
-}
-
 #define bond_slave_get_rcu(dev) \
 	((struct slave *) rcu_dereference(dev->rx_handler_data))
@@ -270,13 +276,10 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
 						  struct net_device *slave_dev)
 {
 	struct slave *slave = NULL;
-	int i;
 
-	bond_for_each_slave(bond, slave, i) {
-		if (slave->dev == slave_dev) {
+	bond_for_each_slave(bond, slave)
+		if (slave->dev == slave_dev)
 			return slave;
-		}
-	}
 
 	return NULL;
 }
@@ -416,10 +419,20 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
 	return addr;
 }
 
+static inline bool slave_can_tx(struct slave *slave)
+{
+	if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP &&
+	    bond_is_active_slave(slave))
+		return true;
+	else
+		return false;
+}
+
 struct bond_net;
 
-struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
 int bond_create(struct net *net, const char *name);
 int bond_create_sysfs(struct bond_net *net);
 void bond_destroy_sysfs(struct bond_net *net);
@@ -477,10 +490,9 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
 static inline struct slave *bond_slave_has_mac(struct bonding *bond,
 					       const u8 *mac)
 {
-	int i = 0;
 	struct slave *tmp;
 
-	bond_for_each_slave(bond, tmp, i)
+	bond_for_each_slave(bond, tmp)
 		if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
 			return tmp;
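
The new iterators are thin wrappers around the generic list helpers, so a full walk over the slaves reduces to a plain list_for_each_entry() under the documented lock. A usage sketch (count_up_slaves is an invented example, not in the patch):

static int count_up_slaves(struct bonding *bond)
{
	struct slave *slave;
	int n = 0;

	/* bond->lock must be held, per the macro's contract */
	bond_for_each_slave(bond, slave)
		if (slave->link == BOND_LINK_UP)
			n++;

	return n;
}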


@@ -347,7 +347,9 @@ static int ldisc_open(struct tty_struct *tty)
 	/* release devices to avoid name collision */
 	ser_release(NULL);
 
-	sprintf(name, "cf%s", tty->name);
+	result = snprintf(name, sizeof(name), "cf%s", tty->name);
+	if (result >= IFNAMSIZ)
+		return -EINVAL;
 	dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
 	if (!dev)
 		return -ENOMEM;
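
The ldisc_open() fix works because snprintf() returns the length the output would have had without truncation; a result of IFNAMSIZ or more therefore means the device name did not fit. The check in isolation, as a standalone sketch:

#include <stdio.h>

#define IFNAMSIZ 16

/* Returns 0 if "cf" plus the tty name fits an ifname, -1 on truncation. */
static int make_ifname(char name[IFNAMSIZ], const char *tty_name)
{
	int result = snprintf(name, IFNAMSIZ, "cf%s", tty_name);

	if (result >= IFNAMSIZ)
		return -1;
	return 0;
}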


@@ -1355,7 +1355,7 @@ static int at91_can_probe(struct platform_device *pdev)
 	if (at91_is_sam9263(priv))
 		dev->sysfs_groups[0] = &at91_sysfs_attr_group;
 
-	dev_set_drvdata(&pdev->dev, dev);
+	platform_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	err = register_candev(dev);


@@ -195,7 +195,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
 
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 		priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
-		if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
+		if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0)
 			dev_info(&pdev->dev, "control memory is not used for raminit\n");
 		else
 			priv->raminit = c_can_hw_raminit;


@@ -850,12 +850,17 @@ static int flexcan_open(struct net_device *dev)
 	struct flexcan_priv *priv = netdev_priv(dev);
 	int err;
 
-	clk_prepare_enable(priv->clk_ipg);
-	clk_prepare_enable(priv->clk_per);
+	err = clk_prepare_enable(priv->clk_ipg);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->clk_per);
+	if (err)
+		goto out_disable_ipg;
 
 	err = open_candev(dev);
 	if (err)
-		goto out;
+		goto out_disable_per;
 
 	err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
 	if (err)
@@ -875,8 +880,9 @@ static int flexcan_open(struct net_device *dev)
 
  out_close:
 	close_candev(dev);
- out:
+ out_disable_per:
 	clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
 	clk_disable_unprepare(priv->clk_ipg);
 
 	return err;
@@ -933,8 +939,13 @@ static int register_flexcandev(struct net_device *dev)
 	struct flexcan_regs __iomem *regs = priv->base;
 	u32 reg, err;
 
-	clk_prepare_enable(priv->clk_ipg);
-	clk_prepare_enable(priv->clk_per);
+	err = clk_prepare_enable(priv->clk_ipg);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->clk_per);
+	if (err)
+		goto out_disable_ipg;
 
 	/* select "bus clock", chip must be disabled */
 	flexcan_chip_disable(priv);
@@ -959,15 +970,16 @@ static int register_flexcandev(struct net_device *dev)
 	if (!(reg & FLEXCAN_MCR_FEN)) {
 		netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
 		err = -ENODEV;
-		goto out;
+		goto out_disable_per;
 	}
 
 	err = register_candev(dev);
 
- out:
+ out_disable_per:
 	/* disable core and turn off clocks */
 	flexcan_chip_disable(priv);
 	clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
 	clk_disable_unprepare(priv->clk_ipg);
 
 	return err;
@@ -1001,7 +1013,6 @@ static int flexcan_probe(struct platform_device *pdev)
 	struct resource *mem;
 	struct clk *clk_ipg = NULL, *clk_per = NULL;
 	void __iomem *base;
-	resource_size_t mem_size;
 	int err, irq;
 	u32 clock_freq = 0;
 
@@ -1013,43 +1024,25 @@ static int flexcan_probe(struct platform_device *pdev)
 		clk_ipg = devm_clk_get(&pdev->dev, "ipg");
 		if (IS_ERR(clk_ipg)) {
 			dev_err(&pdev->dev, "no ipg clock defined\n");
-			err = PTR_ERR(clk_ipg);
-			goto failed_clock;
+			return PTR_ERR(clk_ipg);
 		}
 		clock_freq = clk_get_rate(clk_ipg);
 
 		clk_per = devm_clk_get(&pdev->dev, "per");
 		if (IS_ERR(clk_per)) {
 			dev_err(&pdev->dev, "no per clock defined\n");
-			err = PTR_ERR(clk_per);
-			goto failed_clock;
+			return PTR_ERR(clk_per);
 		}
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if (!mem || irq <= 0) {
-		err = -ENODEV;
-		goto failed_get;
-	}
+	if (irq <= 0)
+		return -ENODEV;
 
-	mem_size = resource_size(mem);
-	if (!request_mem_region(mem->start, mem_size, pdev->name)) {
-		err = -EBUSY;
-		goto failed_get;
-	}
-
-	base = ioremap(mem->start, mem_size);
-	if (!base) {
-		err = -ENOMEM;
-		goto failed_map;
-	}
-
-	dev = alloc_candev(sizeof(struct flexcan_priv), 1);
-	if (!dev) {
-		err = -ENOMEM;
-		goto failed_alloc;
-	}
+	base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
 
 	of_id = of_match_device(flexcan_of_match, &pdev->dev);
 	if (of_id) {
@@ -1058,10 +1051,13 @@ static int flexcan_probe(struct platform_device *pdev)
 		devtype_data = (struct flexcan_devtype_data *)
 			pdev->id_entry->driver_data;
 	} else {
-		err = -ENODEV;
-		goto failed_devtype;
+		return -ENODEV;
 	}
 
+	dev = alloc_candev(sizeof(struct flexcan_priv), 1);
+	if (!dev)
+		return -ENOMEM;
+
 	dev->netdev_ops = &flexcan_netdev_ops;
 	dev->irq = irq;
 	dev->flags |= IFF_ECHO;
@@ -1087,7 +1083,7 @@ static int flexcan_probe(struct platform_device *pdev)
 
 	netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
 
-	dev_set_drvdata(&pdev->dev, dev);
+	platform_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	err = register_flexcandev(dev);
@@ -1104,28 +1100,15 @@ static int flexcan_probe(struct platform_device *pdev)
 	return 0;
 
  failed_register:
- failed_devtype:
 	free_candev(dev);
- failed_alloc:
-	iounmap(base);
- failed_map:
-	release_mem_region(mem->start, mem_size);
- failed_get:
- failed_clock:
 	return err;
 }
 
 static int flexcan_remove(struct platform_device *pdev)
 {
 	struct net_device *dev = platform_get_drvdata(pdev);
-	struct flexcan_priv *priv = netdev_priv(dev);
-	struct resource *mem;
 
 	unregister_flexcandev(dev);
-	iounmap(priv->base);
-
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(mem->start, resource_size(mem));
 
 	free_candev(dev);
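
Both flexcan_open() and register_flexcandev() now check the return value of clk_prepare_enable() and unwind already-enabled clocks in reverse order through labelled exits. The ladder reduced to its essentials (a sketch, not the driver code):

static int enable_two_clocks(struct clk *clk_ipg, struct clk *clk_per)
{
	int err;

	err = clk_prepare_enable(clk_ipg);	/* bus/register clock first */
	if (err)
		return err;

	err = clk_prepare_enable(clk_per);	/* then the peripheral clock */
	if (err)
		goto out_disable_ipg;

	return 0;

out_disable_ipg:
	clk_disable_unprepare(clk_ipg);		/* undo in reverse order */
	return err;
}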


@@ -37,9 +37,6 @@
  *
  * static struct mcp251x_platform_data mcp251x_info = {
  *         .oscillator_frequency = 8000000,
- *         .board_specific_setup = &mcp251x_setup,
- *         .power_enable = mcp251x_power_enable,
- *         .transceiver_enable = NULL,
  * };
  *
  * static struct spi_board_info spi_board_info[] = {
@@ -76,6 +73,7 @@
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
 #include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
 
 /* SPI interface instruction set */
 #define INSTRUCTION_WRITE	0x02
@@ -264,6 +262,8 @@ struct mcp251x_priv {
 #define AFTER_SUSPEND_POWER 4
 #define AFTER_SUSPEND_RESTART 8
 	int restart_tx;
+	struct regulator *power;
+	struct regulator *transceiver;
 };
 
 #define MCP251X_IS(_model) \
@@ -667,16 +667,25 @@ static int mcp251x_hw_probe(struct spi_device *spi)
 	return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
 }
 
+static int mcp251x_power_enable(struct regulator *reg, int enable)
+{
+	if (IS_ERR(reg))
+		return 0;
+
+	if (enable)
+		return regulator_enable(reg);
+	else
+		return regulator_disable(reg);
+}
+
 static void mcp251x_open_clean(struct net_device *net)
 {
 	struct mcp251x_priv *priv = netdev_priv(net);
 	struct spi_device *spi = priv->spi;
-	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
 
 	free_irq(spi->irq, priv);
 	mcp251x_hw_sleep(spi);
-	if (pdata->transceiver_enable)
-		pdata->transceiver_enable(0);
+	mcp251x_power_enable(priv->transceiver, 0);
 	close_candev(net);
 }
 
@@ -684,7 +693,6 @@ static int mcp251x_stop(struct net_device *net)
 {
 	struct mcp251x_priv *priv = netdev_priv(net);
 	struct spi_device *spi = priv->spi;
-	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
 
 	close_candev(net);
 
@@ -704,8 +712,7 @@ static int mcp251x_stop(struct net_device *net)
 
 	mcp251x_hw_sleep(spi);
 
-	if (pdata->transceiver_enable)
-		pdata->transceiver_enable(0);
+	mcp251x_power_enable(priv->transceiver, 0);
 
 	priv->can.state = CAN_STATE_STOPPED;
 
@@ -928,8 +935,7 @@ static int mcp251x_open(struct net_device *net)
 {
 	struct mcp251x_priv *priv = netdev_priv(net);
 	struct spi_device *spi = priv->spi;
-	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
-	unsigned long flags;
+	unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING;
 	int ret;
 
 	ret = open_candev(net);
@@ -939,25 +945,17 @@ static int mcp251x_open(struct net_device *net)
 	}
 
 	mutex_lock(&priv->mcp_lock);
-	if (pdata->transceiver_enable)
-		pdata->transceiver_enable(1);
+	mcp251x_power_enable(priv->transceiver, 1);
 
 	priv->force_quit = 0;
 	priv->tx_skb = NULL;
 	priv->tx_len = 0;
 
-	flags = IRQF_ONESHOT;
-	if (pdata->irq_flags)
-		flags |= pdata->irq_flags;
-	else
-		flags |= IRQF_TRIGGER_FALLING;
-
 	ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
 				   flags, DEVICE_NAME, priv);
 	if (ret) {
 		dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
-		if (pdata->transceiver_enable)
-			pdata->transceiver_enable(0);
+		mcp251x_power_enable(priv->transceiver, 0);
 		close_candev(net);
 		goto open_unlock;
 	}
@@ -1026,6 +1024,19 @@ static int mcp251x_can_probe(struct spi_device *spi)
 		CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
 	priv->model = spi_get_device_id(spi)->driver_data;
 	priv->net = net;
+
+	priv->power = devm_regulator_get(&spi->dev, "vdd");
+	priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
+	if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
+	    (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
+		ret = -EPROBE_DEFER;
+		goto error_power;
+	}
+
+	ret = mcp251x_power_enable(priv->power, 1);
+	if (ret)
+		goto error_power;
+
 	spi_set_drvdata(spi, priv);
 
 	priv->spi = spi;
@@ -1068,30 +1079,24 @@ static int mcp251x_can_probe(struct spi_device *spi)
 		}
 	}
 
-	if (pdata->power_enable)
-		pdata->power_enable(1);
-
-	/* Call out to platform specific setup */
-	if (pdata->board_specific_setup)
-		pdata->board_specific_setup(spi);
-
 	SET_NETDEV_DEV(net, &spi->dev);
 
 	/* Configure the SPI bus */
-	spi->mode = SPI_MODE_0;
+	spi->mode = spi->mode ? : SPI_MODE_0;
+	if (mcp251x_is_2510(spi))
+		spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
+	else
+		spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
 	spi->bits_per_word = 8;
 	spi_setup(spi);
 
 	/* Here is OK to not lock the MCP, no one knows about it yet */
 	if (!mcp251x_hw_probe(spi)) {
-		dev_info(&spi->dev, "Probe failed\n");
+		ret = -ENODEV;
 		goto error_probe;
 	}
 	mcp251x_hw_sleep(spi);
 
-	if (pdata->transceiver_enable)
-		pdata->transceiver_enable(0);
-
 	ret = register_candev(net);
 	if (ret)
 		goto error_probe;
@@ -1109,13 +1114,13 @@ error_rx_buf:
 	if (!mcp251x_enable_dma)
 		kfree(priv->spi_tx_buf);
 error_tx_buf:
-	free_candev(net);
 	if (mcp251x_enable_dma)
 		dma_free_coherent(&spi->dev, PAGE_SIZE,
 				  priv->spi_tx_buf, priv->spi_tx_dma);
+	mcp251x_power_enable(priv->power, 0);
+error_power:
+	free_candev(net);
 error_alloc:
-	if (pdata->power_enable)
-		pdata->power_enable(0);
 	dev_err(&spi->dev, "probe failed\n");
 error_out:
 	return ret;
@@ -1123,12 +1128,10 @@ error_out:
 
 static int mcp251x_can_remove(struct spi_device *spi)
 {
-	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
 	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 	struct net_device *net = priv->net;
 
 	unregister_candev(net);
-	free_candev(net);
 
 	if (mcp251x_enable_dma) {
 		dma_free_coherent(&spi->dev, PAGE_SIZE,
@@ -1138,8 +1141,9 @@ static int mcp251x_can_remove(struct spi_device *spi)
 		kfree(priv->spi_rx_buf);
 	}
 
-	if (pdata->power_enable)
-		pdata->power_enable(0);
+	mcp251x_power_enable(priv->power, 0);
+
+	free_candev(net);
 
 	return 0;
 }
@@ -1149,7 +1153,6 @@ static int mcp251x_can_remove(struct spi_device *spi)
 static int mcp251x_can_suspend(struct device *dev)
 {
 	struct spi_device *spi = to_spi_device(dev);
-	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
 	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 	struct net_device *net = priv->net;
 
@@ -1163,15 +1166,14 @@ static int mcp251x_can_suspend(struct device *dev)
 		netif_device_detach(net);
 
 		mcp251x_hw_sleep(spi);
-		if (pdata->transceiver_enable)
-			pdata->transceiver_enable(0);
+		mcp251x_power_enable(priv->transceiver, 0);
 		priv->after_suspend = AFTER_SUSPEND_UP;
 	} else {
 		priv->after_suspend = AFTER_SUSPEND_DOWN;
 	}
 
-	if (pdata->power_enable) {
-		pdata->power_enable(0);
+	if (!IS_ERR(priv->power)) {
+		regulator_disable(priv->power);
 		priv->after_suspend |= AFTER_SUSPEND_POWER;
 	}
 
@@ -1181,16 +1183,14 @@ static int mcp251x_can_suspend(struct device *dev)
 static int mcp251x_can_resume(struct device *dev)
 {
 	struct spi_device *spi = to_spi_device(dev);
-	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
 	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 
 	if (priv->after_suspend & AFTER_SUSPEND_POWER) {
-		pdata->power_enable(1);
+		mcp251x_power_enable(priv->power, 1);
 		queue_work(priv->wq, &priv->restart_work);
 	} else {
 		if (priv->after_suspend & AFTER_SUSPEND_UP) {
-			if (pdata->transceiver_enable)
-				pdata->transceiver_enable(1);
+			mcp251x_power_enable(priv->transceiver, 1);
 			queue_work(priv->wq, &priv->restart_work);
 		} else {
 			priv->after_suspend = 0;
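
The mcp251x conversion treats both supplies as optional: devm_regulator_get() can hand back an error pointer on boards without a "vdd" or "xceiver" regulator, mcp251x_power_enable() then succeeds silently, and only -EPROBE_DEFER is propagated out of probe. The core of the pattern in isolation (a sketch):

#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Toggle a possibly-absent supply; absence is deliberately not an error. */
static int power_enable(struct regulator *reg, int enable)
{
	if (IS_ERR(reg))
		return 0;

	return enable ? regulator_enable(reg) : regulator_disable(reg);
}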


@@ -40,6 +40,7 @@ struct mpc5xxx_can_data {
 	unsigned int type;
 	u32 (*get_clock)(struct platform_device *ofdev, const char *clock_name,
 			 int *mscan_clksrc);
+	void (*put_clock)(struct platform_device *ofdev);
 };
 
 #ifdef CONFIG_PPC_MPC52xx
@@ -148,7 +149,10 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
 		goto exit_put;
 	}
 
-	/* Determine the MSCAN device index from the physical address */
+	/* Determine the MSCAN device index from the peripheral's
+	 * physical address. Register address offsets against the
+	 * IMMR base are: 0x1300, 0x1380, 0x2300, 0x2380
+	 */
 	pval = of_get_property(ofdev->dev.of_node, "reg", &plen);
 	BUG_ON(!pval || plen < sizeof(*pval));
 	clockidx = (*pval & 0x80) ? 1 : 0;
@@ -177,7 +181,7 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
 		clockdiv = 1;
 
 	if (!clock_name || !strcmp(clock_name, "sys")) {
-		sys_clk = clk_get(&ofdev->dev, "sys_clk");
+		sys_clk = devm_clk_get(&ofdev->dev, "sys_clk");
 		if (IS_ERR(sys_clk)) {
 			dev_err(&ofdev->dev, "couldn't get sys_clk\n");
 			goto exit_unmap;
@@ -200,7 +204,7 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
 	}
 
 	if (clocksrc < 0) {
-		ref_clk = clk_get(&ofdev->dev, "ref_clk");
+		ref_clk = devm_clk_get(&ofdev->dev, "ref_clk");
 		if (IS_ERR(ref_clk)) {
 			dev_err(&ofdev->dev, "couldn't get ref_clk\n");
 			goto exit_unmap;
@@ -277,6 +281,8 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
 	dev = alloc_mscandev();
 	if (!dev)
 		goto exit_dispose_irq;
+	platform_set_drvdata(ofdev, dev);
+	SET_NETDEV_DEV(dev, &ofdev->dev);
 
 	priv = netdev_priv(dev);
 	priv->reg_base = base;
@@ -293,8 +299,6 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
 		goto exit_free_mscan;
 	}
 
-	SET_NETDEV_DEV(dev, &ofdev->dev);
-
 	err = register_mscandev(dev, mscan_clksrc);
 	if (err) {
 		dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
@@ -302,8 +306,6 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
 		goto exit_free_mscan;
 	}
 
-	platform_set_drvdata(ofdev, dev);
-
 	dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
 		 priv->reg_base, dev->irq, priv->can.clock.freq);
 
@@ -321,10 +323,17 @@ exit_unmap_mem:
 
 static int mpc5xxx_can_remove(struct platform_device *ofdev)
 {
+	const struct of_device_id *match;
+	const struct mpc5xxx_can_data *data;
 	struct net_device *dev = platform_get_drvdata(ofdev);
 	struct mscan_priv *priv = netdev_priv(dev);
 
+	match = of_match_device(mpc5xxx_can_table, &ofdev->dev);
+	data = match ? match->data : NULL;
+
 	unregister_mscandev(dev);
+	if (data && data->put_clock)
+		data->put_clock(ofdev);
 	iounmap(priv->reg_base);
 	irq_dispose_mapping(dev->irq);
 	free_candev(dev);


@@ -573,10 +573,21 @@ static int mscan_open(struct net_device *dev)
 	struct mscan_priv *priv = netdev_priv(dev);
 	struct mscan_regs __iomem *regs = priv->reg_base;
 
+	if (priv->clk_ipg) {
+		ret = clk_prepare_enable(priv->clk_ipg);
+		if (ret)
+			goto exit_retcode;
+	}
+	if (priv->clk_can) {
+		ret = clk_prepare_enable(priv->clk_can);
+		if (ret)
+			goto exit_dis_ipg_clock;
+	}
+
 	/* common open */
 	ret = open_candev(dev);
 	if (ret)
-		return ret;
+		goto exit_dis_can_clock;
 
 	napi_enable(&priv->napi);
@@ -604,6 +615,13 @@ exit_free_irq:
 exit_napi_disable:
 	napi_disable(&priv->napi);
 	close_candev(dev);
+exit_dis_can_clock:
+	if (priv->clk_can)
+		clk_disable_unprepare(priv->clk_can);
+exit_dis_ipg_clock:
+	if (priv->clk_ipg)
+		clk_disable_unprepare(priv->clk_ipg);
+exit_retcode:
 	return ret;
 }
@@ -621,6 +639,11 @@ static int mscan_close(struct net_device *dev)
 	close_candev(dev);
 	free_irq(dev->irq, dev);
 
+	if (priv->clk_can)
+		clk_disable_unprepare(priv->clk_can);
+	if (priv->clk_ipg)
+		clk_disable_unprepare(priv->clk_ipg);
+
 	return 0;
 }


@@ -21,6 +21,7 @@
 #ifndef __MSCAN_H__
 #define __MSCAN_H__
 
+#include <linux/clk.h>
 #include <linux/types.h>
 
 /* MSCAN control register 0 (CANCTL0) bits */
@@ -283,6 +284,8 @@ struct mscan_priv {
 	unsigned int type;	/* MSCAN type variants */
 	unsigned long flags;
 	void __iomem *reg_base;	/* ioremap'ed address to registers */
+	struct clk *clk_ipg;	/* clock for registers */
+	struct clk *clk_can;	/* clock for bitrates */
 	u8 shadow_statflg;
 	u8 shadow_canrier;
 	u8 cur_pri;


@@ -148,7 +148,7 @@ config PCMCIA_PCNET
 
 config NE_H8300
 	tristate "NE2000 compatible support for H8/300"
-	depends on H8300
+	depends on H8300H_AKI3068NET || H8300H_H8MAX
 	---help---
 	  Say Y here if you want to use the NE2000 compatible
 	  controller on the Renesas H8/300 processor.


@@ -707,7 +707,7 @@ static int ax_init_dev(struct net_device *dev)
 
 #ifdef CONFIG_AX88796_93CX6
 	if (ax->plat->flags & AXFLG_HAS_93CX6) {
-		unsigned char mac_addr[6];
+		unsigned char mac_addr[ETH_ALEN];
 		struct eeprom_93cx6 eeprom;
 
 		eeprom.data = ei_local;
@@ -719,7 +719,7 @@ static int ax_init_dev(struct net_device *dev)
 				       (__le16 __force *)mac_addr,
 				       sizeof(mac_addr) >> 1);
 
-		memcpy(dev->dev_addr, mac_addr, 6);
+		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 	}
 #endif
 	if (ax->plat->wordlength == 2) {
@@ -840,7 +840,7 @@ static int ax_probe(struct platform_device *pdev)
 	ei_local = netdev_priv(dev);
 	ax = to_ax_dev(dev);
 
-	ax->plat = pdev->dev.platform_data;
+	ax->plat = dev_get_platdata(&pdev->dev);
 	platform_set_drvdata(pdev, dev);
 
 	ei_local->rxcr_base = ax->plat->rcr_val;


@@ -90,6 +90,7 @@ source "drivers/net/ethernet/marvell/Kconfig"
 source "drivers/net/ethernet/mellanox/Kconfig"
 source "drivers/net/ethernet/micrel/Kconfig"
 source "drivers/net/ethernet/microchip/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
 source "drivers/net/ethernet/myricom/Kconfig"
 
 config FEALNX


@@ -42,6 +42,7 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
 obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
 obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
 obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/


@@ -1647,12 +1647,12 @@ static int bfin_mac_probe(struct platform_device *pdev)
 
 	setup_mac_addr(ndev->dev_addr);
 
-	if (!pdev->dev.platform_data) {
+	if (!dev_get_platdata(&pdev->dev)) {
 		dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
 		rc = -ENODEV;
 		goto out_err_probe_mac;
 	}
-	pd = pdev->dev.platform_data;
+	pd = dev_get_platdata(&pdev->dev);
 	lp->mii_bus = platform_get_drvdata(pd);
 	if (!lp->mii_bus) {
 		dev_err(&pdev->dev, "Cannot get mii_bus!\n");
@@ -1660,7 +1660,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
 		goto out_err_probe_mac;
 	}
 	lp->mii_bus->priv = ndev;
-	mii_bus_data = pd->dev.platform_data;
+	mii_bus_data = dev_get_platdata(&pd->dev);
 
 	rc = mii_probe(ndev, mii_bus_data->phy_mode);
 	if (rc) {


@@ -1464,18 +1464,18 @@ static int greth_of_probe(struct platform_device *ofdev)
 	}
 
 	/* Allocate TX descriptor ring in coherent memory */
-	greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+	greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
 					       &greth->tx_bd_base_phys,
-					       GFP_KERNEL | __GFP_ZERO);
+					       GFP_KERNEL);
 	if (!greth->tx_bd_base) {
 		err = -ENOMEM;
 		goto error3;
 	}
 
 	/* Allocate RX descriptor ring in coherent memory */
-	greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+	greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
 					       &greth->rx_bd_base_phys,
-					       GFP_KERNEL | __GFP_ZERO);
+					       GFP_KERNEL);
 	if (!greth->rx_bd_base) {
 		err = -ENOMEM;
 		goto error4;
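
The greth change is behaviour-preserving: at this point in the tree dma_zalloc_coherent() is just dma_alloc_coherent() with __GFP_ZERO folded in. A sketch of the equivalence:

static void *alloc_ring(struct device *dev, size_t size, dma_addr_t *handle)
{
	/* same result as dma_alloc_coherent(dev, size, handle,
	 * GFP_KERNEL | __GFP_ZERO): a zeroed, DMA-coherent buffer */
	return dma_zalloc_coherent(dev, size, handle, GFP_KERNEL);
}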


@@ -1131,7 +1131,7 @@ static int au1000_probe(struct platform_device *pdev)
 	writel(0, aup->enable);
 	aup->mac_enabled = 0;
 
-	pd = pdev->dev.platform_data;
+	pd = dev_get_platdata(&pdev->dev);
 	if (!pd) {
 		dev_info(&pdev->dev, "no platform_data passed,"
 			" PHY search on MAC0\n");


@@ -1521,7 +1521,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	char *chipname;
 	struct net_device *dev;
 	const struct pcnet32_access *a = NULL;
-	u8 promaddr[6];
+	u8 promaddr[ETH_ALEN];
 	int ret = -ENODEV;
 
 	/* reset the chip */
@@ -1665,10 +1665,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	}
 
 	/* read PROM address and compare with CSR address */
-	for (i = 0; i < 6; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		promaddr[i] = inb(ioaddr + i);
 
-	if (memcmp(promaddr, dev->dev_addr, 6) ||
+	if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) ||
 	    !is_valid_ether_addr(dev->dev_addr)) {
 		if (is_valid_ether_addr(promaddr)) {
 			if (pcnet32_debug & NETIF_MSG_PROBE) {


@@ -149,8 +149,6 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 		struct sk_buff *skb = tx_buff->skb;
 		unsigned int info = le32_to_cpu(txbd->info);
 
-		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
 		if ((info & FOR_EMAC) || !txbd->data)
 			break;
 
@@ -180,6 +178,8 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 		txbd->data = 0;
 		txbd->info = 0;
 
+		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
+
 		if (netif_queue_stopped(ndev))
 			netif_wake_queue(ndev);
 	}
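
The arc_emac fix is purely about ordering: the dirty index may only advance once a descriptor has actually been reclaimed. Advancing it before the FOR_EMAC ownership test, as the old code did, lets the index walk past descriptors the MAC still owns whenever the loop breaks early. The corrected loop shape, schematically (the unmap/free step and the per-iteration descriptor lookup are elided):

for (i = 0; i < TX_BD_NUM; i++) {
	unsigned int info = le32_to_cpu(txbd->info);

	if ((info & FOR_EMAC) || !txbd->data)
		break;				/* hardware still owns it */

	/* ... dma_unmap, dev_kfree_skb_irq, clear txbd->data/info ... */

	*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;	/* advance only now */
}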


@@ -130,7 +130,7 @@ config BNX2X_SRIOV
 
 config BGMAC
 	tristate "BCMA bus GBit core support"
-	depends on BCMA_HOST_SOC && HAS_DMA
+	depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
 	select PHYLIB
 	---help---
 	  This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.


@@ -948,8 +948,7 @@ static int bcm_enet_open(struct net_device *dev)
 
 	/* allocate rx dma ring */
 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
-	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
-			       GFP_KERNEL | __GFP_ZERO);
+	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
 	if (!p) {
 		ret = -ENOMEM;
 		goto out_freeirq_tx;
@@ -960,8 +959,7 @@ static int bcm_enet_open(struct net_device *dev)
 
 	/* allocate tx dma ring */
 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
-	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
-			       GFP_KERNEL | __GFP_ZERO);
+	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
 	if (!p) {
 		ret = -ENOMEM;
 		goto out_free_rx_ring;
@@ -1747,11 +1745,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	if (!bcm_enet_shared_base[0])
 		return -ENODEV;
 
-	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
 	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
-	if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
+	if (!res_irq || !res_irq_rx || !res_irq_tx)
 		return -ENODEV;
 
 	ret = 0;
@@ -1767,9 +1764,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	if (ret)
 		goto out;
 
-	priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
-	if (priv->base == NULL) {
-		ret = -ENOMEM;
+	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+	if (IS_ERR(priv->base)) {
+		ret = PTR_ERR(priv->base);
 		goto out;
 	}
 
@@ -1800,7 +1798,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
 	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
 
-	pd = pdev->dev.platform_data;
+	pd = dev_get_platdata(&pdev->dev);
 	if (pd) {
 		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
 		priv->has_phy = pd->has_phy;
@@ -1964,7 +1962,7 @@ static int bcm_enet_remove(struct platform_device *pdev)
 	} else {
 		struct bcm63xx_enet_platform_data *pd;
 
-		pd = pdev->dev.platform_data;
+		pd = dev_get_platdata(&pdev->dev);
 		if (pd && pd->mii_config)
 			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
 				       bcm_enet_mdio_write_mii);
@@ -2742,7 +2740,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
 	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
 	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
 
-	pd = pdev->dev.platform_data;
+	pd = dev_get_platdata(&pdev->dev);
 	if (pd) {
 		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
 		memcpy(priv->used_ports, pd->used_ports,
@@ -2836,7 +2834,6 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	release_mem_region(res->start, resource_size(res));
 
-	platform_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 	return 0;
 }
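
bcm_enet_probe() switches to devm_ioremap_resource(), which itself validates the resource, requests the region and returns an ERR_PTR on any failure, so the separate NULL check on res_mem can go. The resulting probe-time idiom (a sketch):

static void __iomem *map_mmio(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* devm_ioremap_resource() copes with res == NULL and requests the
	 * memory region itself, so no request_mem_region() is needed */
	return devm_ioremap_resource(&pdev->dev, res);
}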


@@ -1,6 +1,6 @@
/* bnx2.c: Broadcom NX2 network driver. /* bnx2.c: Broadcom NX2 network driver.
* *
* Copyright (c) 2004-2011 Broadcom Corporation * Copyright (c) 2004-2013 Broadcom Corporation
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by * it under the terms of the GNU General Public License as published by
@@ -58,8 +58,8 @@
#include "bnx2_fw.h" #include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2" #define DRV_MODULE_NAME "bnx2"
#define DRV_MODULE_VERSION "2.2.3" #define DRV_MODULE_VERSION "2.2.4"
#define DRV_MODULE_RELDATE "June 27, 2012" #define DRV_MODULE_RELDATE "Aug 05, 2013"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -853,9 +853,8 @@ bnx2_alloc_mem(struct bnx2 *bp)
bp->status_stats_size = status_blk_size + bp->status_stats_size = status_blk_size +
sizeof(struct statistics_block); sizeof(struct statistics_block);
status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
&bp->status_blk_mapping, &bp->status_blk_mapping, GFP_KERNEL);
GFP_KERNEL | __GFP_ZERO);
if (status_blk == NULL) if (status_blk == NULL)
goto alloc_mem_err; goto alloc_mem_err;
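Another mechanical substitution: dma_zalloc_coherent() is a thin wrapper that ORs __GFP_ZERO into the gfp flags, so the old and new forms return the same zeroed block. A sketch of the equivalent allocation:

        static void *foo_alloc_status_blk(struct device *dev, size_t size,
                                          dma_addr_t *mapping)
        {
                /* same as dma_alloc_coherent(dev, size, mapping,
                 *                            GFP_KERNEL | __GFP_ZERO) */
                return dma_zalloc_coherent(dev, size, mapping, GFP_KERNEL);
        }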
@@ -3908,24 +3907,95 @@ init_cpu_err:
return rc; return rc;
} }
static void
bnx2_setup_wol(struct bnx2 *bp)
{
int i;
u32 val, wol_msg;
if (bp->wol) {
u32 advertising;
u8 autoneg;
autoneg = bp->autoneg;
advertising = bp->advertising;
if (bp->phy_port == PORT_TP) {
bp->autoneg = AUTONEG_SPEED;
bp->advertising = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_Autoneg;
}
spin_lock_bh(&bp->phy_lock);
bnx2_setup_phy(bp, bp->phy_port);
spin_unlock_bh(&bp->phy_lock);
bp->autoneg = autoneg;
bp->advertising = advertising;
bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
val = BNX2_RD(bp, BNX2_EMAC_MODE);
/* Enable port mode. */
val &= ~BNX2_EMAC_MODE_PORT;
val |= BNX2_EMAC_MODE_MPKT_RCVD |
BNX2_EMAC_MODE_ACPI_RCVD |
BNX2_EMAC_MODE_MPKT;
if (bp->phy_port == PORT_TP) {
val |= BNX2_EMAC_MODE_PORT_MII;
} else {
val |= BNX2_EMAC_MODE_PORT_GMII;
if (bp->line_speed == SPEED_2500)
val |= BNX2_EMAC_MODE_25G_MODE;
}
BNX2_WR(bp, BNX2_EMAC_MODE, val);
/* receive all multicast */
for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
0xffffffff);
}
BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
/* Need to enable EMAC and RPM for WOL. */
BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
val = BNX2_RD(bp, BNX2_RPM_CONFIG);
val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
BNX2_WR(bp, BNX2_RPM_CONFIG, val);
wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
} else {
wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
}
if (!(bp->flags & BNX2_FLAG_NO_WOL))
bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
}
static int static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{ {
u16 pmcsr;
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
switch (state) { switch (state) {
case PCI_D0: { case PCI_D0: {
u32 val; u32 val;
pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pci_enable_wake(bp->pdev, PCI_D0, false);
(pmcsr & ~PCI_PM_CTRL_STATE_MASK) | pci_set_power_state(bp->pdev, PCI_D0);
PCI_PM_CTRL_PME_STATUS);
if (pmcsr & PCI_PM_CTRL_STATE_MASK)
/* delay required during transition out of D3hot */
msleep(20);
val = BNX2_RD(bp, BNX2_EMAC_MODE); val = BNX2_RD(bp, BNX2_EMAC_MODE);
val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
@@ -3938,106 +4008,20 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
break; break;
} }
case PCI_D3hot: { case PCI_D3hot: {
int i; bnx2_setup_wol(bp);
u32 val, wol_msg; pci_wake_from_d3(bp->pdev, bp->wol);
if (bp->wol) {
u32 advertising;
u8 autoneg;
autoneg = bp->autoneg;
advertising = bp->advertising;
if (bp->phy_port == PORT_TP) {
bp->autoneg = AUTONEG_SPEED;
bp->advertising = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_Autoneg;
}
spin_lock_bh(&bp->phy_lock);
bnx2_setup_phy(bp, bp->phy_port);
spin_unlock_bh(&bp->phy_lock);
bp->autoneg = autoneg;
bp->advertising = advertising;
bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
val = BNX2_RD(bp, BNX2_EMAC_MODE);
/* Enable port mode. */
val &= ~BNX2_EMAC_MODE_PORT;
val |= BNX2_EMAC_MODE_MPKT_RCVD |
BNX2_EMAC_MODE_ACPI_RCVD |
BNX2_EMAC_MODE_MPKT;
if (bp->phy_port == PORT_TP)
val |= BNX2_EMAC_MODE_PORT_MII;
else {
val |= BNX2_EMAC_MODE_PORT_GMII;
if (bp->line_speed == SPEED_2500)
val |= BNX2_EMAC_MODE_25G_MODE;
}
BNX2_WR(bp, BNX2_EMAC_MODE, val);
/* receive all multicast */
for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
0xffffffff);
}
BNX2_WR(bp, BNX2_EMAC_RX_MODE,
BNX2_EMAC_RX_MODE_SORT_MODE);
val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
BNX2_RPM_SORT_USER0_MC_EN;
BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
BNX2_RPM_SORT_USER0_ENA);
/* Need to enable EMAC and RPM for WOL. */
BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
val = BNX2_RD(bp, BNX2_RPM_CONFIG);
val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
BNX2_WR(bp, BNX2_RPM_CONFIG, val);
wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
}
else {
wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
}
if (!(bp->flags & BNX2_FLAG_NO_WOL))
bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
1, 0);
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
(BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) { (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
if (bp->wol) if (bp->wol)
pmcsr |= 3; pci_set_power_state(bp->pdev, PCI_D3hot);
} else {
pci_set_power_state(bp->pdev, PCI_D3hot);
} }
else {
pmcsr |= 3;
}
if (bp->wol) {
pmcsr |= PCI_PM_CTRL_PME_ENABLE;
}
pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
pmcsr);
/* No more memory access after this point until /* No more memory access after this point until
* device is brought back to D0. * device is brought back to D0.
*/ */
udelay(50);
break; break;
} }
default: default:
@@ -6317,7 +6301,6 @@ bnx2_open(struct net_device *dev)
netif_carrier_off(dev); netif_carrier_off(dev);
bnx2_set_power_state(bp, PCI_D0);
bnx2_disable_int(bp); bnx2_disable_int(bp);
rc = bnx2_setup_int_mode(bp, disable_msi); rc = bnx2_setup_int_mode(bp, disable_msi);
@@ -6724,7 +6707,6 @@ bnx2_close(struct net_device *dev)
bnx2_del_napi(bp); bnx2_del_napi(bp);
bp->link_up = 0; bp->link_up = 0;
netif_carrier_off(bp->dev); netif_carrier_off(bp->dev);
bnx2_set_power_state(bp, PCI_D3hot);
return 0; return 0;
} }
@@ -7081,6 +7063,9 @@ bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
else { else {
bp->wol = 0; bp->wol = 0;
} }
device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
return 0; return 0;
} }
@@ -7156,9 +7141,6 @@ bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
struct bnx2 *bp = netdev_priv(dev); struct bnx2 *bp = netdev_priv(dev);
int rc; int rc;
if (!netif_running(dev))
return -EAGAIN;
/* parameters already validated in ethtool_get_eeprom */ /* parameters already validated in ethtool_get_eeprom */
rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7173,9 +7155,6 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
struct bnx2 *bp = netdev_priv(dev); struct bnx2 *bp = netdev_priv(dev);
int rc; int rc;
if (!netif_running(dev))
return -EAGAIN;
/* parameters already validated in ethtool_set_eeprom */ /* parameters already validated in ethtool_set_eeprom */
rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7535,8 +7514,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{ {
struct bnx2 *bp = netdev_priv(dev); struct bnx2 *bp = netdev_priv(dev);
bnx2_set_power_state(bp, PCI_D0);
memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS); memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
if (etest->flags & ETH_TEST_FL_OFFLINE) { if (etest->flags & ETH_TEST_FL_OFFLINE) {
int i; int i;
@@ -7585,8 +7562,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
etest->flags |= ETH_TEST_FL_FAILED; etest->flags |= ETH_TEST_FL_FAILED;
} }
if (!netif_running(bp->dev))
bnx2_set_power_state(bp, PCI_D3hot);
} }
static void static void
@@ -7658,8 +7633,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
switch (state) { switch (state) {
case ETHTOOL_ID_ACTIVE: case ETHTOOL_ID_ACTIVE:
bnx2_set_power_state(bp, PCI_D0);
bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG); bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
return 1; /* cycle on/off once per second */ return 1; /* cycle on/off once per second */
@@ -7680,9 +7653,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
case ETHTOOL_ID_INACTIVE: case ETHTOOL_ID_INACTIVE:
BNX2_WR(bp, BNX2_EMAC_LED, 0); BNX2_WR(bp, BNX2_EMAC_LED, 0);
BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save); BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
if (!netif_running(dev))
bnx2_set_power_state(bp, PCI_D3hot);
break; break;
} }
@@ -8130,8 +8100,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
goto err_out_release; goto err_out_release;
} }
bnx2_set_power_state(bp, PCI_D0);
/* Configure byte swap and enable write to the reg_window registers. /* Configure byte swap and enable write to the reg_window registers.
* Rely on CPU to do target byte swapping on big endian systems * Rely on CPU to do target byte swapping on big endian systems
* The chip's target access swapping will not swap all accesses * The chip's target access swapping will not swap all accesses
@@ -8170,13 +8138,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (BNX2_CHIP(bp) == BNX2_CHIP_5709 && if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) { BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) if (pdev->msix_cap)
bp->flags |= BNX2_FLAG_MSIX_CAP; bp->flags |= BNX2_FLAG_MSIX_CAP;
} }
if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 && if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) { BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) if (pdev->msi_cap)
bp->flags |= BNX2_FLAG_MSI_CAP; bp->flags |= BNX2_FLAG_MSI_CAP;
} }
@@ -8369,6 +8337,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->wol = 0; bp->wol = 0;
} }
if (bp->flags & BNX2_FLAG_NO_WOL)
device_set_wakeup_capable(&bp->pdev->dev, false);
else
device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
bp->tx_quick_cons_trip_int = bp->tx_quick_cons_trip_int =
bp->tx_quick_cons_trip; bp->tx_quick_cons_trip;
@@ -8609,46 +8582,52 @@ bnx2_remove_one(struct pci_dev *pdev)
} }
static int static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state) bnx2_suspend(struct device *device)
{ {
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev); struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev); struct bnx2 *bp = netdev_priv(dev);
/* PCI register 4 needs to be saved whether netif_running() or not. if (netif_running(dev)) {
* MSI address and data need to be saved if using MSI and cancel_work_sync(&bp->reset_task);
* netif_running(). bnx2_netif_stop(bp, true);
*/ netif_device_detach(dev);
pci_save_state(pdev); del_timer_sync(&bp->timer);
if (!netif_running(dev)) bnx2_shutdown_chip(bp);
return 0; __bnx2_free_irq(bp);
bnx2_free_skbs(bp);
cancel_work_sync(&bp->reset_task); }
bnx2_netif_stop(bp, true); bnx2_setup_wol(bp);
netif_device_detach(dev);
del_timer_sync(&bp->timer);
bnx2_shutdown_chip(bp);
bnx2_free_skbs(bp);
bnx2_set_power_state(bp, pci_choose_state(pdev, state));
return 0; return 0;
} }
static int static int
bnx2_resume(struct pci_dev *pdev) bnx2_resume(struct device *device)
{ {
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev); struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev); struct bnx2 *bp = netdev_priv(dev);
pci_restore_state(pdev);
if (!netif_running(dev)) if (!netif_running(dev))
return 0; return 0;
bnx2_set_power_state(bp, PCI_D0); bnx2_set_power_state(bp, PCI_D0);
netif_device_attach(dev); netif_device_attach(dev);
bnx2_request_irq(bp);
bnx2_init_nic(bp, 1); bnx2_init_nic(bp, 1);
bnx2_netif_start(bp, true); bnx2_netif_start(bp, true);
return 0; return 0;
} }
#ifdef CONFIG_PM_SLEEP
static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)
#else
#define BNX2_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
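The suspend/resume pair likewise migrates from the legacy struct pci_driver .suspend/.resume hooks (taking a pci_dev and a pm_message_t) to dev_pm_ops callbacks taking a struct device, registered through SIMPLE_DEV_PM_OPS as seen above. A stripped-down sketch of the conversion (hypothetical driver names):

        static int foo_suspend(struct device *device)
        {
                struct pci_dev *pdev = to_pci_dev(device);

                /* quiesce the hardware; note there is no pci_save_state()
                 * here -- the PCI core handles that for dev_pm_ops drivers */
                (void)pdev;
                return 0;
        }

        static int foo_resume(struct device *device)
        {
                /* re-initialize the hardware */
                return 0;
        }

        static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

        static struct pci_driver foo_driver = {
                .name      = "foo",
                .driver.pm = &foo_pm_ops,       /* replaces .suspend/.resume */
        };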
/** /**
* bnx2_io_error_detected - called when PCI error is detected * bnx2_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device * @pdev: Pointer to PCI device
@@ -8694,24 +8673,28 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{ {
struct net_device *dev = pci_get_drvdata(pdev); struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev); struct bnx2 *bp = netdev_priv(dev);
pci_ers_result_t result; pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
int err; int err = 0;
rtnl_lock(); rtnl_lock();
if (pci_enable_device(pdev)) { if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n"); "Cannot re-enable PCI device after reset\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else { } else {
pci_set_master(pdev); pci_set_master(pdev);
pci_restore_state(pdev); pci_restore_state(pdev);
pci_save_state(pdev); pci_save_state(pdev);
if (netif_running(dev)) { if (netif_running(dev))
bnx2_set_power_state(bp, PCI_D0); err = bnx2_init_nic(bp, 1);
bnx2_init_nic(bp, 1);
} if (!err)
result = PCI_ERS_RESULT_RECOVERED; result = PCI_ERS_RESULT_RECOVERED;
}
if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
bnx2_napi_enable(bp);
dev_close(dev);
} }
rtnl_unlock(); rtnl_unlock();
@@ -8748,6 +8731,28 @@ static void bnx2_io_resume(struct pci_dev *pdev)
rtnl_unlock(); rtnl_unlock();
} }
static void bnx2_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp;
if (!dev)
return;
bp = netdev_priv(dev);
if (!bp)
return;
rtnl_lock();
if (netif_running(dev))
dev_close(bp->dev);
if (system_state == SYSTEM_POWER_OFF)
bnx2_set_power_state(bp, PCI_D3hot);
rtnl_unlock();
}
static const struct pci_error_handlers bnx2_err_handler = { static const struct pci_error_handlers bnx2_err_handler = {
.error_detected = bnx2_io_error_detected, .error_detected = bnx2_io_error_detected,
.slot_reset = bnx2_io_slot_reset, .slot_reset = bnx2_io_slot_reset,
@@ -8759,9 +8764,9 @@ static struct pci_driver bnx2_pci_driver = {
.id_table = bnx2_pci_tbl, .id_table = bnx2_pci_tbl,
.probe = bnx2_init_one, .probe = bnx2_init_one,
.remove = bnx2_remove_one, .remove = bnx2_remove_one,
.suspend = bnx2_suspend, .driver.pm = BNX2_PM_OPS,
.resume = bnx2_resume,
.err_handler = &bnx2_err_handler, .err_handler = &bnx2_err_handler,
.shutdown = bnx2_shutdown,
}; };
module_pci_driver(bnx2_pci_driver); module_pci_driver(bnx2_pci_driver);


@@ -1,6 +1,6 @@
/* bnx2.h: Broadcom NX2 network driver. /* bnx2.h: Broadcom NX2 network driver.
* *
* Copyright (c) 2004-2011 Broadcom Corporation * Copyright (c) 2004-2013 Broadcom Corporation
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by * it under the terms of the GNU General Public License as published by


@@ -825,15 +825,13 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) #define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
#define BNX2X_DB_SHIFT 7 /* 128 bytes*/ #define BNX2X_DB_SHIFT 3 /* 8 bytes*/
#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
#error "Min DB doorbell stride is 8" #error "Min DB doorbell stride is 8"
#endif #endif
#define DPM_TRIGER_TYPE 0x40
#define DOORBELL(bp, cid, val) \ #define DOORBELL(bp, cid, val) \
do { \ do { \
writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \ writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
DPM_TRIGER_TYPE); \
} while (0) } while (0)
/* TX CSUM helpers */ /* TX CSUM helpers */
@@ -1100,13 +1098,27 @@ struct bnx2x_port {
extern struct workqueue_struct *bnx2x_wq; extern struct workqueue_struct *bnx2x_wq;
#define BNX2X_MAX_NUM_OF_VFS 64 #define BNX2X_MAX_NUM_OF_VFS 64
#define BNX2X_VF_CID_WND 0 #define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) #define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
#define BNX2X_CLIENTS_PER_VF 1
#define BNX2X_FIRST_VF_CID 256 /* We need to reserve doorbell addresses for all VF and queue combinations */
#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF) #define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
/* The doorbell is configured to have the same number of CIDs for PFs and for
* VFs. For this reason the PF CID zone is as large as the VF zone.
*/
#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS
#define BNX2X_MAX_NUM_VF_QUEUES 64
#define BNX2X_VF_ID_INVALID 0xFF #define BNX2X_VF_ID_INVALID 0xFF
/* the number of VF CIDS multiplied by the amount of bytes reserved for each
* cid must not exceed the size of the VF doorbell
*/
#define BNX2X_VF_BAR_SIZE 512
#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
#error "VF doorbell bar size is 512"
#endif
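Plugging the constants from this hunk into the new compile-time check makes the sizing easier to follow:

        /*
         * CIDs per VF          = 1 << BNX2X_VF_CID_WND = 1 << 4 = 16
         * doorbell stride      = 1 << BNX2X_DB_SHIFT   = 1 << 3 = 8 bytes
         * per-VF doorbell need = 16 * 8 = 128 <= 512 = BNX2X_VF_BAR_SIZE
         *
         * so the #error cannot fire with these values; and the VF CID zone
         * is 64 VFs * 16 CIDs = 1024 = BNX2X_VF_CIDS = BNX2X_FIRST_VF_CID,
         * i.e. the PF zone below it is equally sized, as the comment says.
         */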
/* /*
* The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
* control by the number of fast-path status blocks supported by the * control by the number of fast-path status blocks supported by the
@@ -1331,7 +1343,7 @@ enum {
BNX2X_SP_RTNL_ENABLE_SRIOV, BNX2X_SP_RTNL_ENABLE_SRIOV,
BNX2X_SP_RTNL_VFPF_MCAST, BNX2X_SP_RTNL_VFPF_MCAST,
BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, BNX2X_SP_RTNL_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_SP_RTNL_TX_STOP, BNX2X_SP_RTNL_TX_STOP,
BNX2X_SP_RTNL_TX_RESUME, BNX2X_SP_RTNL_TX_RESUME,
@@ -1650,10 +1662,10 @@ struct bnx2x {
dma_addr_t fw_stats_data_mapping; dma_addr_t fw_stats_data_mapping;
int fw_stats_data_sz; int fw_stats_data_sz;
/* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
* context size we need 8 ILT entries. * context size we need 32 ILT entries.
*/ */
#define ILT_MAX_L2_LINES 8 #define ILT_MAX_L2_LINES 32
struct hw_context context[ILT_MAX_L2_LINES]; struct hw_context context[ILT_MAX_L2_LINES];
struct bnx2x_ilt *ilt; struct bnx2x_ilt *ilt;
@@ -1869,7 +1881,7 @@ extern int num_queues;
#define FUNC_FLG_TPA 0x0008 #define FUNC_FLG_TPA 0x0008
#define FUNC_FLG_SPQ 0x0010 #define FUNC_FLG_SPQ 0x0010
#define FUNC_FLG_LEADING 0x0020 /* PF only */ #define FUNC_FLG_LEADING 0x0020 /* PF only */
#define FUNC_FLG_LEADING_STATS 0x0040
struct bnx2x_func_init_params { struct bnx2x_func_init_params {
/* dma */ /* dma */
dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
@@ -2069,9 +2081,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf); bool is_pf);
#define BNX2X_ILT_ZALLOC(x, y, size) \ #define BNX2X_ILT_ZALLOC(x, y, size) \
x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
GFP_KERNEL | __GFP_ZERO)
#define BNX2X_ILT_FREE(x, y, size) \ #define BNX2X_ILT_FREE(x, y, size) \
do { \ do { \


@@ -1948,7 +1948,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
} }
} }
static int bnx2x_init_rss_pf(struct bnx2x *bp) static int bnx2x_init_rss(struct bnx2x *bp)
{ {
int i; int i;
u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
@@ -1972,8 +1972,8 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
} }
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
bool config_hash) bool config_hash, bool enable)
{ {
struct bnx2x_config_rss_params params = {NULL}; struct bnx2x_config_rss_params params = {NULL};
@@ -1988,17 +1988,21 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags); if (enable) {
__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
/* RSS configuration */ /* RSS configuration */
__set_bit(BNX2X_RSS_IPV4, &params.rss_flags); __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags); __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV6, &params.rss_flags); __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags); __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
if (rss_obj->udp_rss_v4) if (rss_obj->udp_rss_v4)
__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags); __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
if (rss_obj->udp_rss_v6) if (rss_obj->udp_rss_v6)
__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags); __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
} else {
__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
}
/* Hash bits */ /* Hash bits */
params.rss_result_mask = MULTI_MASK; params.rss_result_mask = MULTI_MASK;
@@ -2007,11 +2011,14 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (config_hash) { if (config_hash) {
/* RSS keys */ /* RSS keys */
prandom_bytes(params.rss_key, sizeof(params.rss_key)); prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags); __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
} }
return bnx2x_config_rss(bp, &params); if (IS_PF(bp))
return bnx2x_config_rss(bp, &params);
else
return bnx2x_vfpf_config_rss(bp, &params);
} }
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
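bnx2x_config_rss_pf() is generalized into bnx2x_rss(): the new enable flag selects BNX2X_RSS_MODE_REGULAR versus BNX2X_RSS_MODE_DISABLED through a single path, and PF/VF dispatch is hidden behind the same call (VFs now go through bnx2x_vfpf_config_rss()). Disabling RSS via the new signature would look roughly like this (hypothetical helper, not part of the patch):

        static int foo_rss_off(struct bnx2x *bp)
        {
                /* config_hash=false keeps the current keys;
                 * enable=false selects BNX2X_RSS_MODE_DISABLED */
                return bnx2x_rss(bp, &bp->rss_conf_obj, false, false);
        }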
@@ -2066,7 +2073,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
rparam.mcast_obj = &bp->mcast_obj; rparam.mcast_obj = &bp->mcast_obj;
__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
/* Add a DEL command... */ /* Add a DEL command... - Since we're doing a driver cleanup only,
* we take a lock surrounding both the initial send and the CONTs,
* as we don't want a true completion to disrupt us in the middle.
*/
netif_addr_lock_bh(bp->dev);
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
if (rc < 0) if (rc < 0)
BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n", BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
@@ -2078,11 +2089,13 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
if (rc < 0) { if (rc < 0) {
BNX2X_ERR("Failed to clean multi-cast object: %d\n", BNX2X_ERR("Failed to clean multi-cast object: %d\n",
rc); rc);
netif_addr_unlock_bh(bp->dev);
return; return;
} }
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
} }
netif_addr_unlock_bh(bp->dev);
} }
#ifndef BNX2X_STOP_ON_ERROR #ifndef BNX2X_STOP_ON_ERROR
@@ -2438,9 +2451,7 @@ int bnx2x_load_cnic(struct bnx2x *bp)
} }
/* Initialize Rx filter. */ /* Initialize Rx filter. */
netif_addr_lock_bh(bp->dev); bnx2x_set_rx_mode_inner(bp);
bnx2x_set_rx_mode(bp->dev);
netif_addr_unlock_bh(bp->dev);
/* re-read iscsi info */ /* re-read iscsi info */
bnx2x_get_iscsi_info(bp); bnx2x_get_iscsi_info(bp);
@@ -2647,38 +2658,32 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* initialize FW coalescing state machines in RAM */ /* initialize FW coalescing state machines in RAM */
bnx2x_update_coalesce(bp); bnx2x_update_coalesce(bp);
}
/* setup the leading queue */ /* setup the leading queue */
rc = bnx2x_setup_leading(bp); rc = bnx2x_setup_leading(bp);
if (rc) {
BNX2X_ERR("Setup leading failed!\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
/* set up the rest of the queues */
for_each_nondefault_eth_queue(bp, i) {
if (IS_PF(bp))
rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
else /* VF */
rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
if (rc) { if (rc) {
BNX2X_ERR("Setup leading failed!\n"); BNX2X_ERR("Queue %d setup failed\n", i);
LOAD_ERROR_EXIT(bp, load_error3); LOAD_ERROR_EXIT(bp, load_error3);
} }
}
/* set up the rest of the queues */ /* setup rss */
for_each_nondefault_eth_queue(bp, i) { rc = bnx2x_init_rss(bp);
rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); if (rc) {
if (rc) { BNX2X_ERR("PF RSS init failed\n");
BNX2X_ERR("Queue setup failed\n"); LOAD_ERROR_EXIT(bp, load_error3);
LOAD_ERROR_EXIT(bp, load_error3);
}
}
/* setup rss */
rc = bnx2x_init_rss_pf(bp);
if (rc) {
BNX2X_ERR("PF RSS init failed\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
} else { /* vf */
for_each_eth_queue(bp, i) {
rc = bnx2x_vfpf_setup_q(bp, i);
if (rc) {
BNX2X_ERR("Queue setup failed\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
}
} }
/* Now when Clients are configured we are ready to work */ /* Now when Clients are configured we are ready to work */
@@ -2710,9 +2715,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Start fast path */ /* Start fast path */
/* Initialize Rx filter. */ /* Initialize Rx filter. */
netif_addr_lock_bh(bp->dev); bnx2x_set_rx_mode_inner(bp);
bnx2x_set_rx_mode(bp->dev);
netif_addr_unlock_bh(bp->dev);
/* Start the Tx */ /* Start the Tx */
switch (load_mode) { switch (load_mode) {
@@ -4789,6 +4792,11 @@ int bnx2x_resume(struct pci_dev *pdev)
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
u32 cid) u32 cid)
{ {
if (!cxt) {
BNX2X_ERR("bad context pointer %p\n", cxt);
return;
}
/* ustorm cxt validation */ /* ustorm cxt validation */
cxt->ustorm_ag_context.cdu_usage = cxt->ustorm_ag_context.cdu_usage =
CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),


@@ -51,8 +51,7 @@ extern int int_mode;
#define BNX2X_PCI_ALLOC(x, y, size) \ #define BNX2X_PCI_ALLOC(x, y, size) \
do { \ do { \
x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
GFP_KERNEL | __GFP_ZERO); \
if (x == NULL) \ if (x == NULL) \
goto alloc_mem_err; \ goto alloc_mem_err; \
DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
@@ -106,9 +105,10 @@ void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
* @rss_obj: RSS object to use * @rss_obj: RSS object to use
* @ind_table: indirection table to configure * @ind_table: indirection table to configure
* @config_hash: re-configure RSS hash keys configuration * @config_hash: re-configure RSS hash keys configuration
* @enable: enabled or disabled configuration
*/ */
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
bool config_hash); bool config_hash, bool enable);
/** /**
* bnx2x__init_func_obj - init function object * bnx2x__init_func_obj - init function object
@@ -418,6 +418,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
* netif_addr_lock_bh() * netif_addr_lock_bh()
*/ */
void bnx2x_set_rx_mode(struct net_device *dev); void bnx2x_set_rx_mode(struct net_device *dev);
void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
/** /**
* bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
@@ -980,7 +981,7 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash) static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{ {
return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash); return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
} }
/** /**


@@ -3281,14 +3281,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL, DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n", "rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled"); udp_rss_requested ? "enabled" : "disabled");
return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
} else if ((info->flow_type == UDP_V6_FLOW) && } else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL, DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n", "rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled"); udp_rss_requested ? "enabled" : "disabled");
return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
} }
return 0; return 0;


@@ -6501,12 +6501,13 @@ static int bnx2x_link_initialize(struct link_params *params,
struct bnx2x_phy *phy = &params->phy[INT_PHY]; struct bnx2x_phy *phy = &params->phy[INT_PHY];
if (vars->line_speed == SPEED_AUTO_NEG && if (vars->line_speed == SPEED_AUTO_NEG &&
(CHIP_IS_E1x(bp) || (CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp))) CHIP_IS_E2(bp))) {
bnx2x_set_parallel_detection(phy, params); bnx2x_set_parallel_detection(phy, params);
if (params->phy[INT_PHY].config_init) if (params->phy[INT_PHY].config_init)
params->phy[INT_PHY].config_init(phy, params->phy[INT_PHY].config_init(phy,
params, params,
vars); vars);
}
} }
/* Init external phy*/ /* Init external phy*/


@@ -6893,7 +6893,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
if (!CHIP_REV_IS_SLOW(bp)) if (!CHIP_REV_IS_SLOW(bp))
/* enable hw interrupt from doorbell Q */ /* enable hw interrupt from doorbell Q */
REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
@@ -8063,7 +8063,10 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
int bnx2x_setup_leading(struct bnx2x *bp) int bnx2x_setup_leading(struct bnx2x *bp)
{ {
return bnx2x_setup_queue(bp, &bp->fp[0], 1); if (IS_PF(bp))
return bnx2x_setup_queue(bp, &bp->fp[0], true);
else /* VF */
return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
} }
/** /**
@@ -8077,8 +8080,10 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
{ {
int rc = 0; int rc = 0;
if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
return -EINVAL; return -EINVAL;
}
switch (int_mode) { switch (int_mode) {
case BNX2X_INT_MODE_MSIX: case BNX2X_INT_MODE_MSIX:
@@ -9647,11 +9652,9 @@ sp_rtnl_not_reset:
} }
} }
if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
&bp->sp_rtnl_state)) { DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
DP(BNX2X_MSG_SP, bnx2x_set_rx_mode_inner(bp);
"sending set storm rx mode vf pf channel message from rtnl sp-task\n");
bnx2x_vfpf_storm_rx_mode(bp);
} }
if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
@@ -11649,9 +11652,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* second status block for the L2 queue, and a third status block for * second status block for the L2 queue, and a third status block for
* CNIC if supported. * CNIC if supported.
*/ */
if (CNIC_SUPPORT(bp)) if (IS_VF(bp))
bp->min_msix_vec_cnt = 1;
else if (CNIC_SUPPORT(bp))
bp->min_msix_vec_cnt = 3; bp->min_msix_vec_cnt = 3;
else else /* PF w/o cnic */
bp->min_msix_vec_cnt = 2; bp->min_msix_vec_cnt = 2;
BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
@@ -11868,34 +11873,48 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
void bnx2x_set_rx_mode(struct net_device *dev) void bnx2x_set_rx_mode(struct net_device *dev)
{ {
struct bnx2x *bp = netdev_priv(dev); struct bnx2x *bp = netdev_priv(dev);
u32 rx_mode = BNX2X_RX_MODE_NORMAL;
if (bp->state != BNX2X_STATE_OPEN) { if (bp->state != BNX2X_STATE_OPEN) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
return; return;
} else {
/* Schedule an SP task to handle rest of change */
DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
smp_mb__before_clear_bit();
set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
smp_mb__after_clear_bit();
schedule_delayed_work(&bp->sp_rtnl_task, 0);
} }
}
void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
{
u32 rx_mode = BNX2X_RX_MODE_NORMAL;
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
if (dev->flags & IFF_PROMISC) netif_addr_lock_bh(bp->dev);
if (bp->dev->flags & IFF_PROMISC) {
rx_mode = BNX2X_RX_MODE_PROMISC; rx_mode = BNX2X_RX_MODE_PROMISC;
else if ((dev->flags & IFF_ALLMULTI) || } else if ((bp->dev->flags & IFF_ALLMULTI) ||
((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
CHIP_IS_E1(bp))) CHIP_IS_E1(bp))) {
rx_mode = BNX2X_RX_MODE_ALLMULTI; rx_mode = BNX2X_RX_MODE_ALLMULTI;
else { } else {
if (IS_PF(bp)) { if (IS_PF(bp)) {
/* some multicasts */ /* some multicasts */
if (bnx2x_set_mc_list(bp) < 0) if (bnx2x_set_mc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_ALLMULTI; rx_mode = BNX2X_RX_MODE_ALLMULTI;
/* release bh lock, as bnx2x_set_uc_list might sleep */
netif_addr_unlock_bh(bp->dev);
if (bnx2x_set_uc_list(bp) < 0) if (bnx2x_set_uc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_PROMISC; rx_mode = BNX2X_RX_MODE_PROMISC;
netif_addr_lock_bh(bp->dev);
} else { } else {
/* configuring mcast to a vf involves sleeping (when we /* configuring mcast to a vf involves sleeping (when we
* wait for the pf's response). Since this function is * wait for the pf's response).
* called from non sleepable context we must schedule
* a work item for this purpose
*/ */
smp_mb__before_clear_bit(); smp_mb__before_clear_bit();
set_bit(BNX2X_SP_RTNL_VFPF_MCAST, set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
@@ -11913,22 +11932,20 @@ void bnx2x_set_rx_mode(struct net_device *dev)
/* Schedule the rx_mode command */ /* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
netif_addr_unlock_bh(bp->dev);
return; return;
} }
if (IS_PF(bp)) { if (IS_PF(bp)) {
bnx2x_set_storm_rx_mode(bp); bnx2x_set_storm_rx_mode(bp);
netif_addr_unlock_bh(bp->dev);
} else { } else {
/* configuring rx mode to storms in a vf involves sleeping (when /* VF will need to request the PF to make this change, and so
* we wait for the pf's response). Since this function is * the VF needs to release the bottom-half lock prior to the
* called from non sleepable context we must schedule * request (as it will likely require sleep on the VF side)
* a work item for this purpose
*/ */
smp_mb__before_clear_bit(); netif_addr_unlock_bh(bp->dev);
set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, bnx2x_vfpf_storm_rx_mode(bp);
&bp->sp_rtnl_state);
smp_mb__after_clear_bit();
schedule_delayed_work(&bp->sp_rtnl_task, 0);
} }
} }
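The restructuring splits rx-mode handling in two: the ndo_set_rx_mode entry point runs under the netdev address spinlock and must not sleep, so it now only flags the request and schedules the sp_rtnl task, while bnx2x_set_rx_mode_inner() does the real work in process context, dropping the bottom-half lock around the parts that can sleep (unicast list programming on the PF, the PF/VF channel request on a VF). A minimal sketch of this defer-to-worker pattern (all names hypothetical):

        #include <linux/netdevice.h>
        #include <linux/workqueue.h>

        struct foo_priv {
                unsigned long sp_state;
                struct delayed_work sp_task;
        };
        #define FOO_SP_RX_MODE 0

        static void foo_apply_rx_mode(struct foo_priv *priv)
        {
                /* process context: may sleep, e.g. for a VF->PF request */
        }

        static void foo_set_rx_mode(struct net_device *dev)       /* atomic */
        {
                struct foo_priv *priv = netdev_priv(dev);

                set_bit(FOO_SP_RX_MODE, &priv->sp_state);
                schedule_delayed_work(&priv->sp_task, 0);
        }

        static void foo_sp_task(struct work_struct *work)         /* may sleep */
        {
                struct foo_priv *priv = container_of(to_delayed_work(work),
                                                     struct foo_priv, sp_task);

                if (test_and_clear_bit(FOO_SP_RX_MODE, &priv->sp_state))
                        foo_apply_rx_mode(priv);
        }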
@@ -12550,19 +12567,16 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
* @dev: pci device * @dev: pci device
* *
*/ */
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
int cnic_cnt, bool is_vf)
{ {
int pos, index; int index;
u16 control = 0; u16 control = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
/* /*
* If MSI-X is not supported - return number of SBs needed to support * If MSI-X is not supported - return number of SBs needed to support
* one fast path queue: one FP queue + SB for CNIC * one fast path queue: one FP queue + SB for CNIC
*/ */
if (!pos) { if (!pdev->msix_cap) {
dev_info(&pdev->dev, "no msix capability found\n"); dev_info(&pdev->dev, "no msix capability found\n");
return 1 + cnic_cnt; return 1 + cnic_cnt;
} }
@@ -12575,11 +12589,11 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
* without the default SB. * without the default SB.
* For VFs there is no default SB, then we return (index+1). * For VFs there is no default SB, then we return (index+1).
*/ */
pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
index = control & PCI_MSIX_FLAGS_QSIZE; index = control & PCI_MSIX_FLAGS_QSIZE;
return is_vf ? index + 1 : index; return index;
} }
static int set_max_cos_est(int chip_id) static int set_max_cos_est(int chip_id)
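The pci_find_capability() walks are dropped in favor of the msi_cap/msix_cap offsets that the PCI core now caches in struct pci_dev at enumeration time (0 when the capability is absent). A sketch of the replacement pattern:

        static u16 foo_read_msix_flags(struct pci_dev *pdev)
        {
                u16 ctrl = 0;

                /* pdev->msix_cap replaces
                 * pci_find_capability(pdev, PCI_CAP_ID_MSIX) */
                if (pdev->msix_cap)
                        pci_read_config_word(pdev,
                                             pdev->msix_cap + PCI_MSIX_FLAGS,
                                             &ctrl);
                return ctrl;
        }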
@@ -12659,10 +12673,13 @@ static int bnx2x_init_one(struct pci_dev *pdev,
is_vf = set_is_vf(ent->driver_data); is_vf = set_is_vf(ent->driver_data);
cnic_cnt = is_vf ? 0 : 1; cnic_cnt = is_vf ? 0 : 1;
max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf); max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
/* add another SB for VF as it has no default SB */
max_non_def_sbs += is_vf ? 1 : 0;
/* Maximum number of RSS queues: one IGU SB goes to CNIC */ /* Maximum number of RSS queues: one IGU SB goes to CNIC */
rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt; rss_count = max_non_def_sbs - cnic_cnt;
if (rss_count < 1) if (rss_count < 1)
return -EINVAL; return -EINVAL;


@@ -6335,6 +6335,7 @@
#define PCI_ID_VAL2 0x438 #define PCI_ID_VAL2 0x438
#define PCI_ID_VAL3 0x43c #define PCI_ID_VAL3 0x43c
#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C
#define GRC_CONFIG_REG_PF_INIT_VF 0x624 #define GRC_CONFIG_REG_PF_INIT_VF 0x624
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf #define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
/* First VF_NUM for PF is encoded in this register. /* First VF_NUM for PF is encoded in this register.


@@ -159,16 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending(
} }
} }
static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
struct bnx2x_exe_queue_obj *o)
{
spin_lock_bh(&o->lock);
__bnx2x_exe_queue_reset_pending(bp, o);
spin_unlock_bh(&o->lock);
}
/** /**
* bnx2x_exe_queue_step - execute one execution chunk atomically * bnx2x_exe_queue_step - execute one execution chunk atomically
* *
@@ -176,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
* @o: queue * @o: queue
* @ramrod_flags: flags * @ramrod_flags: flags
* *
* (Atomicity is ensured using the exe_queue->lock). * (Should be called while holding the exe_queue->lock).
*/ */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp, static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
struct bnx2x_exe_queue_obj *o, struct bnx2x_exe_queue_obj *o,
@@ -187,8 +177,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
memset(&spacer, 0, sizeof(spacer)); memset(&spacer, 0, sizeof(spacer));
spin_lock_bh(&o->lock);
/* Next step should not be performed until the current is finished, /* Next step should not be performed until the current is finished,
* unless a DRV_CLEAR_ONLY bit is set. In this case we just want to * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
* properly clear object internals without sending any command to the FW * properly clear object internals without sending any command to the FW
@@ -200,7 +188,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n"); DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
__bnx2x_exe_queue_reset_pending(bp, o); __bnx2x_exe_queue_reset_pending(bp, o);
} else { } else {
spin_unlock_bh(&o->lock);
return 1; return 1;
} }
} }
@@ -228,10 +215,8 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
} }
/* Sanity check */ /* Sanity check */
if (!cur_len) { if (!cur_len)
spin_unlock_bh(&o->lock);
return 0; return 0;
}
rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
if (rc < 0) if (rc < 0)
@@ -245,7 +230,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
*/ */
__bnx2x_exe_queue_reset_pending(bp, o); __bnx2x_exe_queue_reset_pending(bp, o);
spin_unlock_bh(&o->lock);
return rc; return rc;
} }
@@ -432,12 +416,219 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
return true; return true;
} }
/**
* __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
*
* @bp: device handle
* @o: vlan_mac object
*
* @details: Non-blocking implementation; should be called under execution
* queue lock.
*/
static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o)
{
if (o->head_reader) {
DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
return -EBUSY;
}
DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
return 0;
}
/**
* __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
*
* @bp: device handle
* @o: vlan_mac object
*
* @details Should be called under execution queue lock; notice it might release
* and reclaim it during its run.
*/
static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o)
{
int rc;
unsigned long ramrod_flags = o->saved_ramrod_flags;
DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
ramrod_flags);
o->head_exe_request = false;
o->saved_ramrod_flags = 0;
rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
if (rc != 0) {
BNX2X_ERR("execution of pending commands failed with rc %d\n",
rc);
#ifdef BNX2X_STOP_ON_ERROR
bnx2x_panic();
#endif
}
}
/**
* __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
*
* @bp: device handle
* @o: vlan_mac object
* @ramrod_flags: ramrod flags of missed execution
*
* @details Should be called under execution queue lock.
*/
static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o,
unsigned long ramrod_flags)
{
o->head_exe_request = true;
o->saved_ramrod_flags = ramrod_flags;
DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
ramrod_flags);
}
/**
* __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
*
* @bp: device handle
* @o: vlan_mac object
*
* @details Should be called under execution queue lock. Notice if a pending
* execution exists, it would perform it - possibly releasing and
* reclaiming the execution queue lock.
*/
static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o)
{
/* It's possible a new pending execution was added since this writer
* executed. If so, execute again. [Ad infinitum]
*/
while (o->head_exe_request) {
DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
__bnx2x_vlan_mac_h_exec_pending(bp, o);
}
}
/**
* bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
*
* @bp: device handle
* @o: vlan_mac object
*
* @details Notice if a pending execution exists, it would perform it -
* possibly releasing and reclaiming the execution queue lock.
*/
void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o)
{
spin_lock_bh(&o->exe_queue.lock);
__bnx2x_vlan_mac_h_write_unlock(bp, o);
spin_unlock_bh(&o->exe_queue.lock);
}
/**
* __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
*
* @bp: device handle
* @o: vlan_mac object
*
* @details Should be called under the execution queue lock. May sleep. May
* release and reclaim execution queue lock during its run.
*/
static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o)
{
/* If we got here, we're holding lock --> no WRITER exists */
o->head_reader++;
DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
o->head_reader);
return 0;
}
/**
* bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
*
* @bp: device handle
* @o: vlan_mac object
*
* @details May sleep. Claims and releases execution queue lock during its run.
*/
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o)
{
int rc;
spin_lock_bh(&o->exe_queue.lock);
rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
spin_unlock_bh(&o->exe_queue.lock);
return rc;
}
/**
* __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
*
* @bp: device handle
* @o: vlan_mac object
*
* @details Should be called under execution queue lock. Notice if a pending
* execution exists, it would be performed if this was the last
* reader. possibly releasing and reclaiming the execution queue lock.
*/
static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o)
{
if (!o->head_reader) {
BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef BNX2X_STOP_ON_ERROR
bnx2x_panic();
#endif
} else {
o->head_reader--;
DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
o->head_reader);
}
/* It's possible a new pending execution was added, and that this reader
* was last - if so we need to execute the command.
*/
if (!o->head_reader && o->head_exe_request) {
DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
/* Writer release will do the trick */
__bnx2x_vlan_mac_h_write_unlock(bp, o);
}
}
/**
* bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
*
* @bp: device handle
* @o: vlan_mac object
*
* @details Notice if a pending execution exists, it would be performed if this
* was the last reader. Claims and releases the execution queue lock
* during its run.
*/
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o)
{
spin_lock_bh(&o->exe_queue.lock);
__bnx2x_vlan_mac_h_read_unlock(bp, o);
spin_unlock_bh(&o->exe_queue.lock);
}
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
int n, u8 *base, u8 stride, u8 size) int n, u8 *base, u8 stride, u8 size)
{ {
struct bnx2x_vlan_mac_registry_elem *pos; struct bnx2x_vlan_mac_registry_elem *pos;
u8 *next = base; u8 *next = base;
int counter = 0; int counter = 0;
int read_lock;
DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
if (read_lock != 0)
BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
/* traverse list */ /* traverse list */
list_for_each_entry(pos, &o->head, link) { list_for_each_entry(pos, &o->head, link) {
@@ -449,6 +640,12 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
next += stride + size; next += stride + size;
} }
} }
if (read_lock == 0) {
DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
bnx2x_vlan_mac_h_read_unlock(bp, o);
}
return counter * ETH_ALEN; return counter * ETH_ALEN;
} }
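The new head_reader/head_exe_request fields implement a small reader/writer protocol on top of the exe_queue spinlock: walkers of the o->head registry take the reader side, while ramrod execution takes the writer side via the trylock and, when readers are active, parks its ramrod flags for the last unlocker to replay. Reader-side usage mirrors bnx2x_get_n_elements() above; a sketch:

        static int foo_count_regs(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o)
        {
                struct bnx2x_vlan_mac_registry_elem *pos;
                int cnt = 0;

                if (bnx2x_vlan_mac_h_read_lock(bp, o))  /* 0 on success */
                        return -EBUSY;

                list_for_each_entry(pos, &o->head, link)
                        cnt++;

                bnx2x_vlan_mac_h_read_unlock(bp, o);
                return cnt;
        }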
@@ -1397,6 +1594,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
return -EBUSY; return -EBUSY;
} }
static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o,
unsigned long *ramrod_flags)
{
int rc = 0;
spin_lock_bh(&o->exe_queue.lock);
DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
if (rc != 0) {
__bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
/* Calling function should not differentiate between this case
* and the case in which there is already a pending ramrod
*/
rc = 1;
} else {
rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
}
spin_unlock_bh(&o->exe_queue.lock);
return rc;
}
/** /**
* bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
* *
@@ -1414,19 +1637,27 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw; struct bnx2x_raw_obj *r = &o->raw;
int rc; int rc;
/* Clearing the pending list & raw state should be made
* atomically (as execution flow assumes they represent the same).
*/
spin_lock_bh(&o->exe_queue.lock);
/* Reset pending list */ /* Reset pending list */
bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
/* Clear pending */ /* Clear pending */
r->clear_pending(r); r->clear_pending(r);
spin_unlock_bh(&o->exe_queue.lock);
/* If ramrod failed this is most likely a SW bug */ /* If ramrod failed this is most likely a SW bug */
if (cqe->message.error) if (cqe->message.error)
return -EINVAL; return -EINVAL;
/* Run the next bulk of pending commands if requested */ /* Run the next bulk of pending commands if requested */
if (test_bit(RAMROD_CONT, ramrod_flags)) { if (test_bit(RAMROD_CONT, ramrod_flags)) {
rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
if (rc < 0) if (rc < 0)
return rc; return rc;
} }
@@ -1719,9 +1950,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd(
* @p: * @p:
* *
*/ */
int bnx2x_config_vlan_mac( int bnx2x_config_vlan_mac(struct bnx2x *bp,
struct bnx2x *bp, struct bnx2x_vlan_mac_ramrod_params *p)
struct bnx2x_vlan_mac_ramrod_params *p)
{ {
int rc = 0; int rc = 0;
struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
@@ -1752,7 +1982,8 @@ int bnx2x_config_vlan_mac(
/* Execute commands if required */ /* Execute commands if required */
if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
&p->ramrod_flags);
if (rc < 0) if (rc < 0)
return rc; return rc;
} }
@@ -1775,8 +2006,9 @@ int bnx2x_config_vlan_mac(
return rc; return rc;
/* Make a next step */ /* Make a next step */
rc = bnx2x_exe_queue_step(bp, &o->exe_queue, rc = __bnx2x_vlan_mac_execute_step(bp,
ramrod_flags); p->vlan_mac_obj,
&p->ramrod_flags);
if (rc < 0) if (rc < 0)
return rc; return rc;
} }
@@ -1806,10 +2038,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
unsigned long *ramrod_flags) unsigned long *ramrod_flags)
{ {
struct bnx2x_vlan_mac_registry_elem *pos = NULL; struct bnx2x_vlan_mac_registry_elem *pos = NULL;
int rc = 0;
struct bnx2x_vlan_mac_ramrod_params p; struct bnx2x_vlan_mac_ramrod_params p;
struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
int read_lock;
int rc = 0;
/* Clear pending commands first */ /* Clear pending commands first */
@@ -1844,6 +2077,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
__clear_bit(RAMROD_EXEC, &p.ramrod_flags); __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
__clear_bit(RAMROD_CONT, &p.ramrod_flags); __clear_bit(RAMROD_CONT, &p.ramrod_flags);
DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
if (read_lock != 0)
return read_lock;
list_for_each_entry(pos, &o->head, link) { list_for_each_entry(pos, &o->head, link) {
if (pos->vlan_mac_flags == *vlan_mac_flags) { if (pos->vlan_mac_flags == *vlan_mac_flags) {
p.user_req.vlan_mac_flags = pos->vlan_mac_flags; p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
@@ -1851,11 +2089,15 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
rc = bnx2x_config_vlan_mac(bp, &p); rc = bnx2x_config_vlan_mac(bp, &p);
if (rc < 0) { if (rc < 0) {
BNX2X_ERR("Failed to add a new DEL command\n"); BNX2X_ERR("Failed to add a new DEL command\n");
bnx2x_vlan_mac_h_read_unlock(bp, o);
return rc; return rc;
} }
} }
} }
DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
bnx2x_vlan_mac_h_read_unlock(bp, o);
p.ramrod_flags = *ramrod_flags; p.ramrod_flags = *ramrod_flags;
__set_bit(RAMROD_CONT, &p.ramrod_flags); __set_bit(RAMROD_CONT, &p.ramrod_flags);
@@ -1887,6 +2129,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
struct bnx2x_credit_pool_obj *vlans_pool) struct bnx2x_credit_pool_obj *vlans_pool)
{ {
INIT_LIST_HEAD(&o->head); INIT_LIST_HEAD(&o->head);
o->head_reader = 0;
o->head_exe_request = false;
o->saved_ramrod_flags = 0;
o->macs_pool = macs_pool; o->macs_pool = macs_pool;
o->vlans_pool = vlans_pool; o->vlans_pool = vlans_pool;
@@ -4171,6 +4416,16 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
rss_obj->config_rss = bnx2x_setup_rss; rss_obj->config_rss = bnx2x_setup_rss;
} }
int validate_vlan_mac(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *vlan_mac)
{
if (!vlan_mac->get_n_elements) {
BNX2X_ERR("vlan mac object was not intialized\n");
return -EINVAL;
}
return 0;
}
/********************** Queue state object ***********************************/ /********************** Queue state object ***********************************/
/** /**


@@ -285,6 +285,12 @@ struct bnx2x_vlan_mac_obj {
* entries. * entries.
*/ */
struct list_head head; struct list_head head;
/* Implement a simple reader/writer lock on the head list.
* all these fields should only be accessed under the exe_queue lock
*/
u8 head_reader; /* Num. of readers accessing head list */
bool head_exe_request; /* Pending execution request. */
unsigned long saved_ramrod_flags; /* Ramrods of pending execution */
/* TODO: Add it's initialization in the init functions */ /* TODO: Add it's initialization in the init functions */
struct bnx2x_exe_queue_obj exe_queue; struct bnx2x_exe_queue_obj exe_queue;
@@ -1302,8 +1308,16 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
struct bnx2x_credit_pool_obj *macs_pool, struct bnx2x_credit_pool_obj *macs_pool,
struct bnx2x_credit_pool_obj *vlans_pool); struct bnx2x_credit_pool_obj *vlans_pool);
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
int bnx2x_config_vlan_mac(struct bnx2x *bp, int bnx2x_config_vlan_mac(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params *p); struct bnx2x_vlan_mac_ramrod_params *p);
int bnx2x_vlan_mac_move(struct bnx2x *bp, int bnx2x_vlan_mac_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params *p, struct bnx2x_vlan_mac_ramrod_params *p,
@@ -1393,4 +1407,6 @@ int bnx2x_config_rss(struct bnx2x *bp,
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
u8 *ind_table); u8 *ind_table);
int validate_vlan_mac(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *vlan_mac);
#endif /* BNX2X_SP_VERBS */ #endif /* BNX2X_SP_VERBS */


@@ -170,6 +170,11 @@ enum bnx2x_vfop_qteardown_state {
 	BNX2X_VFOP_QTEARDOWN_DONE
 };
 
+enum bnx2x_vfop_rss_state {
+	BNX2X_VFOP_RSS_CONFIG,
+	BNX2X_VFOP_RSS_DONE
+};
+
 #define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
 
 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -265,11 +270,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
 	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
 	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
 
-	if (vfq_is_leading(q)) {
-		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
-		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
-	}
-
 	/* Setup-op rx parameters */
 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
 		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
@@ -398,7 +398,11 @@ static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
 		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
 			DP(BNX2X_MSG_IOV,
 			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
-			goto op_done;
+
+			/* next state */
+			vfop->state = BNX2X_VFOP_QDTOR_DONE;
+			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 		}
 
 		/* next state */
@@ -432,8 +436,10 @@ op_err:
 op_done:
 	case BNX2X_VFOP_QDTOR_DONE:
 		/* invalidate the context */
-		qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
-		qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+		if (qdtor->cxt) {
+			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
+			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+		}
 		bnx2x_vfop_end(bp, vf, vfop);
 		return;
 	default:
@@ -465,7 +471,8 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
 					     cmd->block);
 	}
-	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
+	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
+	   vf->abs_vfid, vfop->rc);
 	return -ENOMEM;
 }
@@ -474,10 +481,18 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
 {
 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 	if (vf) {
+		/* the first igu entry belonging to VFs of this PF */
+		if (!BP_VFDB(bp)->first_vf_igu_entry)
+			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
+
+		/* the first igu entry belonging to this VF */
 		if (!vf_sb_count(vf))
 			vf->igu_base_id = igu_sb_id;
+
 		++vf_sb_count(vf);
+		++vf->sb_count;
 	}
+	BP_VFDB(bp)->vf_sbs_pool++;
 }
 
 /* VFOP MAC/VLAN helpers */
@@ -491,12 +506,20 @@ static inline void bnx2x_vfop_credit(struct bnx2x *bp,
 	 * and a valid credit counter
 	 */
 	if (!vfop->rc && args->credit) {
-		int cnt = 0;
 		struct list_head *pos;
+		int read_lock;
+		int cnt = 0;
+
+		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+		if (read_lock)
+			DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
 
 		list_for_each(pos, &obj->head)
 			cnt++;
+
+		if (!read_lock)
+			bnx2x_vlan_mac_h_read_unlock(bp, obj);
+
 		atomic_set(args->credit, cnt);
 	}
 }
@@ -692,6 +715,7 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
 				     int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -711,6 +735,9 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 
 		/* set extra args */
@@ -731,6 +758,7 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
 			    int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -753,6 +781,9 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 
 		/* set extra args */
@@ -773,6 +804,7 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
 			    int qid, u16 vid, bool add)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -793,6 +825,9 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
 		ramrod->user_req.u.vlan.vlan = vid;
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
 		/* set extra args */
@@ -812,6 +847,7 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
 				      int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -831,6 +867,9 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
 		/* set extra args */
@@ -851,6 +890,7 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
 			     int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -870,6 +910,9 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
 		/* set extra args */
@@ -980,21 +1023,25 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	case BNX2X_VFOP_QFLR_CLR_VLAN:
 		/* vlan-clear-all: driver-only, don't consume credit */
 		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
-		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
+			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
+							      true);
 		if (vfop->rc)
 			goto op_err;
-		return;
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
 	case BNX2X_VFOP_QFLR_CLR_MAC:
 		/* mac-clear-all: driver only consume credit */
 		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
-		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
+			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
+							     true);
 		DP(BNX2X_MSG_IOV,
 		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
 		   vf->abs_vfid, vfop->rc);
 		if (vfop->rc)
 			goto op_err;
-		return;
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
 	case BNX2X_VFOP_QFLR_TERMINATE:
 		qstate = &vfop->op_p->qctor.qstate;
@@ -1291,10 +1338,13 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 
+	/* for non-leading queues skip directly to qdown state */
 	if (vfop) {
 		vfop->args.qx.qid = qid;
-		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
-				 bnx2x_vfop_qdown, cmd->done);
+		bnx2x_vfop_opset(qid == LEADING_IDX ?
+				 BNX2X_VFOP_QTEARDOWN_RXMODE :
+				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
+				 cmd->done);
 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
 					     cmd->block);
 	}
@@ -1447,15 +1497,16 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
  * both known
  */
 static void
-bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
+	struct vf_pf_resc_request *resc = &vf->alloc_resc;
 	u16 vlan_count = 0;
 
 	/* will be set only during VF-ACQUIRE */
 	resc->num_rxqs = 0;
 	resc->num_txqs = 0;
 
-	/* no credit calculcis for macs (just yet) */
+	/* no credit calculations for macs (just yet) */
 	resc->num_mac_filters = 1;
 
 	/* divvy up vlan rules */
@@ -1467,13 +1518,14 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
 	resc->num_mc_filters = 0;
 
 	/* num_sbs already set */
+	resc->num_sbs = vf->sb_count;
 }
 
 /* FLR routines: */
 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
 	/* reset the state variables */
-	bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+	bnx2x_iov_static_resc(bp, vf);
 	vf->state = VF_FREE;
 }
@@ -1693,8 +1745,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
 	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
 	 * the Pf doorbell size although the 2 are independent.
 	 */
-	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
-	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
+	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
 
 	/* No security checks for now -
 	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
@@ -1761,7 +1812,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
 {
 	int sb_id;
 	u32 val;
-	u8 fid;
+	u8 fid, current_pf = 0;
 
 	/* IGU in normal mode - read CAM */
 	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
@@ -1769,16 +1820,18 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
 		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
 			continue;
 		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
-		if (!(fid & IGU_FID_ENCODE_IS_PF))
+		if (fid & IGU_FID_ENCODE_IS_PF)
+			current_pf = fid & IGU_FID_PF_NUM_MASK;
+		else if (current_pf == BP_ABS_FUNC(bp))
 			bnx2x_vf_set_igu_info(bp, sb_id,
 					      (fid & IGU_FID_VF_NUM_MASK));
 		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
 		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
 		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
 		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
 	}
+	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
 }
 
 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
@@ -1844,23 +1897,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
 	return 0;
 }
 
-static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
-{
-	int i;
-	u8 queue_count = 0;
-
-	if (IS_SRIOV(bp))
-		for_each_vf(bp, i)
-			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
-
-	return queue_count;
-}
-
 /* must be called after PF bars are mapped */
 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 		       int num_vfs_param)
 {
-	int err, i, qcount;
+	int err, i;
 	struct bnx2x_sriov *iov;
 	struct pci_dev *dev = bp->pdev;
@@ -1958,12 +1999,13 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
 	bnx2x_get_vf_igu_cam_info(bp);
 
-	/* get the total queue count and allocate the global queue arrays */
-	qcount = bnx2x_iov_get_max_queue_count(bp);
-
 	/* allocate the queue arrays for all VFs */
-	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
-				 GFP_KERNEL);
+	bp->vfdb->vfqs = kzalloc(
+		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
+		GFP_KERNEL);
+
+	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
 	if (!bp->vfdb->vfqs) {
 		BNX2X_ERR("failed to allocate vf queue array\n");
 		err = -ENOMEM;
@@ -2084,49 +2126,14 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			   q_type);
 
 	DP(BNX2X_MSG_IOV,
-	   "initialized vf %d's queue object. func id set to %d\n",
-	   vf->abs_vfid, q->sp_obj.func_id);
-
-	/* mac/vlan objects are per queue, but only those
-	 * that belong to the leading queue are initialized
-	 */
-	if (vfq_is_leading(q)) {
-		/* mac */
-		bnx2x_init_mac_obj(bp, &q->mac_obj,
-				   cl_id, q->cid, func_id,
-				   bnx2x_vf_sp(bp, vf, mac_rdata),
-				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
-				   BNX2X_FILTER_MAC_PENDING,
-				   &vf->filter_state,
-				   BNX2X_OBJ_TYPE_RX_TX,
-				   &bp->macs_pool);
-		/* vlan */
-		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
-				    cl_id, q->cid, func_id,
-				    bnx2x_vf_sp(bp, vf, vlan_rdata),
-				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
-				    BNX2X_FILTER_VLAN_PENDING,
-				    &vf->filter_state,
-				    BNX2X_OBJ_TYPE_RX_TX,
-				    &bp->vlans_pool);
-
-		/* mcast */
-		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
-				     q->cid, func_id, func_id,
-				     bnx2x_vf_sp(bp, vf, mcast_rdata),
-				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
-				     BNX2X_FILTER_MCAST_PENDING,
-				     &vf->filter_state,
-				     BNX2X_OBJ_TYPE_RX_TX);
-
-		vf->leading_rss = cl_id;
-	}
+	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
+	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
 }
 
 /* called by bnx2x_nic_load */
 int bnx2x_iov_nic_init(struct bnx2x *bp)
 {
-	int vfid, qcount, i;
+	int vfid;
 
 	if (!IS_SRIOV(bp)) {
 		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
@@ -2155,7 +2162,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
 				      BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
 
 		/* init statically provisioned resources */
-		bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+		bnx2x_iov_static_resc(bp, vf);
 
 		/* queues are initialized during VF-ACQUIRE */
@@ -2191,13 +2198,12 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
 	}
 
 	/* Final VF init */
-	qcount = 0;
-	for_each_vf(bp, i) {
-		struct bnx2x_virtf *vf = BP_VF(bp, i);
+	for_each_vf(bp, vfid) {
+		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
 
 		/* fill in the BDF and bars */
-		vf->bus = bnx2x_vf_bus(bp, i);
-		vf->devfn = bnx2x_vf_devfn(bp, i);
+		vf->bus = bnx2x_vf_bus(bp, vfid);
+		vf->devfn = bnx2x_vf_devfn(bp, vfid);
 		bnx2x_vf_set_bars(bp, vf);
 
 		DP(BNX2X_MSG_IOV,
@@ -2206,10 +2212,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
 		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
 		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
 		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
-
-		/* set local queue arrays */
-		vf->vfqs = &bp->vfdb->vfqs[qcount];
-		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
 	}
 
 	return 0;
@@ -2515,6 +2517,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 		for_each_vfq(vf, j) {
 			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
+			dma_addr_t q_stats_addr =
+				vf->fw_stat_map + j * vf->stats_stride;
 
 			/* collect stats for active queues only */
 			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
 			    BNX2X_Q_LOGICAL_STATE_STOPPED)
@@ -2522,13 +2527,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 
 			/* create stats query entry for this queue */
 			cur_query_entry->kind = STATS_TYPE_QUEUE;
-			cur_query_entry->index = vfq_cl_id(vf, rxq);
+			cur_query_entry->index = vfq_stat_id(vf, rxq);
 			cur_query_entry->funcID =
 				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
 			cur_query_entry->address.hi =
-				cpu_to_le32(U64_HI(vf->fw_stat_map));
+				cpu_to_le32(U64_HI(q_stats_addr));
 			cur_query_entry->address.lo =
-				cpu_to_le32(U64_LO(vf->fw_stat_map));
+				cpu_to_le32(U64_LO(q_stats_addr));
 			DP(BNX2X_MSG_IOV,
 			   "added address %x %x for vf %d queue %d client %d\n",
 			   cur_query_entry->address.hi,
@@ -2537,6 +2542,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 			cur_query_entry++;
 			cur_data_offset += sizeof(struct per_queue_stats);
 			stats_count++;
+
+			/* all stats are coalesced to the leading queue */
+			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+				break;
 		}
 	}
 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
@@ -2555,6 +2564,11 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
 	for_each_vf(bp, i) {
 		struct bnx2x_virtf *vf = BP_VF(bp, i);
 
+		if (!vf) {
+			BNX2X_ERR("VF was null! skipping...\n");
+			continue;
+		}
+
 		if (!list_empty(&vf->op_list_head) &&
 		    atomic_read(&vf->op_in_progress)) {
 			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
@@ -2702,7 +2716,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		struct bnx2x_vf_queue *q = vfq_get(vf, i);
 
 		if (!q) {
-			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
+			BNX2X_ERR("q number %d was not allocated\n", i);
 			return -EINVAL;
 		}
@@ -2930,6 +2944,43 @@ op_done:
 	bnx2x_vfop_end(bp, vf, vfop);
 }
 
+static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+	enum bnx2x_vfop_rss_state state;
+
+	if (!vfop) {
+		BNX2X_ERR("vfop was null\n");
+		return;
+	}
+
+	state = vfop->state;
+	bnx2x_vfop_reset_wq(vf);
+
+	if (vfop->rc < 0)
+		goto op_err;
+
+	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+	switch (state) {
+	case BNX2X_VFOP_RSS_CONFIG:
+		/* next state */
+		vfop->state = BNX2X_VFOP_RSS_DONE;
+		bnx2x_config_rss(bp, &vfop->op_p->rss);
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+		BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
+op_done:
+	case BNX2X_VFOP_RSS_DONE:
+		bnx2x_vfop_end(bp, vf, vfop);
+		return;
+	default:
+		bnx2x_vfop_default(state);
+	}
+op_pending:
+	return;
+}
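
Aside: bnx2x_vfop_rss above follows the driver's usual vfop state-machine shape; each invocation handles the current state, arms the next one, and lets bnx2x_vfop_finalize() decide whether to continue, pend, or complete. A compressed sketch of that skeleton (illustrative only; SOME_STATE and NEXT_STATE are placeholder names, not driver symbols):

	/* Illustrative vfop skeleton, not from the diff. */
	switch (vfop->state) {
	case SOME_STATE:
		vfop->state = NEXT_STATE;	/* arm the next step */
		/* ... issue the ramrod for this step ... */
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	case NEXT_STATE:
		bnx2x_vfop_end(bp, vf, vfop);	/* operation complete */
		return;
	default:
		bnx2x_vfop_default(state);
	}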
 int bnx2x_vfop_release_cmd(struct bnx2x *bp,
 			   struct bnx2x_virtf *vf,
 			   struct bnx2x_vfop_cmd *cmd)
@@ -2944,6 +2995,21 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
 	return -ENOMEM;
 }
 
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+		       struct bnx2x_virtf *vf,
+		       struct bnx2x_vfop_cmd *cmd)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+	if (vfop) {
+		bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
+				 cmd->done);
+		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
+					     cmd->block);
+	}
+	return -ENOMEM;
+}
+
 /* VF release ~ VF close + VF release-resources
  * Release is the ultimate SW shutdown and is called whenever an
  * irrecoverable error is encountered.
@@ -2955,6 +3021,8 @@ void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
 		.block = block,
 	};
 	int rc;
+
+	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
 
 	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
@@ -2983,6 +3051,12 @@ static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			      enum channel_tlvs tlv)
 {
+	/* we don't lock the channel for unsupported tlvs */
+	if (!bnx2x_tlv_supported(tlv)) {
+		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
+		return;
+	}
+
 	/* lock the channel */
 	mutex_lock(&vf->op_mutex);
@@ -2997,19 +3071,32 @@ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				enum channel_tlvs expected_tlv)
 {
+	enum channel_tlvs current_tlv;
+
+	if (!vf) {
+		BNX2X_ERR("VF was %p\n", vf);
+		return;
+	}
+
+	current_tlv = vf->op_current;
+
+	/* we don't unlock the channel for unsupported tlvs */
+	if (!bnx2x_tlv_supported(expected_tlv))
+		return;
+
 	WARN(expected_tlv != vf->op_current,
 	     "lock mismatch: expected %d found %d", expected_tlv,
 	     vf->op_current);
 
+	/* record the locking op */
+	vf->op_current = CHANNEL_TLV_NONE;
+
 	/* lock the channel */
 	mutex_unlock(&vf->op_mutex);
 
 	/* log the unlock */
 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
 	   vf->abs_vfid, vf->op_current);
-
-	/* record the locking op */
-	vf->op_current = CHANNEL_TLV_NONE;
 }
 
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
@@ -3040,11 +3127,77 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
 		return bnx2x_enable_sriov(bp);
 	}
 }
+#define IGU_ENTRY_SIZE 4
 
 int bnx2x_enable_sriov(struct bnx2x *bp)
 {
 	int rc = 0, req_vfs = bp->requested_nr_virtfn;
+	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
+	u32 igu_entry, address;
+	u16 num_vf_queues;
+
+	if (req_vfs == 0)
+		return 0;
+
+	first_vf = bp->vfdb->sriov.first_vf_in_pf;
+
+	/* statically distribute vf sb pool between VFs */
+	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
+			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);
+
+	/* zero previous values learned from igu cam */
+	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
+		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+		vf->sb_count = 0;
+		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
+	}
+	bp->vfdb->vf_sbs_pool = 0;
+
+	/* prepare IGU cam */
+	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
+	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
+	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
+			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
+				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
+				IGU_REG_MAPPING_MEMORY_VALID;
+			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
+			   sb_idx, vf_idx);
+			REG_WR(bp, address, igu_entry);
+			sb_idx++;
+			address += IGU_ENTRY_SIZE;
+		}
+	}
+
+	/* Reinitialize vf database according to igu cam */
+	bnx2x_get_vf_igu_cam_info(bp);
+
+	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
+	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
+
+	qcount = 0;
+	for_each_vf(bp, vf_idx) {
+		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+		/* set local queue arrays */
+		vf->vfqs = &bp->vfdb->vfqs[qcount];
+		qcount += vf_sb_count(vf);
+	}
+
+	/* prepare msix vectors in VF configuration space */
+	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
+		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
+		       num_vf_queues);
+	}
+	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+	/* enable sriov. This will probe all the VFs, and consequentially cause
+	 * the "acquire" messages to appear on the VF PF channel.
+	 */
+	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
+	pci_disable_sriov(bp->pdev);
 	rc = pci_enable_sriov(bp->pdev, req_vfs);
 	if (rc) {
 		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
@@ -3072,9 +3225,8 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
 	pci_disable_sriov(bp->pdev);
 }
 
-static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
-			     struct bnx2x_virtf **vf,
-			     struct pf_vf_bulletin_content **bulletin)
+int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
+		      struct pf_vf_bulletin_content **bulletin)
 {
 	if (bp->state != BNX2X_STATE_OPEN) {
 		BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3097,7 +3249,13 @@ static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
 	*bulletin = BP_VF_BULLETIN(bp, vfidx);
 
 	if (!*vf) {
-		BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+		BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
+			  vfidx);
+		return -EINVAL;
+	}
+
+	if (!(*vf)->vfqs) {
+		BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
 			  vfidx);
 		return -EINVAL;
 	}
@@ -3125,8 +3283,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
 	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
 	if (rc)
 		return rc;
-	mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
-	vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
 	if (!mac_obj || !vlan_obj) {
 		BNX2X_ERR("VF partially initialized\n");
 		return -EINVAL;
@@ -3138,10 +3296,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
 	ivi->spoofchk = 1; /*always enabled */
 	if (vf->state == VF_ENABLED) {
 		/* mac and vlan are in vlan_mac objects */
-		mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
-					0, ETH_ALEN);
-		vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
-					 0, VLAN_HLEN);
+		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+						0, ETH_ALEN);
+		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
+			vlan_obj->get_n_elements(bp, vlan_obj, 1,
+						 (u8 *)&ivi->vlan, 0,
+						 VLAN_HLEN);
 	} else {
 		/* mac */
 		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3209,14 +3370,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
 		return rc;
 	}
 
+	/* is vf initialized and queue set up? */
 	q_logical_state =
-		bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
 	if (vf->state == VF_ENABLED &&
 	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
 		/* configure the mac in device on this vf's queue */
 		unsigned long ramrod_flags = 0;
-		struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+		struct bnx2x_vlan_mac_obj *mac_obj =
+			&bnx2x_leading_vfq(vf, mac_obj);
+		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+		if (rc)
+			return rc;
 
 		/* must lock vfpf channel to protect against vf flows */
 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
@@ -3276,18 +3441,21 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 
 	/* is vf initialized and queue set up? */
 	q_logical_state =
-		bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
 	if (vf->state == VF_ENABLED &&
 	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
 		/* configure the vlan in device on this vf's queue */
 		unsigned long ramrod_flags = 0;
 		unsigned long vlan_mac_flags = 0;
 		struct bnx2x_vlan_mac_obj *vlan_obj =
-			&bnx2x_vfq(vf, 0, vlan_obj);
+			&bnx2x_leading_vfq(vf, vlan_obj);
 		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
 		struct bnx2x_queue_state_params q_params = {NULL};
 		struct bnx2x_queue_update_params *update_params;
 
+		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+		if (rc)
+			return rc;
+
 		memset(&ramrod_param, 0, sizeof(ramrod_param));
 
 		/* must lock vfpf channel to protect against vf flows */
@@ -3307,7 +3475,7 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 		 */
 		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 		q_params.cmd = BNX2X_Q_CMD_UPDATE;
-		q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
+		q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
 		update_params = &q_params.params.update;
 		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
 			  &update_params->update_flags);


@@ -81,6 +81,7 @@ struct bnx2x_vf_queue {
 	u32 cid;
 	u16 index;
 	u16 sb_idx;
+	bool is_leading;
 };
 
 /* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -194,6 +195,7 @@ struct bnx2x_virtf {
 #define VF_CFG_INT_SIMD		0x0008
 #define VF_CACHE_LINE		0x0010
 #define VF_CFG_VLAN		0x0020
+#define VF_CFG_STATS_COALESCE	0x0040
 
 	u8 state;
 #define VF_FREE		0	/* VF ready to be acquired holds no resc */
@@ -213,6 +215,7 @@ struct bnx2x_virtf {
 
 	/* dma */
 	dma_addr_t fw_stat_map;		/* valid iff VF_CFG_STATS */
+	u16 stats_stride;
 	dma_addr_t spq_map;
 	dma_addr_t bulletin_map;
 
@@ -239,7 +242,10 @@ struct bnx2x_virtf {
 	u8 igu_base_id;	/* base igu status block id */
 
 	struct bnx2x_vf_queue	*vfqs;
-#define bnx2x_vfq(vf, nr, var)	((vf)->vfqs[(nr)].var)
+#define LEADING_IDX			0
+#define bnx2x_vfq_is_leading(vfq)	((vfq)->index == LEADING_IDX)
+#define bnx2x_vfq(vf, nr, var)		((vf)->vfqs[(nr)].var)
+#define bnx2x_leading_vfq(vf, var)	((vf)->vfqs[LEADING_IDX].var)
 
 	u8 index;	/* index in the vf array */
 	u8 abs_vfid;
@@ -358,6 +364,10 @@ struct bnx2x_vf_sp {
 		struct client_init_ramrod_data	init_data;
 		struct client_update_ramrod_data update_data;
 	} q_data;
+
+	union {
+		struct eth_rss_update_ramrod_data e2;
+	} rss_rdata;
 };
 
 struct hw_dma {
@@ -403,6 +413,10 @@ struct bnx2x_vfdb {
 #define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
 	u32 flrd_vfs[FLRD_VFS_DWORDS];
+
+	/* the number of msix vectors belonging to this PF designated for VFs */
+	u16 vf_sbs_pool;
+	u16 first_vf_igu_entry;
 };
 
 /* queue access */
@@ -411,11 +425,6 @@ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
 	return &(vf->vfqs[index]);
 }
 
-static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
-{
-	return (vfq->index == 0);
-}
-
 /* FW ids */
 static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
 {
@@ -434,7 +443,10 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
 static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
 {
-	return vfq_cl_id(vf, q);
+	if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+		return vf->leading_rss;
+	else
+		return vfq_cl_id(vf, q);
 }
 
 static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
@@ -691,6 +703,10 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
 			   struct bnx2x_virtf *vf,
 			   struct bnx2x_vfop_cmd *cmd);
 
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+		       struct bnx2x_virtf *vf,
+		       struct bnx2x_vfop_cmd *cmd);
+
 /* VF release ~ VF close + VF release-resources
  *
  * Release is the ultimate SW shutdown and is called whenever an
@@ -730,9 +746,12 @@ int bnx2x_vfpf_release(struct bnx2x *bp);
 int bnx2x_vfpf_release(struct bnx2x *bp);
 int bnx2x_vfpf_init(struct bnx2x *bp);
 void bnx2x_vfpf_close_vf(struct bnx2x *bp);
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		       bool is_leading);
 int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
 int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+			  struct bnx2x_config_rss_params *params);
 int bnx2x_vfpf_set_mcast(struct net_device *dev);
 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
 
@@ -758,7 +777,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp);
 void bnx2x_disable_sriov(struct bnx2x *bp);
 static inline int bnx2x_vf_headroom(struct bnx2x *bp)
 {
-	return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+	return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
 }
 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
@@ -793,7 +812,7 @@ static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
 static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
 static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
 static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
-static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
+static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
 static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
 static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
 					u8 vf_qid, bool set) {return 0; }


@@ -257,17 +257,23 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
 
 		/* humble our request */
 		req->resc_request.num_txqs =
-			bp->acquire_resp.resc.num_txqs;
+			min(req->resc_request.num_txqs,
+			    bp->acquire_resp.resc.num_txqs);
 		req->resc_request.num_rxqs =
-			bp->acquire_resp.resc.num_rxqs;
+			min(req->resc_request.num_rxqs,
+			    bp->acquire_resp.resc.num_rxqs);
 		req->resc_request.num_sbs =
-			bp->acquire_resp.resc.num_sbs;
+			min(req->resc_request.num_sbs,
+			    bp->acquire_resp.resc.num_sbs);
 		req->resc_request.num_mac_filters =
-			bp->acquire_resp.resc.num_mac_filters;
+			min(req->resc_request.num_mac_filters,
+			    bp->acquire_resp.resc.num_mac_filters);
 		req->resc_request.num_vlan_filters =
-			bp->acquire_resp.resc.num_vlan_filters;
+			min(req->resc_request.num_vlan_filters,
+			    bp->acquire_resp.resc.num_vlan_filters);
 		req->resc_request.num_mc_filters =
-			bp->acquire_resp.resc.num_mc_filters;
+			min(req->resc_request.num_mc_filters,
+			    bp->acquire_resp.resc.num_mc_filters);
 
 		/* Clear response buffer */
 		memset(&bp->vf2pf_mbox->resp, 0,
@@ -293,7 +299,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
 	bp->common.flash_size = 0;
 	bp->flags |=
 		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
-	bp->igu_sb_cnt = 1;
+	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
 	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
 	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
 		sizeof(bp->fw_ver));
@@ -373,6 +379,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
 	req->stats_addr = bp->fw_stats_data_mapping +
 			  offsetof(struct bnx2x_fw_stats_data, queue_stats);
 
+	req->stats_stride = sizeof(struct per_queue_stats);
+
 	/* add list termination tlv */
 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
@@ -452,12 +460,60 @@ free_irq:
 	bnx2x_free_irq(bp);
 }
 
+static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				   struct bnx2x_vf_queue *q)
+{
+	u8 cl_id = vfq_cl_id(vf, q);
+	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+	/* mac */
+	bnx2x_init_mac_obj(bp, &q->mac_obj,
+			   cl_id, q->cid, func_id,
+			   bnx2x_vf_sp(bp, vf, mac_rdata),
+			   bnx2x_vf_sp_map(bp, vf, mac_rdata),
+			   BNX2X_FILTER_MAC_PENDING,
+			   &vf->filter_state,
+			   BNX2X_OBJ_TYPE_RX_TX,
+			   &bp->macs_pool);
+	/* vlan */
+	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+			    cl_id, q->cid, func_id,
+			    bnx2x_vf_sp(bp, vf, vlan_rdata),
+			    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+			    BNX2X_FILTER_VLAN_PENDING,
+			    &vf->filter_state,
+			    BNX2X_OBJ_TYPE_RX_TX,
+			    &bp->vlans_pool);
+
+	/* mcast */
+	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+			     q->cid, func_id, func_id,
+			     bnx2x_vf_sp(bp, vf, mcast_rdata),
+			     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+			     BNX2X_FILTER_MCAST_PENDING,
+			     &vf->filter_state,
+			     BNX2X_OBJ_TYPE_RX_TX);
+
+	/* rss */
+	bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
+				  func_id, func_id,
+				  bnx2x_vf_sp(bp, vf, rss_rdata),
+				  bnx2x_vf_sp_map(bp, vf, rss_rdata),
+				  BNX2X_FILTER_RSS_CONF_PENDING,
+				  &vf->filter_state,
+				  BNX2X_OBJ_TYPE_RX_TX);
+
+	vf->leading_rss = cl_id;
+	q->is_leading = true;
+}
+
 /* ask the pf to open a queue for the vf */
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		       bool is_leading)
 {
 	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
-	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+	u8 fp_idx = fp->index;
 	u16 tpa_agg_size = 0, flags = 0;
 	int rc;
@@ -473,6 +529,9 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
 		tpa_agg_size = TPA_AGG_SIZE;
 	}
 
+	if (is_leading)
+		flags |= VFPF_QUEUE_FLG_LEADING_RSS;
+
 	/* calculate queue flags */
 	flags |= VFPF_QUEUE_FLG_STATS;
 	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
@@ -646,6 +705,71 @@ out:
 	return 0;
 }
 
+/* request pf to config rss table for vf queues */
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+			  struct bnx2x_config_rss_params *params)
+{
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
+	int rc = 0;
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
+			sizeof(*req));
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+	memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
+	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
+	req->rss_key_size = T_ETH_RSS_KEY;
+	req->rss_result_mask = params->rss_result_mask;
+
+	/* flags handled individually for backward/forward compatibility */
+	if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
+		req->rss_flags |= VFPF_RSS_MODE_DISABLED;
+	if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
+		req->rss_flags |= VFPF_RSS_MODE_REGULAR;
+	if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
+		req->rss_flags |= VFPF_RSS_SET_SRCH;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
+		req->rss_flags |= VFPF_RSS_IPV4;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
+		req->rss_flags |= VFPF_RSS_IPV4_TCP;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
+		req->rss_flags |= VFPF_RSS_IPV4_UDP;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
+		req->rss_flags |= VFPF_RSS_IPV6;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
+		req->rss_flags |= VFPF_RSS_IPV6_TCP;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
+		req->rss_flags |= VFPF_RSS_IPV6_UDP;
+
+	DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	/* send message to pf */
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+	if (rc) {
+		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+		goto out;
+	}
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n",
+			  resp->hdr.status);
+		rc = -EINVAL;
+	}
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return 0;
+}
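
Aside: a VF-side caller fills a bnx2x_config_rss_params and hands it to the new helper; the bit numbering mirrors the BNX2X_RSS_* flags translated above. A minimal sketch (illustrative only; the indirection table and key must be populated before the call, and the mask value below is an assumption, not a driver constant):

	/* Illustrative: ask the PF for regular IPv4/TCP hashing. */
	struct bnx2x_config_rss_params params = {0};
	int rc;

	params.rss_result_mask = 0x7;	/* assumed mask for 8 queues */
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	/* fill params.ind_table[] and params.rss_key[] here */
	rc = bnx2x_vfpf_config_rss(bp, &params);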
 int bnx2x_vfpf_set_mcast(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -948,7 +1072,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
 
 	/* fill in pfdev info */
 	resp->pfdev_info.chip_num = bp->common.chip_id;
-	resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
+	resp->pfdev_info.db_size = bp->db_size;
 	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
 	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
 				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
@@ -1054,8 +1178,13 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	/* record ghost addresses from vf message */
 	vf->spq_map = init->spq_addr;
 	vf->fw_stat_map = init->stats_addr;
+	vf->stats_stride = init->stats_stride;
 	vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
 
+	/* set VF multiqueue statistics collection mode */
+	if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
+		vf->cfg_flags |= VF_CFG_STATS_COALESCE;
+
 	/* response */
 	bnx2x_vf_mbx_resp(bp, vf);
 }
@@ -1080,6 +1209,8 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
 		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
 	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
 		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
+		__set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
 
 	/* outer vlan removal is set according to PF's multi function mode */
 	if (IS_MF_SD(bp))
@@ -1113,6 +1244,9 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		struct bnx2x_queue_init_params *init_p;
 		struct bnx2x_queue_setup_params *setup_p;
 
+		if (bnx2x_vfq_is_leading(q))
+			bnx2x_leading_vfq_init(bp, vf, q);
+
 		/* re-init the VF operation context */
 		memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
 		setup_p = &vf->op_params.qctor.prep_qsetup;
@@ -1552,6 +1686,68 @@ static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	bnx2x_vf_mbx_resp(bp, vf);
 }
 
+static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				    struct bnx2x_vf_mbx *mbx)
+{
+	struct bnx2x_vfop_cmd cmd = {
+		.done = bnx2x_vf_mbx_resp,
+		.block = false,
+	};
+	struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
+	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+
+	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
+	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
+		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
+			  vf->index);
+		vf->op_rc = -EINVAL;
+		goto mbx_resp;
+	}
+
+	/* set vfop params according to rss tlv */
+	memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
+	       T_ETH_INDIRECTION_TABLE_SIZE);
+	memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
+	       sizeof(rss_tlv->rss_key));
+	vf_op_params->rss_obj = &vf->rss_conf_obj;
+	vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
+
+	/* flags handled individually for backward/forward compatibility */
+	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
+		__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
+		__set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
+		__set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
+		__set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
+		__set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
+		__set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
+		__set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
+		__set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
+		__set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
+
+	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
+	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
+	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
+	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
+		BNX2X_ERR("about to hit a FW assert. aborting...\n");
+		vf->op_rc = -EINVAL;
+		goto mbx_resp;
+	}
+
+	vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
+
+mbx_resp:
+	if (vf->op_rc)
+		bnx2x_vf_mbx_resp(bp, vf);
+}
+
 /* dispatch request */
 static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				 struct bnx2x_vf_mbx *mbx)
@@ -1588,6 +1784,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		case CHANNEL_TLV_RELEASE:
 			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
 			break;
+		case CHANNEL_TLV_UPDATE_RSS:
+			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
+			break;
 		}
 
 	} else {
@@ -1607,7 +1806,7 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		/* test whether we can respond to the VF (do we have an address
 		 * for it?)
 		 */
-		if (vf->state == VF_ACQUIRED) {
+		if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
 			/* mbx_resp uses the op_rc of the VF */
 			vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;


@@ -51,6 +51,7 @@ struct hw_sb_info {
 #define VFPF_QUEUE_FLG_COS		0x0080
 #define VFPF_QUEUE_FLG_HC		0x0100
 #define VFPF_QUEUE_FLG_DHC		0x0200
+#define VFPF_QUEUE_FLG_LEADING_RSS	0x0400
 
 #define VFPF_QUEUE_DROP_IP_CS_ERR	(1 << 0)
 #define VFPF_QUEUE_DROP_TCP_CS_ERR	(1 << 1)
@@ -131,6 +132,27 @@ struct vfpf_q_op_tlv {
 	u8 padding[3];
 };
 
+/* receive side scaling tlv */
+struct vfpf_rss_tlv {
+	struct vfpf_first_tlv	first_tlv;
+	u32			rss_flags;
+#define VFPF_RSS_MODE_DISABLED	(1 << 0)
+#define VFPF_RSS_MODE_REGULAR	(1 << 1)
+#define VFPF_RSS_SET_SRCH	(1 << 2)
+#define VFPF_RSS_IPV4		(1 << 3)
+#define VFPF_RSS_IPV4_TCP	(1 << 4)
+#define VFPF_RSS_IPV4_UDP	(1 << 5)
+#define VFPF_RSS_IPV6		(1 << 6)
+#define VFPF_RSS_IPV6_TCP	(1 << 7)
+#define VFPF_RSS_IPV6_UDP	(1 << 8)
+	u8			rss_result_mask;
+	u8			ind_table_size;
+	u8			rss_key_size;
+	u8			padding;
+	u8			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+	u32			rss_key[T_ETH_RSS_KEY];	/* hash values */
+};
+
 /* acquire response tlv - carries the allocated resources */
 struct pfvf_acquire_resp_tlv {
 	struct pfvf_tlv hdr;
@@ -166,12 +188,20 @@ struct pfvf_acquire_resp_tlv {
 	} resc;
 };
 
+#define VFPF_INIT_FLG_STATS_COALESCE	(1 << 0) /* when set the VFs queues
+						  * stats will be coalesced on
+						  * the leading RSS queue
+						  */
+
 /* Init VF */
 struct vfpf_init_tlv {
 	struct vfpf_first_tlv first_tlv;
 	aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
 	aligned_u64 spq_addr;
 	aligned_u64 stats_addr;
+	u16 stats_stride;
+	u32 flags;
+	u32 padding[2];
 };
 
 /* Setup Queue */
@@ -293,13 +323,14 @@ union vfpf_tlvs {
 	struct vfpf_q_op_tlv		q_op;
 	struct vfpf_setup_q_tlv		setup_q;
 	struct vfpf_set_q_filters_tlv	set_q_filters;
 	struct vfpf_release_tlv		release;
+	struct vfpf_rss_tlv		update_rss;
 	struct channel_list_end_tlv	list_end;
 	struct tlv_buffer_size		tlv_buf_size;
 };
 
 union pfvf_tlvs {
 	struct pfvf_general_resp_tlv	general_resp;
 	struct pfvf_acquire_resp_tlv	acquire_resp;
 	struct channel_list_end_tlv	list_end;
 	struct tlv_buffer_size		tlv_buf_size;
@@ -355,14 +386,18 @@ enum channel_tlvs {
 	CHANNEL_TLV_INIT,
 	CHANNEL_TLV_SETUP_Q,
 	CHANNEL_TLV_SET_Q_FILTERS,
+	CHANNEL_TLV_ACTIVATE_Q,
+	CHANNEL_TLV_DEACTIVATE_Q,
 	CHANNEL_TLV_TEARDOWN_Q,
 	CHANNEL_TLV_CLOSE,
 	CHANNEL_TLV_RELEASE,
+	CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
 	CHANNEL_TLV_PF_RELEASE_VF,
 	CHANNEL_TLV_LIST_END,
 	CHANNEL_TLV_FLR,
 	CHANNEL_TLV_PF_SET_MAC,
 	CHANNEL_TLV_PF_SET_VLAN,
+	CHANNEL_TLV_UPDATE_RSS,
 	CHANNEL_TLV_MAX
 };
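
Aside: channel TLV numbers travel on the VF-PF wire, so the enum above only ever appends; the abandoned slot is kept as CHANNEL_TLV_UPDATE_RSS_DEPRECATED precisely so the values after it do not shift for older driver counterparts. A build-time guard one could add to catch accidental renumbering (illustrative only, not part of the diff; relies on the kernel's BUILD_BUG_ON and the ordering visible in the enum above):

	/* Illustrative: per the enum above, LIST_END follows PF_RELEASE_VF. */
	BUILD_BUG_ON(CHANNEL_TLV_LIST_END != CHANNEL_TLV_PF_RELEASE_VF + 1);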


@@ -1,6 +1,6 @@
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1184,6 +1184,7 @@ error:
 static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	int ctx_blk_size = cp->ethdev->ctx_blk_size;
 	int total_mem, blks, i;
@@ -1201,7 +1202,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
 	cp->ctx_blks = blks;
 	cp->ctx_blk_size = ctx_blk_size;
-	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
+	if (!CHIP_IS_E1(bp))
 		cp->ctx_align = 0;
 	else
 		cp->ctx_align = ctx_blk_size;
@@ -1231,6 +1232,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
 static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_eth_dev *ethdev = cp->ethdev;
 	u32 start_cid = ethdev->starting_cid;
 	int i, j, n, ret, pages;
@@ -1240,7 +1242,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 	cp->iscsi_start_cid = start_cid;
 	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
 		cp->max_cid_space += dev->max_fcoe_conn;
 		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
 		if (!cp->fcoe_init_cid)
@@ -1288,7 +1290,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 	if (ret)
 		goto error;
-	if (CNIC_SUPPORTS_FCOE(cp)) {
+	if (CNIC_SUPPORTS_FCOE(bp)) {
 		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
 		if (ret)
 			goto error;
@@ -1382,6 +1384,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
 			       u32 type, union l5cm_specific_data *l5_data)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct l5cm_spe kwqe;
 	struct kwqe_16 *kwq[1];
 	u16 type_16;
@@ -1389,10 +1392,10 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
 	kwqe.hdr.conn_and_cmd_data =
 		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
-			     BNX2X_HW_CID(cp, cid)));
+			     BNX2X_HW_CID(bp, cid)));
 	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
 		   SPE_HDR_FUNCTION_ID;
 	kwqe.hdr.type = cpu_to_le16(type_16);
@@ -1427,13 +1430,34 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
 	rcu_read_unlock();
 }
+static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
+				       int en_tcp_dack)
+{
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
+	u16 tstorm_flags = 0;
+	if (time_stamps) {
+		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+	}
+	if (en_tcp_dack)
+		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
+}
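The helper above replaces the old cnic_bnx2x_set_tcp_timestamp() (removed further down) and also absorbs the per-connection delayed-ACK setup. As the later hunks show, it ends up with two call sites; collected here for reference:

	/* iSCSI init1: honor the options carried in the KWQE */
	cnic_bnx2x_set_tcp_options(dev,
		req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
		req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);

	/* CM bring-up default: timestamps off, delayed ACK on */
	cnic_bnx2x_set_tcp_options(dev, 0, 1);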
 static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 {
 	struct cnic_local *cp = dev->cnic_priv;
 	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
 	int hq_bds, pages;
-	u32 pfid = cp->pfid;
+	u32 pfid = bp->pfid;
 	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
 	cp->num_ccells = req1->num_ccells_per_conn;
@@ -1506,15 +1530,18 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
 		  hq_bds);
+	cnic_bnx2x_set_tcp_options(dev,
+		req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
+		req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
 	return 0;
 }
 static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
 {
 	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
-	struct cnic_local *cp = dev->cnic_priv;
 	struct bnx2x *bp = netdev_priv(dev->netdev);
-	u32 pfid = cp->pfid;
+	u32 pfid = bp->pfid;
 	struct iscsi_kcqe kcqe;
 	struct kcqe *cqes[1];
@@ -1653,6 +1680,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 				u32 num)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct iscsi_kwqe_conn_offload1 *req1 =
 		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
 	struct iscsi_kwqe_conn_offload2 *req2 =
@@ -1661,11 +1689,11 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
 	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
 	u32 cid = ctx->cid;
-	u32 hw_cid = BNX2X_HW_CID(cp, cid);
+	u32 hw_cid = BNX2X_HW_CID(bp, cid);
 	struct iscsi_context *ictx;
 	struct regpair context_addr;
 	int i, j, n = 2, n_max;
-	u8 port = CNIC_PORT(cp);
+	u8 port = BP_PORT(bp);
 	ctx->ctx_flags = 0;
 	if (!req2->num_additional_wqes)
@@ -1719,8 +1747,8 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
 	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
 		ETH_P_8021Q;
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
-	    cp->port_mode == CHIP_2_PORT_MODE) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
+	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
 		port = 0;
 	}
@@ -1841,6 +1869,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
 	struct iscsi_kwqe_conn_offload1 *req1;
 	struct iscsi_kwqe_conn_offload2 *req2;
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_context *ctx;
 	struct iscsi_kcqe kcqe;
 	struct kcqe *cqes[1];
@@ -1894,7 +1923,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
 	}
 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
-	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
+	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
 done:
 	cqes[0] = (struct kcqe *) &kcqe;
@@ -1930,6 +1959,7 @@ static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
 static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
 	union l5cm_specific_data l5_data;
 	int ret;
@@ -1938,7 +1968,7 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
 	init_waitqueue_head(&ctx->waitq);
 	ctx->wait_cond = 0;
 	memset(&l5_data, 0, sizeof(l5_data));
-	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
+	hw_cid = BNX2X_HW_CID(bp, ctx->cid);
 	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
 				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
@@ -2035,9 +2065,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
 	xstorm_buf->pseudo_header_checksum =
 		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
-	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
-		tstorm_buf->params |=
-			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
 	if (kwqe3->ka_timeout) {
 		tstorm_buf->ka_enable = 1;
 		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
@@ -2049,9 +2076,8 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
 static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
 {
-	struct cnic_local *cp = dev->cnic_priv;
 	struct bnx2x *bp = netdev_priv(dev->netdev);
-	u32 pfid = cp->pfid;
+	u32 pfid = bp->pfid;
 	u8 *mac = dev->mac_addr;
 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
@@ -2084,25 +2110,6 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
 		 mac[0]);
 }
-static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	struct bnx2x *bp = netdev_priv(dev->netdev);
-	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
-	u16 tstorm_flags = 0;
-	if (tcp_ts) {
-		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
-		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
-	}
-	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
-		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
-	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
-		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
-}
 static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
 			      u32 num, int *work)
 {
@@ -2176,10 +2183,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
 	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
-		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
-	cnic_bnx2x_set_tcp_timestamp(dev,
-		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
+		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
 	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
 				  kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
@@ -2248,11 +2252,12 @@ static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
 	struct fcoe_stat_ramrod_params *fcoe_stat;
 	union l5cm_specific_data l5_data;
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	int ret;
 	u32 cid;
 	req = (struct fcoe_kwqe_stat *) kwqe;
-	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
 	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
 	if (!fcoe_stat)
@@ -2271,6 +2276,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
 {
 	int ret;
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	u32 cid;
 	struct fcoe_init_ramrod_params *fcoe_init;
 	struct fcoe_kwqe_init1 *req1;
@@ -2315,7 +2321,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
 	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
 	cp->kcq2.sw_prod_idx = 0;
-	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
 				  FCOE_CONNECTION_TYPE, &l5_data);
 	*work = 3;
@@ -2328,6 +2334,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
 	int ret = 0;
 	u32 cid = -1, l5_cid;
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct fcoe_kwqe_conn_offload1 *req1;
 	struct fcoe_kwqe_conn_offload2 *req2;
 	struct fcoe_kwqe_conn_offload3 *req3;
@@ -2370,7 +2377,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
 	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
 	if (fctx) {
-		u32 hw_cid = BNX2X_HW_CID(cp, cid);
+		u32 hw_cid = BNX2X_HW_CID(bp, cid);
 		u32 val;
 		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
@@ -2394,7 +2401,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
 	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
 	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
-	cid = BNX2X_HW_CID(cp, cid);
+	cid = BNX2X_HW_CID(bp, cid);
 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
 				  FCOE_CONNECTION_TYPE, &l5_data);
 	if (!ret)
@@ -2552,13 +2559,14 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
 	struct fcoe_kwqe_destroy *req;
 	union l5cm_specific_data l5_data;
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	int ret;
 	u32 cid;
 	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
 	req = (struct fcoe_kwqe_destroy *) kwqe;
-	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
 	memset(&l5_data, 0, sizeof(l5_data));
 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
@@ -2715,7 +2723,7 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
 					struct kwqe *wqes[], u32 num_wqes)
 {
-	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	int i, work, ret;
 	u32 opcode;
 	struct kwqe *kwqe;
@@ -2723,7 +2731,7 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
 		return -EAGAIN;		/* bnx2 is down */
-	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
 		return -EINVAL;
 	for (i = 0; i < num_wqes; ) {
@@ -3039,8 +3047,8 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
 				      u16 index, u8 op, u8 update)
 {
-	struct cnic_local *cp = dev->cnic_priv;
-	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
 		       COMMAND_REG_INT_ACK);
 	struct igu_ack_register igu_ack;
@@ -3603,6 +3611,7 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
 	csk1->rcv_buf = DEF_RCV_BUF;
 	csk1->snd_buf = DEF_SND_BUF;
 	csk1->seed = DEF_SEED;
+	csk1->tcp_flags = 0;
 	*csk = csk1;
 	return 0;
@@ -4020,15 +4029,18 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
 		cnic_cm_upcall(cp, csk, opcode);
 		break;
-	case L5CM_RAMROD_CMD_ID_CLOSE:
-		if (l4kcqe->status != 0) {
-			netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
-				    "status 0x%x\n", l4kcqe->status);
+	case L5CM_RAMROD_CMD_ID_CLOSE: {
+		struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
+		if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
+			netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
+				    l4kcqe->status, l5kcqe->completion_status);
 			opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
 			/* Fall through */
 		} else {
 			break;
 		}
+	}
 	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
 	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@ -4213,13 +4225,12 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
 {
-	struct cnic_local *cp = dev->cnic_priv;
 	struct bnx2x *bp = netdev_priv(dev->netdev);
-	u32 pfid = cp->pfid;
-	u32 port = CNIC_PORT(cp);
+	u32 pfid = bp->pfid;
+	u32 port = BP_PORT(bp);
 	cnic_init_bnx2x_mac(dev);
-	cnic_bnx2x_set_tcp_timestamp(dev, 1);
+	cnic_bnx2x_set_tcp_options(dev, 0, 1);
 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
 		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
@@ -4897,6 +4908,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
 				    struct client_init_ramrod_data *data)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_uio_dev *udev = cp->udev;
 	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
 	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
@@ -4925,7 +4937,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
 	start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
 	start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+	if (BNX2X_CHIP_IS_E2_PLUS(bp))
 		pbd_e2->parsing_data = (UNICAST_ADDRESS <<
 					ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
 	else
@@ -4962,6 +4974,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 				    struct client_init_ramrod_data *data)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_uio_dev *udev = cp->udev;
 	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
 				BNX2_PAGE_SIZE);
@@ -4970,7 +4983,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
 	int i;
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
-	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+	int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
 	u32 val;
 	dma_addr_t ring_map = udev->l2_ring_map;
@@ -4979,7 +4992,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 	data->general.activate_flg = 1;
 	data->general.sp_client_id = cli;
 	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
-	data->general.func_id = cp->pfid;
+	data->general.func_id = bp->pfid;
 	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
 		dma_addr_t buf_map;
@@ -5029,13 +5042,13 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
 	struct bnx2x *bp = netdev_priv(dev->netdev);
-	u32 pfid = cp->pfid;
+	u32 pfid = bp->pfid;
 	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
 			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
 	cp->kcq1.sw_prod_idx = 0;
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
 		cp->kcq1.hw_prod_idx_ptr =
@@ -5051,7 +5064,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
 			&sb->sb.running_index[SM_RX_ID];
 	}
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
 		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
@@ -5073,12 +5086,10 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 	u32 pfid;
 	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
-	cp->port_mode = bp->common.chip_port_mode;
-	cp->pfid = bp->pfid;
 	cp->func = bp->pf_num;
 	func = CNIC_FUNC(cp);
-	pfid = cp->pfid;
+	pfid = bp->pfid;
 	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
 			       cp->iscsi_start_cid, 0);
@@ -5086,7 +5097,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 	if (ret)
 		return -ENOMEM;
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
 		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
 				       cp->fcoe_start_cid, 0);
@@ -5168,12 +5179,12 @@ static void cnic_init_rings(struct cnic_dev *dev)
 		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
 		barrier();
-		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+		cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
 		off = BAR_USTRORM_INTMEM +
-			(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
+			(BNX2X_CHIP_IS_E2_PLUS(bp) ?
 			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
-			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
+			 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
 		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
 			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
@@ -5271,6 +5282,13 @@ static int cnic_register_netdev(struct cnic_dev *dev)
 	if (err)
 		netdev_err(dev->netdev, "register_cnic failed\n");
+	/* Read iSCSI config again.  On some bnx2x device, iSCSI config
+	 * can change after firmware is downloaded.
+	 */
+	dev->max_iscsi_conn = ethdev->max_iscsi_conn;
+	if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+		dev->max_iscsi_conn = 0;
 	return err;
 }
@@ -5353,7 +5371,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
 	cnic_free_irq(dev);
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
 		idx_off = offsetof(struct hc_status_block_e2, index_values) +
 			  (hc_index * sizeof(u16));
@@ -5370,7 +5388,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
 	*cp->kcq1.hw_prod_idx_ptr = 0;
 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
-		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
+		CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
 	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
 	cnic_free_resc(dev);
 }
@@ -5544,7 +5562,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
 	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
 		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
-	if (CNIC_SUPPORTS_FCOE(cp)) {
+	if (CNIC_SUPPORTS_FCOE(bp)) {
 		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
 		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
 	}
@@ -5564,7 +5582,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
 	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
 	cp->enable_int = cnic_enable_bnx2x_int;
 	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
 		cp->ack_int = cnic_ack_bnx2x_e2_msix;
 		cp->arm_int = cnic_arm_bnx2x_e2_msix;
 	} else {
@@ -5628,7 +5646,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 	dev = cnic_from_netdev(netdev);
-	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
+	if (!dev && event == NETDEV_REGISTER) {
 		/* Check for the hot-plug device */
 		dev = is_cnic_dev(netdev);
 		if (dev) {
@@ -5644,7 +5662,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 	else if (event == NETDEV_UNREGISTER)
 		cnic_ulp_exit(dev);
-	if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
+	if (event == NETDEV_UP) {
 		if (cnic_register_netdev(dev) != 0) {
 			cnic_put(dev);
 			goto done;
@@ -5693,21 +5711,8 @@ static struct notifier_block cnic_netdev_notifier = {
 static void cnic_release(void)
 {
-	struct cnic_dev *dev;
 	struct cnic_uio_dev *udev;
-	while (!list_empty(&cnic_dev_list)) {
-		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
-		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
-			cnic_ulp_stop(dev);
-			cnic_stop_hw(dev);
-		}
-		cnic_ulp_exit(dev);
-		cnic_unregister_netdev(dev);
-		list_del_init(&dev->list);
-		cnic_free_dev(dev);
-	}
 	while (!list_empty(&cnic_udev_list)) {
 		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
 				  list);


@@ -1,6 +1,6 @@
 /* cnic.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2011 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -303,8 +303,6 @@ struct cnic_local {
 	u32 chip_id;
 	int func;
-	u32 pfid;
-	u8 port_mode;
 	u32 shmem_base;
@@ -364,47 +362,7 @@ struct bnx2x_bd_chain_next {
 #define BNX2X_FCOE_L5_CID_BASE	MAX_ISCSI_TBL_SZ
-#define BNX2X_CHIP_NUM_57710		0x164e
-#define BNX2X_CHIP_NUM_57711		0x164f
-#define BNX2X_CHIP_NUM_57711E		0x1650
-#define BNX2X_CHIP_NUM_57712		0x1662
-#define BNX2X_CHIP_NUM_57712E		0x1663
-#define BNX2X_CHIP_NUM_57713		0x1651
-#define BNX2X_CHIP_NUM_57713E		0x1652
-#define BNX2X_CHIP_NUM_57800		0x168a
-#define BNX2X_CHIP_NUM_57810		0x168e
-#define BNX2X_CHIP_NUM_57840		0x168d
-#define BNX2X_CHIP_NUM(x)		(x >> 16)
-#define BNX2X_CHIP_IS_57710(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
-#define BNX2X_CHIP_IS_57711(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
-#define BNX2X_CHIP_IS_57711E(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
-#define BNX2X_CHIP_IS_E1H(x)		\
-	(BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
-#define BNX2X_CHIP_IS_57712(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
-#define BNX2X_CHIP_IS_57712E(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
-#define BNX2X_CHIP_IS_57713(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
-#define BNX2X_CHIP_IS_57713E(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
-#define BNX2X_CHIP_IS_57800(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800)
-#define BNX2X_CHIP_IS_57810(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810)
-#define BNX2X_CHIP_IS_57840(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840)
-#define BNX2X_CHIP_IS_E2(x)		\
-	(BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
-	 BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
-#define BNX2X_CHIP_IS_E3(x)		\
-	(BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \
-	 BNX2X_CHIP_IS_57840(x))
-#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
+#define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
 #define BNX2X_RX_DESC_CNT	(BNX2_PAGE_SIZE / \
 				 sizeof(struct eth_rx_bd))
@@ -439,31 +397,26 @@ struct bnx2x_bd_chain_next {
 #define ETH_MAX_RX_CLIENTS_E2	ETH_MAX_RX_CLIENTS_E1H
 #endif
-#define CNIC_PORT(cp)		((cp)->pfid & 1)
 #define CNIC_FUNC(cp)		((cp)->func)
-#define CNIC_PATH(cp)		(!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
-				 0 : (CNIC_FUNC(cp) & 1))
-#define CNIC_E1HVN(cp)		((cp)->pfid >> 1)
-#define BNX2X_HW_CID(cp, x)	((CNIC_PORT(cp) << 23) | \
-				 (CNIC_E1HVN(cp) << 17) | (x))
+#define BNX2X_HW_CID(bp, x)	((BP_PORT(bp) << 23) | \
				 (BP_VN(bp) << 17) | (x))
 #define BNX2X_SW_CID(x)		(x & 0x1ffff)
-#define BNX2X_CL_QZONE_ID(cp, cli)					\
-		(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli :		\
-		 cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+#define BNX2X_CL_QZONE_ID(bp, cli)					\
+		(BNX2X_CHIP_IS_E2_PLUS(bp) ? cli :			\
+		 cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H))
 #ifndef MAX_STAT_COUNTER_ID
 #define MAX_STAT_COUNTER_ID						\
-	(BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H :	\
-	 ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
-	  MAX_STAT_COUNTER_ID_E1))
+	(CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H :			\
+	 ((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 :	\
+	  MAX_STAT_COUNTER_ID_E1))
 #endif
 #define CNIC_SUPPORTS_FCOE(cp)						\
-	(BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) &&			\
-	 !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+	(BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp))
 #define CNIC_RAMROD_TMO			(HZ / 4)
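For readers without the bnx2x headers at hand, a minimal sketch of the value the reworked macro computes, assuming BP_PORT() and BP_VN() return the physical-port and VN indices the bnx2x driver already maintains:

	static inline u32 example_hw_cid(u32 port, u32 vn, u32 sw_cid)
	{
		/* same bit layout as BNX2X_HW_CID(bp, x) above */
		return (port << 23) | (vn << 17) | sw_cid;
	}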


@@ -1,7 +1,7 @@
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -5400,8 +5400,8 @@ struct tstorm_l5cm_tcp_flags {
 	u16 flags;
 #define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
 #define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12)
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN (0x1<<12)
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_SHIFT 12
 #define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
 #define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
 #define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)


@@ -1,6 +1,6 @@
 /* cnic_if.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
 #include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION	"2.5.16"
-#define CNIC_MODULE_RELDATE	"Dec 05, 2012"
+#define CNIC_MODULE_VERSION	"2.5.18"
+#define CNIC_MODULE_RELDATE	"Sept 01, 2013"
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -238,8 +238,8 @@ struct cnic_sock {
 	u16 src_port;
 	u16 dst_port;
 	u16 vlan_id;
-	unsigned char old_ha[6];
-	unsigned char ha[6];
+	unsigned char old_ha[ETH_ALEN];
+	unsigned char ha[ETH_ALEN];
 	u32 mtu;
 	u32 cid;
 	u32 l5_cid;
@@ -308,7 +308,7 @@ struct cnic_dev {
 #define CNIC_F_BNX2_CLASS	3
 #define CNIC_F_BNX2X_CLASS	4
 	atomic_t ref_count;
-	u8 mac_addr[6];
+	u8 mac_addr[ETH_ALEN];
 	int max_iscsi_conn;
 	int max_fcoe_conn;


@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM		3
-#define TG3_MIN_NUM		132
+#define TG3_MIN_NUM		133
 #define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"May 21, 2013"
+#define DRV_MODULE_RELDATE	"Jul 29, 2013"
 #define RESET_KIND_SHUTDOWN	0
 #define RESET_KIND_INIT		1
@@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
 	return false;
 }
+static bool tg3_phy_led_bug(struct tg3 *tp)
+{
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5719:
+		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+		    !tp->pci_fn)
+			return true;
+		return false;
+	}
+	return false;
+}
 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 {
 	u32 val;
@@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 		}
 		return;
 	} else if (do_low_power) {
-		tg3_writephy(tp, MII_TG3_EXT_CTRL,
-			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+		if (!tg3_phy_led_bug(tp))
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
@@ -4226,8 +4240,6 @@ static int tg3_power_down_prepare(struct tg3 *tp)
 static void tg3_power_down(struct tg3 *tp)
 {
-	tg3_power_down_prepare(tp);
 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
 	pci_set_power_state(tp->pdev, PCI_D3hot);
 }
@@ -6095,10 +6107,12 @@ static u64 tg3_refclk_read(struct tg3 *tp)
 /* tp->lock must be held */
 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
 {
-	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
+	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
-	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
+	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
 }
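The point of the rewrite is that this control register now carries more than the STOP/RESUME bits (the TSYNC watchdog enable added later in this series lives there too), so the update has to be a read-modify-write. The pattern, sketched:

	u32 ctl = tr32(TG3_EAV_REF_CLCK_CTL);	/* read: keep unrelated bits */
	tw32(TG3_EAV_REF_CLCK_CTL, ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	/* ... load the new LSB/MSB clock value ... */
	tw32_f(TG3_EAV_REF_CLCK_CTL, ctl | TG3_EAV_REF_CLCK_CTL_RESUME);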
 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
@@ -6214,6 +6228,59 @@ static int tg3_ptp_settime(struct ptp_clock_info *ptp,
 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
 			  struct ptp_clock_request *rq, int on)
 {
+	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+	u32 clock_ctl;
+	int rval = 0;
+	switch (rq->type) {
+	case PTP_CLK_REQ_PEROUT:
+		if (rq->perout.index != 0)
+			return -EINVAL;
+		tg3_full_lock(tp, 0);
+		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
+		if (on) {
+			u64 nsec;
+			nsec = rq->perout.start.sec * 1000000000ULL +
+			       rq->perout.start.nsec;
+			if (rq->perout.period.sec || rq->perout.period.nsec) {
+				netdev_warn(tp->dev,
+					    "Device supports only a one-shot timesync output, period must be 0\n");
+				rval = -EINVAL;
+				goto err_out;
+			}
+			if (nsec & (1ULL << 63)) {
+				netdev_warn(tp->dev,
+					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
+				rval = -EINVAL;
+				goto err_out;
+			}
+			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
+			tw32(TG3_EAV_WATCHDOG0_MSB,
+			     TG3_EAV_WATCHDOG0_EN |
+			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
+			tw32(TG3_EAV_REF_CLCK_CTL,
+			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
+		} else {
+			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
+			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
+		}
+err_out:
+		tg3_full_unlock(tp);
+		return rval;
+	default:
+		break;
+	}
 	return -EOPNOTSUPP;
 }
@@ -6223,7 +6290,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
 	.max_adj	= 250000000,
 	.n_alarm	= 0,
 	.n_ext_ts	= 0,
-	.n_per_out	= 0,
+	.n_per_out	= 1,
 	.pps		= 0,
 	.adjfreq	= tg3_ptp_adjfreq,
 	.adjtime	= tg3_ptp_adjtime,
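With n_per_out now 1, the one-shot output is reachable through the standard PTP character-device interface. A hedged userspace sketch (device path and error handling are illustrative; note the constraints the driver enforces above, i.e. index 0, zero period, and a start time below 2^63 ns):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	static int tg3_arm_oneshot(long long sec, unsigned int nsec)
	{
		struct ptp_perout_request req;
		int fd = open("/dev/ptp0", O_RDWR);	/* illustrative path */

		if (fd < 0)
			return -1;
		memset(&req, 0, sizeof(req));
		req.index = 0;		/* tg3 exposes a single output */
		req.start.sec = sec;	/* fires once at this absolute time */
		req.start.nsec = nsec;
		/* req.period stays zero: tg3 rejects a periodic request */
		return ioctl(fd, PTP_PEROUT_REQUEST, &req);
	}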
@@ -8538,10 +8605,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
 		if (!i && tg3_flag(tp, ENABLE_RSS))
 			continue;
-		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
 						   TG3_RX_RCB_RING_BYTES(tp),
 						   &tnapi->rx_rcb_mapping,
-						   GFP_KERNEL | __GFP_ZERO);
+						   GFP_KERNEL);
 		if (!tnapi->rx_rcb)
 			goto err_out;
 	}
@@ -8590,10 +8657,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 {
 	int i;
-	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
 					  sizeof(struct tg3_hw_stats),
-					  &tp->stats_mapping,
-					  GFP_KERNEL | __GFP_ZERO);
+					  &tp->stats_mapping, GFP_KERNEL);
 	if (!tp->hw_stats)
 		goto err_out;
@@ -8601,10 +8667,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 		struct tg3_napi *tnapi = &tp->napi[i];
 		struct tg3_hw_status *sblk;
-		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
 						      TG3_HW_STATUS_SIZE,
 						      &tnapi->status_mapping,
-						      GFP_KERNEL | __GFP_ZERO);
+						      GFP_KERNEL);
 		if (!tnapi->hw_status)
 			goto err_out;
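The conversions above are behavior-preserving: at this point in the kernel, dma_zalloc_coherent() is just a zeroing wrapper, roughly (paraphrasing include/linux/dma-mapping.h):

	static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
						dma_addr_t *dma_handle,
						gfp_t flag)
	{
		/* same allocation, with the zeroing folded into the flags */
		return dma_alloc_coherent(dev, size, dma_handle,
					  flag | __GFP_ZERO);
	}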
@@ -10367,6 +10433,9 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
 	if (tg3_flag(tp, 5755_PLUS))
 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
+	if (tg3_asic_rev(tp) == ASIC_REV_5762)
+		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
 	if (tg3_flag(tp, ENABLE_RSS))
 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
@@ -11502,7 +11571,7 @@ static int tg3_close(struct net_device *dev)
 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
-	tg3_power_down(tp);
+	tg3_power_down_prepare(tp);
 	tg3_carrier_off(tp);
@@ -11724,9 +11793,6 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 	if (tg3_flag(tp, NO_NVRAM))
 		return -EINVAL;
-	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
-		return -EAGAIN;
 	offset = eeprom->offset;
 	len = eeprom->len;
 	eeprom->len = 0;
@@ -11784,9 +11850,6 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 	u8 *buf;
 	__be32 start, end;
-	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
-		return -EAGAIN;
 	if (tg3_flag(tp, NO_NVRAM) ||
 	    eeprom->magic != TG3_EEPROM_MAGIC)
 		return -EINVAL;
@@ -13515,7 +13578,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 		tg3_phy_start(tp);
 	}
 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
-		tg3_power_down(tp);
+		tg3_power_down_prepare(tp);
 }
@@ -15917,7 +15980,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
 	 */
 	if (tg3_flag(tp, 5780_CLASS)) {
 		tg3_flag_set(tp, 40BIT_DMA_BUG);
-		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
+		tp->msi_cap = tp->pdev->msi_cap;
 	} else {
 		struct pci_dev *bridge = NULL;
@@ -17547,11 +17610,6 @@ static int tg3_init_one(struct pci_dev *pdev,
 	    tg3_asic_rev(tp) == ASIC_REV_5762)
 		tg3_flag_set(tp, PTP_CAPABLE);
-	if (tg3_flag(tp, 5717_PLUS)) {
-		/* Resume a low-power mode */
-		tg3_frob_aux_power(tp, false);
-	}
 	tg3_timer_init(tp);
 	tg3_carrier_off(tp);
@@ -17755,6 +17813,23 @@ out:
 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+static void tg3_shutdown(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(dev);
+	rtnl_lock();
+	netif_device_detach(dev);
+	if (netif_running(dev))
+		dev_close(dev);
+	if (system_state == SYSTEM_POWER_OFF)
+		tg3_power_down(tp);
+	rtnl_unlock();
+}
 /**
  * tg3_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -17914,6 +17989,7 @@ static struct pci_driver tg3_driver = {
 	.remove		= tg3_remove_one,
 	.err_handler	= &tg3_err_handler,
 	.driver.pm	= &tg3_pm_ops,
+	.shutdown	= tg3_shutdown,
 };
 module_pci_driver(tg3_driver);


@@ -532,6 +532,7 @@
 #define  RX_MODE_RSS_ITBL_HASH_BITS_7	 0x00700000
 #define  RX_MODE_RSS_ENABLE		 0x00800000
 #define  RX_MODE_IPV6_CSUM_ENABLE	 0x01000000
+#define  RX_MODE_IPV4_FRAG_FIX		 0x02000000
 #define MAC_RX_STATUS			0x0000046c
 #define  RX_STATUS_REMOTE_TX_XOFFED	 0x00000001
 #define  RX_STATUS_XOFF_RCVD		 0x00000002
@@ -1818,12 +1819,21 @@
 #define TG3_EAV_REF_CLCK_CTL		0x00006908
 #define  TG3_EAV_REF_CLCK_CTL_STOP	 0x00000002
 #define  TG3_EAV_REF_CLCK_CTL_RESUME	 0x00000004
+#define  TG3_EAV_CTL_TSYNC_GPIO_MASK	 (0x3 << 16)
+#define  TG3_EAV_CTL_TSYNC_WDOG0	 (1 << 17)
+#define TG3_EAV_WATCHDOG0_LSB		0x00006918
+#define TG3_EAV_WATCHDOG0_MSB		0x0000691c
+#define  TG3_EAV_WATCHDOG0_EN		 (1 << 31)
+#define  TG3_EAV_WATCHDOG_MSB_MASK	 0x7fffffff
 #define TG3_EAV_REF_CLK_CORRECT_CTL	0x00006928
 #define  TG3_EAV_REF_CLK_CORRECT_EN	 (1 << 31)
 #define  TG3_EAV_REF_CLK_CORRECT_NEG	 (1 << 30)
 #define TG3_EAV_REF_CLK_CORRECT_MASK	0xffffff
-/* 0x690c --> 0x7000 unused */
+/* 0x692c --> 0x7000 unused */
 /* NVRAM Control registers */
 #define NVRAM_CMD			0x00007000
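Taken together with the tg3_ptp_enable() hunk earlier, the new register pair carries a 63-bit nanosecond deadline; a sketch of how the driver splits it:

	u64 nsec = deadline_ns;			/* bit 63 must be clear */
	u32 lsb = nsec & 0xffffffff;		/* -> TG3_EAV_WATCHDOG0_LSB */
	u32 msb = TG3_EAV_WATCHDOG0_EN |	/* -> TG3_EAV_WATCHDOG0_MSB */
		  ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK);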


@@ -1419,7 +1419,7 @@ static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
 	bna_bfi_rx_enet_start(rx);
 }
-void
+static void
 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
 {
 }
@@ -1472,7 +1472,7 @@ static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
 	bna_rxf_start(&rx->rxf);
 }
-void
+static void
 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
 {
 }
@@ -1528,7 +1528,7 @@ bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
 	}
 }
-void
+static void
 bna_rx_sm_started_entry(struct bna_rx *rx)
 {
 	struct bna_rxp *rxp;
@@ -1593,12 +1593,12 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
 	}
 }
-void
+static void
 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
 {
 }
-void
+static void
 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
 {
 	switch (event) {


@@ -37,8 +37,8 @@
 extern char bfa_version[];
-#define CNA_FW_FILE_CT	"ctfw-3.2.1.0.bin"
-#define CNA_FW_FILE_CT2	"ct2fw-3.2.1.0.bin"
+#define CNA_FW_FILE_CT	"ctfw-3.2.1.1.bin"
+#define CNA_FW_FILE_CT2	"ct2fw-3.2.1.1.bin"
 #define FC_SYMNAME_MAX	256	/*!< max name server symbolic name size */
 #pragma pack(1)


@@ -304,7 +304,7 @@ MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
 /* Detect MAC & PHY and perform ethernet interface initialization */
 static int __init at91ether_probe(struct platform_device *pdev)
 {
-	struct macb_platform_data *board_data = pdev->dev.platform_data;
+	struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev);
 	struct resource *regs;
 	struct net_device *dev;
 	struct phy_device *phydev;

Some files were not shown because too many files have changed in this diff.