sfc: separate out SFC4000 ("Falcon") support into new sfc-falcon driver

Rationale: The differences between Falcon and Siena are in many ways larger
 than those between Siena and EF10 (despite Siena being nominally "Falcon-
 architecture"); for instance, Falcon has no MCPU, so there is no MCDI.
 Removing Falcon support from the sfc driver should simplify the latter,
 and avoid the possibility of Falcon support being broken by changes to sfc
 (which are rarely if ever tested on Falcon, it being end-of-lifed hardware).

The sfc-falcon driver created in this changeset is essentially a copy of the
 sfc driver, but with Siena- and EF10-specific code, including MCDI, removed
 and with the "efx_" identifier prefix changed to "ef4_" (for "EFX 4000-
 series") to avoid collisions when both drivers are built-in.

This changeset removes Falcon from the sfc driver's PCI ID table; then,
 in sfc, I removed the obvious Falcon-related code (the Falcon NIC
 functions, the Falcon PHY code and EFX_REV_FALCON_*) and fixed up
 everything that referenced them.

Also, increment minor version of both drivers (to 4.1).

For now, CONFIG_SFC selects CONFIG_SFC_FALCON, so that updating old configs
 doesn't cause Falcon support to disappear; but that should be undone at
 some point in the future.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Edward Cree, 2016-11-28 18:55:34 +00:00
Committed by David S. Miller
parent 6bb10c2bc6
commit 5a6681e22c
44 changed files with 18209 additions and 1216 deletions


@@ -0,0 +1,21 @@
config SFC_FALCON
tristate "Solarflare SFC4000 support"
depends on PCI
select MDIO
select CRC32
select I2C
select I2C_ALGOBIT
---help---
This driver supports 10-gigabit Ethernet cards based on
the Solarflare SFC4000 controller.
To compile this driver as a module, choose M here. The module
will be called sfc-falcon.
config SFC_FALCON_MTD
bool "Solarflare SFC4000 MTD support"
depends on SFC_FALCON && MTD && !(SFC_FALCON=y && MTD=m)
default y
---help---
This exposes the on-board flash and/or EEPROM as MTD devices
(e.g. /dev/mtd1). This is required to update the boot
configuration under Linux.


@@ -0,0 +1,6 @@
sfc-falcon-y += efx.o nic.o farch.o falcon.o tx.o rx.o selftest.o \
ethtool.o qt202x_phy.o mdio_10g.o tenxpress.o \
txc43128_phy.o falcon_boards.o
sfc-falcon-$(CONFIG_SFC_FALCON_MTD) += mtd.o
obj-$(CONFIG_SFC_FALCON) += sfc-falcon.o


@@ -0,0 +1,542 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_BITFIELD_H
#define EF4_BITFIELD_H
/*
* Efx bitfield access
*
* Efx NICs make extensive use of bitfields up to 128 bits
* wide. Since there is no native 128-bit datatype on most systems,
* and since 64-bit datatypes are inefficient on 32-bit systems and
* vice versa, we wrap accesses in a way that uses the most efficient
* datatype.
*
* The NICs are PCI devices and therefore little-endian. Since most
* of the quantities that we deal with are DMAed to/from host memory,
* we define our datatypes (ef4_oword_t, ef4_qword_t and
* ef4_dword_t) to be little-endian.
*/
/* Lowest bit numbers and widths */
#define EF4_DUMMY_FIELD_LBN 0
#define EF4_DUMMY_FIELD_WIDTH 0
#define EF4_WORD_0_LBN 0
#define EF4_WORD_0_WIDTH 16
#define EF4_WORD_1_LBN 16
#define EF4_WORD_1_WIDTH 16
#define EF4_DWORD_0_LBN 0
#define EF4_DWORD_0_WIDTH 32
#define EF4_DWORD_1_LBN 32
#define EF4_DWORD_1_WIDTH 32
#define EF4_DWORD_2_LBN 64
#define EF4_DWORD_2_WIDTH 32
#define EF4_DWORD_3_LBN 96
#define EF4_DWORD_3_WIDTH 32
#define EF4_QWORD_0_LBN 0
#define EF4_QWORD_0_WIDTH 64
/* Specified attribute (e.g. LBN) of the specified field */
#define EF4_VAL(field, attribute) field ## _ ## attribute
/* Low bit number of the specified field */
#define EF4_LOW_BIT(field) EF4_VAL(field, LBN)
/* Bit width of the specified field */
#define EF4_WIDTH(field) EF4_VAL(field, WIDTH)
/* High bit number of the specified field */
#define EF4_HIGH_BIT(field) (EF4_LOW_BIT(field) + EF4_WIDTH(field) - 1)
/* Mask equal in width to the specified field.
*
* For example, a field with width 5 would have a mask of 0x1f.
*
* The maximum width mask that can be generated is 64 bits.
*/
#define EF4_MASK64(width) \
((width) == 64 ? ~((u64) 0) : \
(((((u64) 1) << (width))) - 1))
/* Mask equal in width to the specified field.
*
* For example, a field with width 5 would have a mask of 0x1f.
*
* The maximum width mask that can be generated is 32 bits. Use
* EF4_MASK64 for higher width fields.
*/
#define EF4_MASK32(width) \
((width) == 32 ? ~((u32) 0) : \
(((((u32) 1) << (width))) - 1))
/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
typedef union ef4_dword {
__le32 u32[1];
} ef4_dword_t;
/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
typedef union ef4_qword {
__le64 u64[1];
__le32 u32[2];
ef4_dword_t dword[2];
} ef4_qword_t;
/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
typedef union ef4_oword {
__le64 u64[2];
ef4_qword_t qword[2];
__le32 u32[4];
ef4_dword_t dword[4];
} ef4_oword_t;
/* Format string and value expanders for printk */
#define EF4_DWORD_FMT "%08x"
#define EF4_QWORD_FMT "%08x:%08x"
#define EF4_OWORD_FMT "%08x:%08x:%08x:%08x"
#define EF4_DWORD_VAL(dword) \
((unsigned int) le32_to_cpu((dword).u32[0]))
#define EF4_QWORD_VAL(qword) \
((unsigned int) le32_to_cpu((qword).u32[1])), \
((unsigned int) le32_to_cpu((qword).u32[0]))
#define EF4_OWORD_VAL(oword) \
((unsigned int) le32_to_cpu((oword).u32[3])), \
((unsigned int) le32_to_cpu((oword).u32[2])), \
((unsigned int) le32_to_cpu((oword).u32[1])), \
((unsigned int) le32_to_cpu((oword).u32[0]))
/*
* Extract bit field portion [low,high] from the native-endian element
* which contains bits [min,max].
*
* For example, suppose "element" represents the high 32 bits of a
* 64-bit value, and we wish to extract the bits belonging to the bit
* field occupying bits 28-45 of this 64-bit value.
*
* Then EF4_EXTRACT ( element, 32, 63, 28, 45 ) would give
*
* ( element ) << 4
*
* The result will contain the relevant bits filled in over the range
* [0,high-low], with garbage in bits [high-low+1,...).
*/
#define EF4_EXTRACT_NATIVE(native_element, min, max, low, high) \
((low) > (max) || (high) < (min) ? 0 : \
(low) > (min) ? \
(native_element) >> ((low) - (min)) : \
(native_element) << ((min) - (low)))
/*
* Extract bit field portion [low,high] from the 64-bit little-endian
* element which contains bits [min,max]
*/
#define EF4_EXTRACT64(element, min, max, low, high) \
EF4_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
/*
* Extract bit field portion [low,high] from the 32-bit little-endian
* element which contains bits [min,max]
*/
#define EF4_EXTRACT32(element, min, max, low, high) \
EF4_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
#define EF4_EXTRACT_OWORD64(oword, low, high) \
((EF4_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
EF4_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
EF4_MASK64((high) + 1 - (low)))
#define EF4_EXTRACT_QWORD64(qword, low, high) \
(EF4_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
EF4_MASK64((high) + 1 - (low)))
#define EF4_EXTRACT_OWORD32(oword, low, high) \
((EF4_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
EF4_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
EF4_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
EF4_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
EF4_MASK32((high) + 1 - (low)))
#define EF4_EXTRACT_QWORD32(qword, low, high) \
((EF4_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
EF4_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
EF4_MASK32((high) + 1 - (low)))
#define EF4_EXTRACT_DWORD(dword, low, high) \
(EF4_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
EF4_MASK32((high) + 1 - (low)))
#define EF4_OWORD_FIELD64(oword, field) \
EF4_EXTRACT_OWORD64(oword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field))
#define EF4_QWORD_FIELD64(qword, field) \
EF4_EXTRACT_QWORD64(qword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field))
#define EF4_OWORD_FIELD32(oword, field) \
EF4_EXTRACT_OWORD32(oword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field))
#define EF4_QWORD_FIELD32(qword, field) \
EF4_EXTRACT_QWORD32(qword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field))
#define EF4_DWORD_FIELD(dword, field) \
EF4_EXTRACT_DWORD(dword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field))
#define EF4_OWORD_IS_ZERO64(oword) \
(((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
#define EF4_QWORD_IS_ZERO64(qword) \
(((qword).u64[0]) == (__force __le64) 0)
#define EF4_OWORD_IS_ZERO32(oword) \
(((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
== (__force __le32) 0)
#define EF4_QWORD_IS_ZERO32(qword) \
(((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
#define EF4_DWORD_IS_ZERO(dword) \
(((dword).u32[0]) == (__force __le32) 0)
#define EF4_OWORD_IS_ALL_ONES64(oword) \
(((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
#define EF4_QWORD_IS_ALL_ONES64(qword) \
((qword).u64[0] == ~((__force __le64) 0))
#define EF4_OWORD_IS_ALL_ONES32(oword) \
(((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
== ~((__force __le32) 0))
#define EF4_QWORD_IS_ALL_ONES32(qword) \
(((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
#define EF4_DWORD_IS_ALL_ONES(dword) \
((dword).u32[0] == ~((__force __le32) 0))
#if BITS_PER_LONG == 64
#define EF4_OWORD_FIELD EF4_OWORD_FIELD64
#define EF4_QWORD_FIELD EF4_QWORD_FIELD64
#define EF4_OWORD_IS_ZERO EF4_OWORD_IS_ZERO64
#define EF4_QWORD_IS_ZERO EF4_QWORD_IS_ZERO64
#define EF4_OWORD_IS_ALL_ONES EF4_OWORD_IS_ALL_ONES64
#define EF4_QWORD_IS_ALL_ONES EF4_QWORD_IS_ALL_ONES64
#else
#define EF4_OWORD_FIELD EF4_OWORD_FIELD32
#define EF4_QWORD_FIELD EF4_QWORD_FIELD32
#define EF4_OWORD_IS_ZERO EF4_OWORD_IS_ZERO32
#define EF4_QWORD_IS_ZERO EF4_QWORD_IS_ZERO32
#define EF4_OWORD_IS_ALL_ONES EF4_OWORD_IS_ALL_ONES32
#define EF4_QWORD_IS_ALL_ONES EF4_QWORD_IS_ALL_ONES32
#endif
/*
* Construct bit field portion
*
* Creates the portion of the bit field [low,high] that lies within
* the range [min,max].
*/
#define EF4_INSERT_NATIVE64(min, max, low, high, value) \
(((low > max) || (high < min)) ? 0 : \
((low > min) ? \
(((u64) (value)) << (low - min)) : \
(((u64) (value)) >> (min - low))))
#define EF4_INSERT_NATIVE32(min, max, low, high, value) \
(((low > max) || (high < min)) ? 0 : \
((low > min) ? \
(((u32) (value)) << (low - min)) : \
(((u32) (value)) >> (min - low))))
#define EF4_INSERT_NATIVE(min, max, low, high, value) \
((((max - min) >= 32) || ((high - low) >= 32)) ? \
EF4_INSERT_NATIVE64(min, max, low, high, value) : \
EF4_INSERT_NATIVE32(min, max, low, high, value))
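/* Worked example (editor's note, values illustrative):
 * EF4_INSERT_NATIVE(0, 63, 12, 19, 0xab) picks the 64-bit variant,
 * since (max - min) >= 32, and evaluates to ((u64) 0xab) << 12,
 * i.e. the value shifted into bit positions 12-19 of the element.
 */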
/*
* Construct bit field portion
*
* Creates the portion of the named bit field that lies within the
* range [min,max].
*/
#define EF4_INSERT_FIELD_NATIVE(min, max, field, value) \
EF4_INSERT_NATIVE(min, max, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field), value)
/*
* Construct bit field
*
* Creates the portion of the named bit fields that lie within the
* range [min,max].
*/
#define EF4_INSERT_FIELDS_NATIVE(min, max, \
field1, value1, \
field2, value2, \
field3, value3, \
field4, value4, \
field5, value5, \
field6, value6, \
field7, value7, \
field8, value8, \
field9, value9, \
field10, value10) \
(EF4_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
#define EF4_INSERT_FIELDS64(...) \
cpu_to_le64(EF4_INSERT_FIELDS_NATIVE(__VA_ARGS__))
#define EF4_INSERT_FIELDS32(...) \
cpu_to_le32(EF4_INSERT_FIELDS_NATIVE(__VA_ARGS__))
#define EF4_POPULATE_OWORD64(oword, ...) do { \
(oword).u64[0] = EF4_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
(oword).u64[1] = EF4_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
} while (0)
#define EF4_POPULATE_QWORD64(qword, ...) do { \
(qword).u64[0] = EF4_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
} while (0)
#define EF4_POPULATE_OWORD32(oword, ...) do { \
(oword).u32[0] = EF4_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
(oword).u32[1] = EF4_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
(oword).u32[2] = EF4_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
(oword).u32[3] = EF4_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
} while (0)
#define EF4_POPULATE_QWORD32(qword, ...) do { \
(qword).u32[0] = EF4_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
(qword).u32[1] = EF4_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
} while (0)
#define EF4_POPULATE_DWORD(dword, ...) do { \
(dword).u32[0] = EF4_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
} while (0)
#if BITS_PER_LONG == 64
#define EF4_POPULATE_OWORD EF4_POPULATE_OWORD64
#define EF4_POPULATE_QWORD EF4_POPULATE_QWORD64
#else
#define EF4_POPULATE_OWORD EF4_POPULATE_OWORD32
#define EF4_POPULATE_QWORD EF4_POPULATE_QWORD32
#endif
/* Populate an octword field with various numbers of arguments */
#define EF4_POPULATE_OWORD_10 EF4_POPULATE_OWORD
#define EF4_POPULATE_OWORD_9(oword, ...) \
EF4_POPULATE_OWORD_10(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_8(oword, ...) \
EF4_POPULATE_OWORD_9(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_7(oword, ...) \
EF4_POPULATE_OWORD_8(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_6(oword, ...) \
EF4_POPULATE_OWORD_7(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_5(oword, ...) \
EF4_POPULATE_OWORD_6(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_4(oword, ...) \
EF4_POPULATE_OWORD_5(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_3(oword, ...) \
EF4_POPULATE_OWORD_4(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_2(oword, ...) \
EF4_POPULATE_OWORD_3(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_1(oword, ...) \
EF4_POPULATE_OWORD_2(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
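/* Editor's note: each EF4_POPULATE_OWORD_<n> pads the argument list
 * with (EF4_DUMMY_FIELD, 0) pairs until EF4_POPULATE_OWORD receives
 * exactly ten field/value pairs; EF4_DUMMY_FIELD has zero width, so
 * each pad pair contributes nothing to the result.
 */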
#define EF4_ZERO_OWORD(oword) \
EF4_POPULATE_OWORD_1(oword, EF4_DUMMY_FIELD, 0)
#define EF4_SET_OWORD(oword) \
EF4_POPULATE_OWORD_4(oword, \
EF4_DWORD_0, 0xffffffff, \
EF4_DWORD_1, 0xffffffff, \
EF4_DWORD_2, 0xffffffff, \
EF4_DWORD_3, 0xffffffff)
/* Populate a quadword field with various numbers of arguments */
#define EF4_POPULATE_QWORD_10 EF4_POPULATE_QWORD
#define EF4_POPULATE_QWORD_9(qword, ...) \
EF4_POPULATE_QWORD_10(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_8(qword, ...) \
EF4_POPULATE_QWORD_9(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_7(qword, ...) \
EF4_POPULATE_QWORD_8(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_6(qword, ...) \
EF4_POPULATE_QWORD_7(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_5(qword, ...) \
EF4_POPULATE_QWORD_6(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_4(qword, ...) \
EF4_POPULATE_QWORD_5(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_3(qword, ...) \
EF4_POPULATE_QWORD_4(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_2(qword, ...) \
EF4_POPULATE_QWORD_3(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_1(qword, ...) \
EF4_POPULATE_QWORD_2(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_ZERO_QWORD(qword) \
EF4_POPULATE_QWORD_1(qword, EF4_DUMMY_FIELD, 0)
#define EF4_SET_QWORD(qword) \
EF4_POPULATE_QWORD_2(qword, \
EF4_DWORD_0, 0xffffffff, \
EF4_DWORD_1, 0xffffffff)
/* Populate a dword field with various numbers of arguments */
#define EF4_POPULATE_DWORD_10 EF4_POPULATE_DWORD
#define EF4_POPULATE_DWORD_9(dword, ...) \
EF4_POPULATE_DWORD_10(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_8(dword, ...) \
EF4_POPULATE_DWORD_9(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_7(dword, ...) \
EF4_POPULATE_DWORD_8(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_6(dword, ...) \
EF4_POPULATE_DWORD_7(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_5(dword, ...) \
EF4_POPULATE_DWORD_6(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_4(dword, ...) \
EF4_POPULATE_DWORD_5(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_3(dword, ...) \
EF4_POPULATE_DWORD_4(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_2(dword, ...) \
EF4_POPULATE_DWORD_3(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_1(dword, ...) \
EF4_POPULATE_DWORD_2(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_ZERO_DWORD(dword) \
EF4_POPULATE_DWORD_1(dword, EF4_DUMMY_FIELD, 0)
#define EF4_SET_DWORD(dword) \
EF4_POPULATE_DWORD_1(dword, EF4_DWORD_0, 0xffffffff)
/*
* Modify a named field within an already-populated structure. Used
* for read-modify-write operations.
*
*/
#define EF4_INVERT_OWORD(oword) do { \
(oword).u64[0] = ~((oword).u64[0]); \
(oword).u64[1] = ~((oword).u64[1]); \
} while (0)
#define EF4_AND_OWORD(oword, from, mask) \
do { \
(oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
(oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
} while (0)
#define EF4_OR_OWORD(oword, from, mask) \
do { \
(oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
(oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
} while (0)
#define EF4_INSERT64(min, max, low, high, value) \
cpu_to_le64(EF4_INSERT_NATIVE(min, max, low, high, value))
#define EF4_INSERT32(min, max, low, high, value) \
cpu_to_le32(EF4_INSERT_NATIVE(min, max, low, high, value))
#define EF4_INPLACE_MASK64(min, max, low, high) \
EF4_INSERT64(min, max, low, high, EF4_MASK64((high) + 1 - (low)))
#define EF4_INPLACE_MASK32(min, max, low, high) \
EF4_INSERT32(min, max, low, high, EF4_MASK32((high) + 1 - (low)))
#define EF4_SET_OWORD64(oword, low, high, value) do { \
(oword).u64[0] = (((oword).u64[0] \
& ~EF4_INPLACE_MASK64(0, 63, low, high)) \
| EF4_INSERT64(0, 63, low, high, value)); \
(oword).u64[1] = (((oword).u64[1] \
& ~EF4_INPLACE_MASK64(64, 127, low, high)) \
| EF4_INSERT64(64, 127, low, high, value)); \
} while (0)
#define EF4_SET_QWORD64(qword, low, high, value) do { \
(qword).u64[0] = (((qword).u64[0] \
& ~EF4_INPLACE_MASK64(0, 63, low, high)) \
| EF4_INSERT64(0, 63, low, high, value)); \
} while (0)
#define EF4_SET_OWORD32(oword, low, high, value) do { \
(oword).u32[0] = (((oword).u32[0] \
& ~EF4_INPLACE_MASK32(0, 31, low, high)) \
| EF4_INSERT32(0, 31, low, high, value)); \
(oword).u32[1] = (((oword).u32[1] \
& ~EF4_INPLACE_MASK32(32, 63, low, high)) \
| EF4_INSERT32(32, 63, low, high, value)); \
(oword).u32[2] = (((oword).u32[2] \
& ~EF4_INPLACE_MASK32(64, 95, low, high)) \
| EF4_INSERT32(64, 95, low, high, value)); \
(oword).u32[3] = (((oword).u32[3] \
& ~EF4_INPLACE_MASK32(96, 127, low, high)) \
| EF4_INSERT32(96, 127, low, high, value)); \
} while (0)
#define EF4_SET_QWORD32(qword, low, high, value) do { \
(qword).u32[0] = (((qword).u32[0] \
& ~EF4_INPLACE_MASK32(0, 31, low, high)) \
| EF4_INSERT32(0, 31, low, high, value)); \
(qword).u32[1] = (((qword).u32[1] \
& ~EF4_INPLACE_MASK32(32, 63, low, high)) \
| EF4_INSERT32(32, 63, low, high, value)); \
} while (0)
#define EF4_SET_DWORD32(dword, low, high, value) do { \
(dword).u32[0] = (((dword).u32[0] \
& ~EF4_INPLACE_MASK32(0, 31, low, high)) \
| EF4_INSERT32(0, 31, low, high, value)); \
} while (0)
#define EF4_SET_OWORD_FIELD64(oword, field, value) \
EF4_SET_OWORD64(oword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field), value)
#define EF4_SET_QWORD_FIELD64(qword, field, value) \
EF4_SET_QWORD64(qword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field), value)
#define EF4_SET_OWORD_FIELD32(oword, field, value) \
EF4_SET_OWORD32(oword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field), value)
#define EF4_SET_QWORD_FIELD32(qword, field, value) \
EF4_SET_QWORD32(qword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field), value)
#define EF4_SET_DWORD_FIELD(dword, field, value) \
EF4_SET_DWORD32(dword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field), value)
#if BITS_PER_LONG == 64
#define EF4_SET_OWORD_FIELD EF4_SET_OWORD_FIELD64
#define EF4_SET_QWORD_FIELD EF4_SET_QWORD_FIELD64
#else
#define EF4_SET_OWORD_FIELD EF4_SET_OWORD_FIELD32
#define EF4_SET_QWORD_FIELD EF4_SET_QWORD_FIELD32
#endif
/* Used to avoid compiler warnings about shift range exceeding width
* of the data types when dma_addr_t is only 32 bits wide.
*/
#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
#define EF4_DMA_TYPE_WIDTH(width) \
(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
/* Static initialiser */
#define EF4_OWORD32(a, b, c, d) \
{ .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
cpu_to_le32(c), cpu_to_le32(d) } }
#endif /* EF4_BITFIELD_H */
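
The macros above are easiest to follow end to end. Here is a minimal usage sketch (editor's illustration, not part of the commit; TEST_FIELD is a hypothetical register field) covering populate, extract and read-modify-write on an octword:

#define TEST_FIELD_LBN 12
#define TEST_FIELD_WIDTH 8

static void bitfield_demo(void)
{
	ef4_oword_t reg;
	unsigned int val;

	EF4_ZERO_OWORD(reg);				/* clear all 128 bits */
	EF4_POPULATE_OWORD_1(reg, TEST_FIELD, 0xab);	/* write the field */
	val = EF4_OWORD_FIELD(reg, TEST_FIELD);		/* val == 0xab */
	EF4_SET_OWORD_FIELD(reg, TEST_FIELD, 0x12);	/* read-modify-write */
	(void)val;					/* sketch only */
}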

File diff suppressed because it is too large.


@@ -0,0 +1,277 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_EFX_H
#define EF4_EFX_H
#include "net_driver.h"
#include "filter.h"
/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
/* All VFs use BAR 0/1 for memory */
#define EF4_MEM_BAR 2
#define EF4_MEM_VF_BAR 0
int ef4_net_open(struct net_device *net_dev);
int ef4_net_stop(struct net_device *net_dev);
/* TX */
int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue);
void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue);
void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue);
void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue);
void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue);
netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index);
int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
struct tc_to_netdev *tc);
unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx);
extern bool ef4_separate_tx_channels;
/* RX */
void ef4_set_default_rx_indir_table(struct ef4_nic *efx);
void ef4_rx_config_page_split(struct ef4_nic *efx);
int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic);
void ef4_rx_slow_fill(unsigned long context);
void __ef4_rx_packet(struct ef4_channel *channel);
void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags);
static inline void ef4_rx_flush_packet(struct ef4_channel *channel)
{
if (channel->rx_pkt_n_frags)
__ef4_rx_packet(channel);
}
void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue);
#define EF4_MAX_DMAQ_SIZE 4096UL
#define EF4_DEFAULT_DMAQ_SIZE 1024UL
#define EF4_MIN_DMAQ_SIZE 512UL
#define EF4_MAX_EVQ_SIZE 16384UL
#define EF4_MIN_EVQ_SIZE 512UL
/* Maximum number of TCP segments we support for soft-TSO */
#define EF4_TSO_MAX_SEGS 100
/* The smallest [rt]xq_entries that the driver supports. RX minimum
* is a bit arbitrary. For TX, we must have space for at least 2
* TSO skbs.
*/
#define EF4_RXQ_MIN_ENT 128U
#define EF4_TXQ_MIN_ENT(efx) (2 * ef4_tx_max_skb_descs(efx))
static inline bool ef4_rss_enabled(struct ef4_nic *efx)
{
return efx->rss_spread > 1;
}
/* Filters */
void ef4_mac_reconfigure(struct ef4_nic *efx);
/**
* ef4_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
* @spec: Specification for the filter
* @replace_equal: Flag for whether the specified filter may replace an
* existing filter with equal priority
*
* On success, return the filter ID.
* On failure, return a negative error code.
*
* If existing filters have equal match values to the new filter spec,
* then the new filter might replace them or the function might fail,
* as follows.
*
* 1. If the existing filters have lower priority, or @replace_equal
* is set and they have equal priority, replace them.
*
* 2. If the existing filters have higher priority, return -%EPERM.
*
* 3. If !ef4_filter_is_mc_recipient(@spec), or the NIC does not
* support delivery to multiple recipients, return -%EEXIST.
*
* This implies that filters for multiple multicast recipients must
* all be inserted with the same priority and @replace_equal = %false.
*/
static inline s32 ef4_filter_insert_filter(struct ef4_nic *efx,
struct ef4_filter_spec *spec,
bool replace_equal)
{
return efx->type->filter_insert(efx, spec, replace_equal);
}
/**
* ef4_filter_remove_id_safe - remove a filter by ID, carefully
* @efx: NIC from which to remove the filter
* @priority: Priority of filter, as passed to @ef4_filter_insert_filter
* @filter_id: ID of filter, as returned by @ef4_filter_insert_filter
*
* This function will range-check @filter_id, so it is safe to call
* with a value passed from userland.
*/
static inline int ef4_filter_remove_id_safe(struct ef4_nic *efx,
enum ef4_filter_priority priority,
u32 filter_id)
{
return efx->type->filter_remove_safe(efx, priority, filter_id);
}
/**
* ef4_filter_get_filter_safe - retrieve a filter by ID, carefully
* @efx: NIC from which to remove the filter
* @priority: Priority of filter, as passed to @ef4_filter_insert_filter
* @filter_id: ID of filter, as returned by @ef4_filter_insert_filter
* @spec: Buffer in which to store filter specification
*
* This function will range-check @filter_id, so it is safe to call
* with a value passed from userland.
*/
static inline int
ef4_filter_get_filter_safe(struct ef4_nic *efx,
enum ef4_filter_priority priority,
u32 filter_id, struct ef4_filter_spec *spec)
{
return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
static inline u32 ef4_filter_count_rx_used(struct ef4_nic *efx,
enum ef4_filter_priority priority)
{
return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 ef4_filter_get_rx_id_limit(struct ef4_nic *efx)
{
return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 ef4_filter_get_rx_ids(struct ef4_nic *efx,
enum ef4_filter_priority priority,
u32 *buf, u32 size)
{
return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
#ifdef CONFIG_RFS_ACCEL
int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned quota);
static inline void ef4_filter_rfs_expire(struct ef4_channel *channel)
{
if (channel->rfs_filters_added >= 60 &&
__ef4_filter_rfs_expire(channel->efx, 100))
channel->rfs_filters_added -= 60;
}
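/* Editor's note (hedged; the scan helper's return semantics are not
 * shown in this excerpt): this amortises expiry work. Only once ~60
 * RFS filters have been added does a call actually scan, checking up
 * to 100 filters per pass, and the counter is credited back by 60
 * when __ef4_filter_rfs_expire() reports a completed pass.
 */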
#define ef4_filter_rfs_enabled() 1
#else
static inline void ef4_filter_rfs_expire(struct ef4_channel *channel) {}
#define ef4_filter_rfs_enabled() 0
#endif
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec);
/* Channels */
int ef4_channel_dummy_op_int(struct ef4_channel *channel);
void ef4_channel_dummy_op_void(struct ef4_channel *channel);
int ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries);
/* Ports */
int ef4_reconfigure_port(struct ef4_nic *efx);
int __ef4_reconfigure_port(struct ef4_nic *efx);
/* Ethtool support */
extern const struct ethtool_ops ef4_ethtool_ops;
/* Reset handling */
int ef4_reset(struct ef4_nic *efx, enum reset_type method);
void ef4_reset_down(struct ef4_nic *efx, enum reset_type method);
int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok);
int ef4_try_recovery(struct ef4_nic *efx);
/* Global */
void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type);
unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs);
unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks);
int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx);
void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive);
void ef4_stop_eventq(struct ef4_channel *channel);
void ef4_start_eventq(struct ef4_channel *channel);
/* Dummy PHY ops for PHY drivers */
int ef4_port_dummy_op_int(struct ef4_nic *efx);
void ef4_port_dummy_op_void(struct ef4_nic *efx);
/* Update the generic software stats in the passed stats array */
void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats);
/* MTD */
#ifdef CONFIG_SFC_FALCON_MTD
int ef4_mtd_add(struct ef4_nic *efx, struct ef4_mtd_partition *parts,
size_t n_parts, size_t sizeof_part);
static inline int ef4_mtd_probe(struct ef4_nic *efx)
{
return efx->type->mtd_probe(efx);
}
void ef4_mtd_rename(struct ef4_nic *efx);
void ef4_mtd_remove(struct ef4_nic *efx);
#else
static inline int ef4_mtd_probe(struct ef4_nic *efx) { return 0; }
static inline void ef4_mtd_rename(struct ef4_nic *efx) {}
static inline void ef4_mtd_remove(struct ef4_nic *efx) {}
#endif
static inline void ef4_schedule_channel(struct ef4_channel *channel)
{
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d scheduling NAPI poll on CPU%d\n",
channel->channel, raw_smp_processor_id());
napi_schedule(&channel->napi_str);
}
static inline void ef4_schedule_channel_irq(struct ef4_channel *channel)
{
channel->event_test_cpu = raw_smp_processor_id();
ef4_schedule_channel(channel);
}
void ef4_link_status_changed(struct ef4_nic *efx);
void ef4_link_set_advertising(struct ef4_nic *efx, u32);
void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8);
static inline void ef4_device_detach_sync(struct ef4_nic *efx)
{
struct net_device *dev = efx->net_dev;
/* Lock/freeze all TX queues so that we can be sure the
* TX scheduler is stopped when we're done and before
* netif_device_present() becomes false.
*/
netif_tx_lock_bh(dev);
netif_device_detach(dev);
netif_tx_unlock_bh(dev);
}
static inline bool ef4_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
if (WARN_ON(down_read_trylock(sem))) {
up_read(sem);
return false;
}
return true;
}
#endif /* EF4_EFX_H */
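
The filter entry points above are thin wrappers over the per-NIC-type ops. A minimal round-trip sketch (editor's illustration; ef4_filter_init_rx is assumed to mirror the spec-initialisation helper in filter.h, and the RX queue index 0 is arbitrary):

static int demo_filter_round_trip(struct ef4_nic *efx)
{
	struct ef4_filter_spec spec;
	s32 filter_id;

	ef4_filter_init_rx(&spec, EF4_FILTER_PRI_MANUAL, 0, 0);
	/* ... set match fields on &spec here ... */

	filter_id = ef4_filter_insert_filter(efx, &spec, false);
	if (filter_id < 0)
		return filter_id;	/* e.g. -EPERM or -EEXIST, see above */

	/* the ID is range-checked, so it round-trips via userland safely */
	return ef4_filter_remove_id_safe(efx, EF4_FILTER_PRI_MANUAL,
					 filter_id);
}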


@@ -0,0 +1,171 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2007-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_ENUM_H
#define EF4_ENUM_H
/**
* enum ef4_loopback_mode - loopback modes
* @LOOPBACK_NONE: no loopback
* @LOOPBACK_DATA: data path loopback
* @LOOPBACK_GMAC: loopback within GMAC
* @LOOPBACK_XGMII: loopback after XMAC
* @LOOPBACK_XGXS: loopback within BPX after XGXS
* @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
* @LOOPBACK_GMII: loopback within BPX after GMAC
* @LOOPBACK_SGMII: loopback within BPX within SGMII
* @LOOPBACK_XGBR: loopback within BPX within XGBR
* @LOOPBACK_XFI: loopback within BPX before XFI serdes
* @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
* @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
* @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
* @LOOPBACK_XFI_FAR: loopback after XFI serdes
* @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
* @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
* @LOOPBACK_PCS: loopback within 10G PHY at PCS level
* @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
* @LOOPBACK_XPORT: cross port loopback
* @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
* @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
* @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
* @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
* @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
* @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
* @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
* @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
*/
/* Please keep up-to-date w.r.t the following two #defines */
enum ef4_loopback_mode {
LOOPBACK_NONE = 0,
LOOPBACK_DATA = 1,
LOOPBACK_GMAC = 2,
LOOPBACK_XGMII = 3,
LOOPBACK_XGXS = 4,
LOOPBACK_XAUI = 5,
LOOPBACK_GMII = 6,
LOOPBACK_SGMII = 7,
LOOPBACK_XGBR = 8,
LOOPBACK_XFI = 9,
LOOPBACK_XAUI_FAR = 10,
LOOPBACK_GMII_FAR = 11,
LOOPBACK_SGMII_FAR = 12,
LOOPBACK_XFI_FAR = 13,
LOOPBACK_GPHY = 14,
LOOPBACK_PHYXS = 15,
LOOPBACK_PCS = 16,
LOOPBACK_PMAPMD = 17,
LOOPBACK_XPORT = 18,
LOOPBACK_XGMII_WS = 19,
LOOPBACK_XAUI_WS = 20,
LOOPBACK_XAUI_WS_FAR = 21,
LOOPBACK_XAUI_WS_NEAR = 22,
LOOPBACK_GMII_WS = 23,
LOOPBACK_XFI_WS = 24,
LOOPBACK_XFI_WS_FAR = 25,
LOOPBACK_PHYXS_WS = 26,
LOOPBACK_MAX
};
#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
/* These loopbacks occur within the controller */
#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \
(1 << LOOPBACK_GMAC) | \
(1 << LOOPBACK_XGMII) | \
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI) | \
(1 << LOOPBACK_GMII) | \
(1 << LOOPBACK_SGMII) | \
(1 << LOOPBACK_XGBR) | \
(1 << LOOPBACK_XFI) | \
(1 << LOOPBACK_XAUI_FAR) | \
(1 << LOOPBACK_GMII_FAR) | \
(1 << LOOPBACK_SGMII_FAR) | \
(1 << LOOPBACK_XFI_FAR) | \
(1 << LOOPBACK_XGMII_WS) | \
(1 << LOOPBACK_XAUI_WS) | \
(1 << LOOPBACK_XAUI_WS_FAR) | \
(1 << LOOPBACK_XAUI_WS_NEAR) | \
(1 << LOOPBACK_GMII_WS) | \
(1 << LOOPBACK_XFI_WS) | \
(1 << LOOPBACK_XFI_WS_FAR))
#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \
(1 << LOOPBACK_XAUI_WS) | \
(1 << LOOPBACK_XAUI_WS_FAR) | \
(1 << LOOPBACK_XAUI_WS_NEAR) | \
(1 << LOOPBACK_GMII_WS) | \
(1 << LOOPBACK_XFI_WS) | \
(1 << LOOPBACK_XFI_WS_FAR) | \
(1 << LOOPBACK_PHYXS_WS))
#define LOOPBACKS_EXTERNAL(_efx) \
((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \
~(1 << LOOPBACK_NONE))
#define LOOPBACK_MASK(_efx) \
(1 << (_efx)->loopback_mode)
#define LOOPBACK_INTERNAL(_efx) \
(!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
#define LOOPBACK_EXTERNAL(_efx) \
(!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
#define LOOPBACK_CHANGED(_from, _to, _mask) \
(!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
#define LOOPBACK_OUT_OF(_from, _to, _mask) \
((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
/*****************************************************************************/
/**
* enum reset_type - reset types
*
* %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
* %RESET_TYPE_DISABLE specify the method/scope of the reset. The
* other values specify reasons, which ef4_schedule_reset() will choose
* a method for.
*
* Reset methods are numbered in order of increasing scope.
*
* @RESET_TYPE_INVISIBLE: Reset datapath and MAC
* @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
* if unsuccessful.
* @RESET_TYPE_ALL: Reset datapath, MAC and PHY
* @RESET_TYPE_WORLD: Reset as much as possible
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
* unsuccessful.
* @RESET_TYPE_DATAPATH: Reset datapath only.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
* @RESET_TYPE_DMA_ERROR: DMA error
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
*/
enum reset_type {
RESET_TYPE_INVISIBLE,
RESET_TYPE_RECOVER_OR_ALL,
RESET_TYPE_ALL,
RESET_TYPE_WORLD,
RESET_TYPE_RECOVER_OR_DISABLE,
RESET_TYPE_DATAPATH,
RESET_TYPE_DISABLE,
RESET_TYPE_MAX_METHOD,
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
RESET_TYPE_RX_RECOVERY,
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MAX,
};
#endif /* EF4_ENUM_H */
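
LOOPBACK_MASK() turns the current mode into a one-hot bit, so the membership tests above reduce to single AND operations. A small sketch (editor's illustration; demo_state stands in for struct ef4_nic, of which these macros only touch ->loopback_mode):

struct demo_state {
	enum ef4_loopback_mode loopback_mode;
};

static bool demo_left_internal_loopback(const struct demo_state *from,
					const struct demo_state *to)
{
	/* true iff 'from' was in an internal loopback and 'to' is not */
	return LOOPBACK_OUT_OF(from, to, LOOPBACKS_INTERNAL);
}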

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,764 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2007-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/rtnetlink.h>
#include "net_driver.h"
#include "phy.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"
/* Macros for unpacking the board revision */
/* The revision info is in host byte order. */
#define FALCON_BOARD_TYPE(_rev) (_rev >> 8)
#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf)
/* Board types */
#define FALCON_BOARD_SFE4001 0x01
#define FALCON_BOARD_SFE4002 0x02
#define FALCON_BOARD_SFE4003 0x03
#define FALCON_BOARD_SFN4112F 0x52
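/* Worked example (editor's note): revision_info 0x0301 unpacks to
 * board type 0x03 (SFE4003), major rev 0, minor rev 1;
 * falcon_probe_board() below uses the type byte to pick an entry
 * from board_types[].
 */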
/* Board temperature is about 15°C above ambient when air flow is
* limited. The maximum acceptable ambient temperature varies
* depending on the PHY specifications but the critical temperature
* above which we should shut down to avoid damage is 80°C. */
#define FALCON_BOARD_TEMP_BIAS 15
#define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS)
/* SFC4000 datasheet says: 'The maximum permitted junction temperature
* is 125°C; the thermal design of the environment for the SFC4000
* should aim to keep this well below 100°C.' */
#define FALCON_JUNC_TEMP_MIN 0
#define FALCON_JUNC_TEMP_MAX 90
#define FALCON_JUNC_TEMP_CRIT 125
/*****************************************************************************
* Support for LM87 sensor chip used on several boards
*/
#define LM87_REG_TEMP_HW_INT_LOCK 0x13
#define LM87_REG_TEMP_HW_EXT_LOCK 0x14
#define LM87_REG_TEMP_HW_INT 0x17
#define LM87_REG_TEMP_HW_EXT 0x18
#define LM87_REG_TEMP_EXT1 0x26
#define LM87_REG_TEMP_INT 0x27
#define LM87_REG_ALARMS1 0x41
#define LM87_REG_ALARMS2 0x42
#define LM87_IN_LIMITS(nr, _min, _max) \
0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
#define LM87_AIN_LIMITS(nr, _min, _max) \
0x3B + (nr), _max, 0x1A + (nr), _min
#define LM87_TEMP_INT_LIMITS(_min, _max) \
0x39, _max, 0x3A, _min
#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
0x37, _max, 0x38, _min
#define LM87_ALARM_TEMP_INT 0x10
#define LM87_ALARM_TEMP_EXT1 0x20
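/* Editor's note: each limit macro expands to (register, value) byte
 * pairs, high limit first. For example LM87_IN_LIMITS(0, 0x7c, 0x99)
 * expands to
 *	0x2B, 0x99,	(channel-0 high limit <- max)
 *	0x2C, 0x7c,	(channel-0 low limit <- min)
 * and ef4_poke_lm87() below consumes such arrays pairwise until the
 * terminating 0.
 */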
#if IS_ENABLED(CONFIG_SENSORS_LM87)
static int ef4_poke_lm87(struct i2c_client *client, const u8 *reg_values)
{
while (*reg_values) {
u8 reg = *reg_values++;
u8 value = *reg_values++;
int rc = i2c_smbus_write_byte_data(client, reg, value);
if (rc)
return rc;
}
return 0;
}
static const u8 falcon_lm87_common_regs[] = {
LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
0
};
static int ef4_init_lm87(struct ef4_nic *efx, const struct i2c_board_info *info,
const u8 *reg_values)
{
struct falcon_board *board = falcon_board(efx);
struct i2c_client *client = i2c_new_device(&board->i2c_adap, info);
int rc;
if (!client)
return -EIO;
/* Read-to-clear alarm/interrupt status */
i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
rc = ef4_poke_lm87(client, reg_values);
if (rc)
goto err;
rc = ef4_poke_lm87(client, falcon_lm87_common_regs);
if (rc)
goto err;
board->hwmon_client = client;
return 0;
err:
i2c_unregister_device(client);
return rc;
}
static void ef4_fini_lm87(struct ef4_nic *efx)
{
i2c_unregister_device(falcon_board(efx)->hwmon_client);
}
static int ef4_check_lm87(struct ef4_nic *efx, unsigned mask)
{
struct i2c_client *client = falcon_board(efx)->hwmon_client;
bool temp_crit, elec_fault, is_failure;
u16 alarms;
s32 reg;
/* If link is up then do not monitor temperature */
if (EF4_WORKAROUND_7884(efx) && efx->link_state.up)
return 0;
reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
if (reg < 0)
return reg;
alarms = reg;
reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
if (reg < 0)
return reg;
alarms |= reg << 8;
alarms &= mask;
temp_crit = false;
if (alarms & LM87_ALARM_TEMP_INT) {
reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
if (reg < 0)
return reg;
if (reg > FALCON_BOARD_TEMP_CRIT)
temp_crit = true;
}
if (alarms & LM87_ALARM_TEMP_EXT1) {
reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
if (reg < 0)
return reg;
if (reg > FALCON_JUNC_TEMP_CRIT)
temp_crit = true;
}
elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
is_failure = temp_crit || elec_fault;
if (alarms)
netif_err(efx, hw, efx->net_dev,
"LM87 detected a hardware %s (status %02x:%02x)"
"%s%s%s%s\n",
is_failure ? "failure" : "problem",
alarms & 0xff, alarms >> 8,
(alarms & LM87_ALARM_TEMP_INT) ?
"; board is overheating" : "",
(alarms & LM87_ALARM_TEMP_EXT1) ?
"; controller is overheating" : "",
temp_crit ? "; reached critical temperature" : "",
elec_fault ? "; electrical fault" : "");
return is_failure ? -ERANGE : 0;
}
#else /* !CONFIG_SENSORS_LM87 */
static inline int
ef4_init_lm87(struct ef4_nic *efx, const struct i2c_board_info *info,
const u8 *reg_values)
{
return 0;
}
static inline void ef4_fini_lm87(struct ef4_nic *efx)
{
}
static inline int ef4_check_lm87(struct ef4_nic *efx, unsigned mask)
{
return 0;
}
#endif /* CONFIG_SENSORS_LM87 */
/*****************************************************************************
* Support for the SFE4001 NIC.
*
* The SFE4001 does not power-up fully at reset due to its high power
* consumption. We control its power via a PCA9539 I/O expander.
* It also has a MAX6647 temperature monitor which we expose to
* the lm90 driver.
*
* This also provides minimal support for reflashing the PHY, which is
* initiated by resetting it with the FLASH_CFG_1 pin pulled down.
* On SFE4001 rev A2 and later this is connected to the 3V3X output of
* the IO-expander.
* We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
* exclusive with the network device being open.
*/
/**************************************************************************
* Support for I2C IO Expander device on SFE4001
*/
#define PCA9539 0x74
#define P0_IN 0x00
#define P0_OUT 0x02
#define P0_INVERT 0x04
#define P0_CONFIG 0x06
#define P0_EN_1V0X_LBN 0
#define P0_EN_1V0X_WIDTH 1
#define P0_EN_1V2_LBN 1
#define P0_EN_1V2_WIDTH 1
#define P0_EN_2V5_LBN 2
#define P0_EN_2V5_WIDTH 1
#define P0_EN_3V3X_LBN 3
#define P0_EN_3V3X_WIDTH 1
#define P0_EN_5V_LBN 4
#define P0_EN_5V_WIDTH 1
#define P0_SHORTEN_JTAG_LBN 5
#define P0_SHORTEN_JTAG_WIDTH 1
#define P0_X_TRST_LBN 6
#define P0_X_TRST_WIDTH 1
#define P0_DSP_RESET_LBN 7
#define P0_DSP_RESET_WIDTH 1
#define P1_IN 0x01
#define P1_OUT 0x03
#define P1_INVERT 0x05
#define P1_CONFIG 0x07
#define P1_AFE_PWD_LBN 0
#define P1_AFE_PWD_WIDTH 1
#define P1_DSP_PWD25_LBN 1
#define P1_DSP_PWD25_WIDTH 1
#define P1_RESERVED_LBN 2
#define P1_RESERVED_WIDTH 2
#define P1_SPARE_LBN 4
#define P1_SPARE_WIDTH 4
/* Temperature Sensor */
#define MAX664X_REG_RSL 0x02
#define MAX664X_REG_WLHO 0x0B
static void sfe4001_poweroff(struct ef4_nic *efx)
{
struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
/* Turn off all power rails and disable outputs */
i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
/* Clear any over-temperature alert */
i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
}
static int sfe4001_poweron(struct ef4_nic *efx)
{
struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
unsigned int i, j;
int rc;
u8 out;
/* Clear any previous over-temperature alert */
rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
if (rc < 0)
return rc;
/* Enable port 0 and port 1 outputs on IO expander */
rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
if (rc)
return rc;
rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
0xff & ~(1 << P1_SPARE_LBN));
if (rc)
goto fail_on;
/* If PHY power is on, turn it all off and wait 1 second to
* ensure a full reset.
*/
rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
if (rc < 0)
goto fail_on;
out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
(0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
(0 << P0_EN_1V0X_LBN));
if (rc != out) {
netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n");
rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
if (rc)
goto fail_on;
schedule_timeout_uninterruptible(HZ);
}
for (i = 0; i < 20; ++i) {
/* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
(1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
(1 << P0_X_TRST_LBN));
if (efx->phy_mode & PHY_MODE_SPECIAL)
out |= 1 << P0_EN_3V3X_LBN;
rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
if (rc)
goto fail_on;
msleep(10);
/* Turn on 1V power rail */
out &= ~(1 << P0_EN_1V0X_LBN);
rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
if (rc)
goto fail_on;
netif_info(efx, hw, efx->net_dev,
"waiting for DSP boot (attempt %d)...\n", i);
/* In flash config mode, DSP does not turn on AFE, so
* just wait 1 second.
*/
if (efx->phy_mode & PHY_MODE_SPECIAL) {
schedule_timeout_uninterruptible(HZ);
return 0;
}
for (j = 0; j < 10; ++j) {
msleep(100);
/* Check DSP has asserted AFE power line */
rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
if (rc < 0)
goto fail_on;
if (rc & (1 << P1_AFE_PWD_LBN))
return 0;
}
}
netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n");
rc = -ETIMEDOUT;
fail_on:
sfe4001_poweroff(efx);
return rc;
}
static ssize_t show_phy_flash_cfg(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
}
static ssize_t set_phy_flash_cfg(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
enum ef4_phy_mode old_mode, new_mode;
int err;
rtnl_lock();
old_mode = efx->phy_mode;
if (count == 0 || *buf == '0')
new_mode = old_mode & ~PHY_MODE_SPECIAL;
else
new_mode = PHY_MODE_SPECIAL;
if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
err = 0;
} else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
err = -EBUSY;
} else {
/* Reset the PHY, reconfigure the MAC and enable/disable
* MAC stats accordingly. */
efx->phy_mode = new_mode;
if (new_mode & PHY_MODE_SPECIAL)
falcon_stop_nic_stats(efx);
err = sfe4001_poweron(efx);
if (!err)
err = ef4_reconfigure_port(efx);
if (!(new_mode & PHY_MODE_SPECIAL))
falcon_start_nic_stats(efx);
}
rtnl_unlock();
return err ? err : count;
}
static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
static void sfe4001_fini(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
sfe4001_poweroff(efx);
i2c_unregister_device(board->ioexp_client);
i2c_unregister_device(board->hwmon_client);
}
static int sfe4001_check_hw(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
s32 status;
/* If XAUI link is up then do not monitor */
if (EF4_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
return 0;
/* Check the powered status of the PHY. Lack of power implies that
* the MAX6647 has shut down power to it, probably due to a temp.
* alarm. Reading the power status rather than the MAX6647 status
* directly because the latter is read-to-clear and would thus
* start to power up the PHY again when polled, causing us to blip
* the power undesirably.
* We know we can read from the IO expander because we did
* it during power-on. Assume failure now is bad news. */
status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN);
if (status >= 0 &&
(status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
return 0;
/* Use board power control, not PHY power control */
sfe4001_poweroff(efx);
efx->phy_mode = PHY_MODE_OFF;
return (status < 0) ? -EIO : -ERANGE;
}
static const struct i2c_board_info sfe4001_hwmon_info = {
I2C_BOARD_INFO("max6647", 0x4e),
};
/* This board uses an I2C expander to provide power to the PHY, which needs to
* be turned on before the PHY can be used.
* Context: Process context, rtnl lock held
*/
static int sfe4001_init(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
int rc;
#if IS_ENABLED(CONFIG_SENSORS_LM90)
board->hwmon_client =
i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
#else
board->hwmon_client =
i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr);
#endif
if (!board->hwmon_client)
return -EIO;
/* Raise board/PHY high limit from 85 to 90 degrees Celsius */
rc = i2c_smbus_write_byte_data(board->hwmon_client,
MAX664X_REG_WLHO, 90);
if (rc)
goto fail_hwmon;
board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539);
if (!board->ioexp_client) {
rc = -EIO;
goto fail_hwmon;
}
if (efx->phy_mode & PHY_MODE_SPECIAL) {
/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
* will fail. */
falcon_stop_nic_stats(efx);
}
rc = sfe4001_poweron(efx);
if (rc)
goto fail_ioexp;
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
if (rc)
goto fail_on;
netif_info(efx, hw, efx->net_dev, "PHY is powered on\n");
return 0;
fail_on:
sfe4001_poweroff(efx);
fail_ioexp:
i2c_unregister_device(board->ioexp_client);
fail_hwmon:
i2c_unregister_device(board->hwmon_client);
return rc;
}
/*****************************************************************************
* Support for the SFE4002
*
*/
static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
static const u8 sfe4002_lm87_regs[] = {
LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */
LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */
LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),
LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
0
};
static const struct i2c_board_info sfe4002_hwmon_info = {
I2C_BOARD_INFO("lm87", 0x2e),
.platform_data = &sfe4002_lm87_channel,
};
/****************************************************************************/
/* LED allocations. Note that on rev A0 boards the schematic and the reality
* differ: red and green are swapped. Below is the fixed (A1) layout (there
* are only 3 A0 boards in existence, so no real reason to make this
* conditional).
*/
#define SFE4002_FAULT_LED (2) /* Red */
#define SFE4002_RX_LED (0) /* Green */
#define SFE4002_TX_LED (1) /* Amber */
static void sfe4002_init_phy(struct ef4_nic *efx)
{
/* Set the TX and RX LEDs to reflect status and activity, and the
* fault LED off */
falcon_qt202x_set_led(efx, SFE4002_TX_LED,
QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
falcon_qt202x_set_led(efx, SFE4002_RX_LED,
QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
}
static void sfe4002_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
falcon_qt202x_set_led(
efx, SFE4002_FAULT_LED,
(mode == EF4_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF);
}
static int sfe4002_check_hw(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
/* A0 board rev. 4002s report a temperature fault the whole time
* (bad sensor) so we mask it out. */
unsigned alarm_mask =
(board->major == 0 && board->minor == 0) ?
~LM87_ALARM_TEMP_EXT1 : ~0;
return ef4_check_lm87(efx, alarm_mask);
}
static int sfe4002_init(struct ef4_nic *efx)
{
return ef4_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
}
/*****************************************************************************
* Support for the SFN4112F
*
*/
static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
static const u8 sfn4112f_lm87_regs[] = {
LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS),
LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
0
};
static const struct i2c_board_info sfn4112f_hwmon_info = {
I2C_BOARD_INFO("lm87", 0x2e),
.platform_data = &sfn4112f_lm87_channel,
};
#define SFN4112F_ACT_LED 0
#define SFN4112F_LINK_LED 1
static void sfn4112f_init_phy(struct ef4_nic *efx)
{
falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
}
static void sfn4112f_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
int reg;
switch (mode) {
case EF4_LED_OFF:
reg = QUAKE_LED_OFF;
break;
case EF4_LED_ON:
reg = QUAKE_LED_ON;
break;
default:
reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT;
break;
}
falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg);
}
static int sfn4112f_check_hw(struct ef4_nic *efx)
{
/* Mask out unused sensors */
return ef4_check_lm87(efx, ~0x48);
}
static int sfn4112f_init(struct ef4_nic *efx)
{
return ef4_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
}
/*****************************************************************************
* Support for the SFE4003
*
*/
static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */
static const u8 sfe4003_lm87_regs[] = {
LM87_IN_LIMITS(0, 0x67, 0x7f), /* 2.5V: 1.5V +/- 10% */
LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS),
0
};
static const struct i2c_board_info sfe4003_hwmon_info = {
I2C_BOARD_INFO("lm87", 0x2e),
.platform_data = &sfe4003_lm87_channel,
};
/* Board-specific LED info. */
#define SFE4003_RED_LED_GPIO 11
#define SFE4003_LED_ON 1
#define SFE4003_LED_OFF 0
static void sfe4003_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
struct falcon_board *board = falcon_board(efx);
/* The LEDs were not wired to GPIOs before A3 */
if (board->minor < 3 && board->major == 0)
return;
falcon_txc_set_gpio_val(
efx, SFE4003_RED_LED_GPIO,
(mode == EF4_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
}
static void sfe4003_init_phy(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
/* The LEDs were not wired to GPIOs before A3 */
if (board->minor < 3 && board->major == 0)
return;
falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT);
falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
}
static int sfe4003_check_hw(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
/* A0/A1/A2 board rev. 4003s report a temperature fault the whole time
* (bad sensor) so we mask it out. */
unsigned alarm_mask =
(board->major == 0 && board->minor <= 2) ?
~LM87_ALARM_TEMP_EXT1 : ~0;
return ef4_check_lm87(efx, alarm_mask);
}
static int sfe4003_init(struct ef4_nic *efx)
{
return ef4_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
}
static const struct falcon_board_type board_types[] = {
{
.id = FALCON_BOARD_SFE4001,
.init = sfe4001_init,
.init_phy = ef4_port_dummy_op_void,
.fini = sfe4001_fini,
.set_id_led = tenxpress_set_id_led,
.monitor = sfe4001_check_hw,
},
{
.id = FALCON_BOARD_SFE4002,
.init = sfe4002_init,
.init_phy = sfe4002_init_phy,
.fini = ef4_fini_lm87,
.set_id_led = sfe4002_set_id_led,
.monitor = sfe4002_check_hw,
},
{
.id = FALCON_BOARD_SFE4003,
.init = sfe4003_init,
.init_phy = sfe4003_init_phy,
.fini = ef4_fini_lm87,
.set_id_led = sfe4003_set_id_led,
.monitor = sfe4003_check_hw,
},
{
.id = FALCON_BOARD_SFN4112F,
.init = sfn4112f_init,
.init_phy = sfn4112f_init_phy,
.fini = ef4_fini_lm87,
.set_id_led = sfn4112f_set_id_led,
.monitor = sfn4112f_check_hw,
},
};
int falcon_probe_board(struct ef4_nic *efx, u16 revision_info)
{
struct falcon_board *board = falcon_board(efx);
u8 type_id = FALCON_BOARD_TYPE(revision_info);
int i;
board->major = FALCON_BOARD_MAJOR(revision_info);
board->minor = FALCON_BOARD_MINOR(revision_info);
for (i = 0; i < ARRAY_SIZE(board_types); i++)
if (board_types[i].id == type_id)
board->type = &board_types[i];
if (board->type) {
return 0;
} else {
netif_err(efx, probe, efx->net_dev, "unknown board type %d\n",
type_id);
return -ENODEV;
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,272 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_FILTER_H
#define EF4_FILTER_H
#include <linux/types.h>
#include <linux/if_ether.h>
#include <asm/byteorder.h>
/**
* enum ef4_filter_match_flags - Flags for hardware filter match type
* @EF4_FILTER_MATCH_REM_HOST: Match by remote IP host address
* @EF4_FILTER_MATCH_LOC_HOST: Match by local IP host address
* @EF4_FILTER_MATCH_REM_MAC: Match by remote MAC address
* @EF4_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
* @EF4_FILTER_MATCH_LOC_MAC: Match by local MAC address
* @EF4_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
* @EF4_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
* @EF4_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
* @EF4_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
* @EF4_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
* @EF4_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
* Used for RX default unicast and multicast/broadcast filters.
*
* Only some combinations are supported, depending on NIC type:
*
* - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
* local 2-tuple (only implemented for Falcon B0)
*
* - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
* or local 2-tuple, or local MAC with or without outer VID, and RX
* default filters
*
* - Huntington supports filter matching controlled by firmware, potentially
* using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
* with or without outer and inner VID
*/
enum ef4_filter_match_flags {
EF4_FILTER_MATCH_REM_HOST = 0x0001,
EF4_FILTER_MATCH_LOC_HOST = 0x0002,
EF4_FILTER_MATCH_REM_MAC = 0x0004,
EF4_FILTER_MATCH_REM_PORT = 0x0008,
EF4_FILTER_MATCH_LOC_MAC = 0x0010,
EF4_FILTER_MATCH_LOC_PORT = 0x0020,
EF4_FILTER_MATCH_ETHER_TYPE = 0x0040,
EF4_FILTER_MATCH_INNER_VID = 0x0080,
EF4_FILTER_MATCH_OUTER_VID = 0x0100,
EF4_FILTER_MATCH_IP_PROTO = 0x0200,
EF4_FILTER_MATCH_LOC_MAC_IG = 0x0400,
};
/**
* enum ef4_filter_priority - priority of a hardware filter specification
* @EF4_FILTER_PRI_HINT: Performance hint
* @EF4_FILTER_PRI_AUTO: Automatic filter based on device address list
* or hardware requirements. This may only be used by the filter
* implementation for each NIC type.
* @EF4_FILTER_PRI_MANUAL: Manually configured filter
* @EF4_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
* networking and SR-IOV)
*/
enum ef4_filter_priority {
EF4_FILTER_PRI_HINT = 0,
EF4_FILTER_PRI_AUTO,
EF4_FILTER_PRI_MANUAL,
EF4_FILTER_PRI_REQUIRED,
};
/**
* enum ef4_filter_flags - flags for hardware filter specifications
* @EF4_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
* By default, matching packets will be delivered only to the
* specified queue. If this flag is set, they will be delivered
* to a range of queues offset from the specified queue number
* according to the indirection table.
* @EF4_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
* @EF4_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
* overriding an automatic filter (priority
* %EF4_FILTER_PRI_AUTO). This may only be set by the filter
* implementation for each type. A removal request will restore
* the automatic filter in its place.
* @EF4_FILTER_FLAG_RX: Filter is for RX
* @EF4_FILTER_FLAG_TX: Filter is for TX
*/
enum ef4_filter_flags {
EF4_FILTER_FLAG_RX_RSS = 0x01,
EF4_FILTER_FLAG_RX_SCATTER = 0x02,
EF4_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EF4_FILTER_FLAG_RX = 0x08,
EF4_FILTER_FLAG_TX = 0x10,
};
/**
* struct ef4_filter_spec - specification for a hardware filter
* @match_flags: Match type flags, from &enum ef4_filter_match_flags
* @priority: Priority of the filter, from &enum ef4_filter_priority
* @flags: Miscellaneous flags, from &enum ef4_filter_flags
* @rss_context: RSS context to use, if %EF4_FILTER_FLAG_RX_RSS is set
* @dmaq_id: Source/target queue index, or %EF4_FILTER_RX_DMAQ_ID_DROP for
* an RX drop filter
* @outer_vid: Outer VLAN ID to match, if %EF4_FILTER_MATCH_OUTER_VID is set
* @inner_vid: Inner VLAN ID to match, if %EF4_FILTER_MATCH_INNER_VID is set
* @loc_mac: Local MAC address to match, if %EF4_FILTER_MATCH_LOC_MAC or
* %EF4_FILTER_MATCH_LOC_MAC_IG is set
* @rem_mac: Remote MAC address to match, if %EF4_FILTER_MATCH_REM_MAC is set
* @ether_type: Ether-type to match, if %EF4_FILTER_MATCH_ETHER_TYPE is set
* @ip_proto: IP transport protocol to match, if %EF4_FILTER_MATCH_IP_PROTO
* is set
* @loc_host: Local IP host to match, if %EF4_FILTER_MATCH_LOC_HOST is set
* @rem_host: Remote IP host to match, if %EF4_FILTER_MATCH_REM_HOST is set
* @loc_port: Local TCP/UDP port to match, if %EF4_FILTER_MATCH_LOC_PORT is set
* @rem_port: Remote TCP/UDP port to match, if %EF4_FILTER_MATCH_REM_PORT is set
*
* The ef4_filter_init_rx() or ef4_filter_init_tx() function *must* be
* used to initialise the structure. The ef4_filter_set_*() functions
* may then be used to set @rss_context, @match_flags and related
* fields.
*
* The @priority field is used by software to determine whether a new
* filter may replace an old one. The hardware priority of a filter
* depends on which fields are matched.
*/
struct ef4_filter_spec {
u32 match_flags:12;
u32 priority:2;
u32 flags:6;
u32 dmaq_id:12;
u32 rss_context;
__be16 outer_vid __aligned(4); /* allow jhash2() of match values */
__be16 inner_vid;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
__be16 ether_type;
u8 ip_proto;
__be32 loc_host[4];
__be32 rem_host[4];
__be16 loc_port;
__be16 rem_port;
/* total 64 bytes */
};
enum {
EF4_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
EF4_FILTER_RX_DMAQ_ID_DROP = 0xfff
};
static inline void ef4_filter_init_rx(struct ef4_filter_spec *spec,
enum ef4_filter_priority priority,
enum ef4_filter_flags flags,
unsigned rxq_id)
{
memset(spec, 0, sizeof(*spec));
spec->priority = priority;
spec->flags = EF4_FILTER_FLAG_RX | flags;
spec->rss_context = EF4_FILTER_RSS_CONTEXT_DEFAULT;
spec->dmaq_id = rxq_id;
}
static inline void ef4_filter_init_tx(struct ef4_filter_spec *spec,
unsigned txq_id)
{
memset(spec, 0, sizeof(*spec));
spec->priority = EF4_FILTER_PRI_REQUIRED;
spec->flags = EF4_FILTER_FLAG_TX;
spec->dmaq_id = txq_id;
}
/**
* ef4_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @host: Local host address (network byte order)
* @port: Local port (network byte order)
*/
static inline int
ef4_filter_set_ipv4_local(struct ef4_filter_spec *spec, u8 proto,
__be32 host, __be16 port)
{
spec->match_flags |=
EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT;
spec->ether_type = htons(ETH_P_IP);
spec->ip_proto = proto;
spec->loc_host[0] = host;
spec->loc_port = port;
return 0;
}
/**
* ef4_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @lhost: Local host address (network byte order)
* @lport: Local port (network byte order)
* @rhost: Remote host address (network byte order)
* @rport: Remote port (network byte order)
*/
static inline int
ef4_filter_set_ipv4_full(struct ef4_filter_spec *spec, u8 proto,
__be32 lhost, __be16 lport,
__be32 rhost, __be16 rport)
{
spec->match_flags |=
EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
spec->ether_type = htons(ETH_P_IP);
spec->ip_proto = proto;
spec->loc_host[0] = lhost;
spec->loc_port = lport;
spec->rem_host[0] = rhost;
spec->rem_port = rport;
return 0;
}
enum {
EF4_FILTER_VID_UNSPEC = 0xffff,
};
/**
* ef4_filter_set_eth_local - specify local Ethernet address and/or VID
* @spec: Specification to initialise
* @vid: Outer VLAN ID to match, or %EF4_FILTER_VID_UNSPEC
* @addr: Local Ethernet MAC address, or %NULL
*/
static inline int ef4_filter_set_eth_local(struct ef4_filter_spec *spec,
u16 vid, const u8 *addr)
{
if (vid == EF4_FILTER_VID_UNSPEC && addr == NULL)
return -EINVAL;
if (vid != EF4_FILTER_VID_UNSPEC) {
spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID;
spec->outer_vid = htons(vid);
}
if (addr != NULL) {
spec->match_flags |= EF4_FILTER_MATCH_LOC_MAC;
ether_addr_copy(spec->loc_mac, addr);
}
return 0;
}
/**
* ef4_filter_set_uc_def - specify matching otherwise-unmatched unicast
* @spec: Specification to initialise
*/
static inline int ef4_filter_set_uc_def(struct ef4_filter_spec *spec)
{
spec->match_flags |= EF4_FILTER_MATCH_LOC_MAC_IG;
return 0;
}
/**
* ef4_filter_set_mc_def - specify matching otherwise-unmatched multicast
* @spec: Specification to initialise
*/
static inline int ef4_filter_set_mc_def(struct ef4_filter_spec *spec)
{
spec->match_flags |= EF4_FILTER_MATCH_LOC_MAC_IG;
spec->loc_mac[0] = 1;
return 0;
}
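/* Illustrative sketch (not part of the driver): building an RX filter
 * that steers local TCP port 80 traffic for 192.168.1.1 to queue 0,
 * using the helpers above.  IPPROTO_TCP comes from <linux/in.h>; the
 * address, port, queue and priority are arbitrary example values.
 */
static inline int ef4_filter_example_http(struct ef4_filter_spec *spec)
{
	ef4_filter_init_rx(spec, EF4_FILTER_PRI_MANUAL, 0, 0);
	return ef4_filter_set_ipv4_local(spec, IPPROTO_TCP,
					 htonl(0xc0a80101), htons(80));
}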
#endif /* EF4_FILTER_H */

View File

@@ -0,0 +1,290 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_IO_H
#define EF4_IO_H
#include <linux/io.h>
#include <linux/spinlock.h>
/**************************************************************************
*
* NIC register I/O
*
**************************************************************************
*
* Notes on locking strategy for the Falcon architecture:
*
* Many CSRs are very wide and cannot be read or written atomically.
* Writes from the host are buffered by the Bus Interface Unit (BIU)
* up to 128 bits. Whenever the host writes part of such a register,
* the BIU collects the written value and does not write to the
* underlying register until all 4 dwords have been written. A
* similar buffering scheme applies to host access to the NIC's 64-bit
* SRAM.
*
* Writes to different CSRs and 64-bit SRAM words must be serialised,
* since interleaved access can result in lost writes. We use
* ef4_nic::biu_lock for this.
*
* We also serialise reads from 128-bit CSRs and SRAM with the same
* spinlock. This may not be necessary, but it doesn't really matter
* as there are no such reads on the fast path.
*
* The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
* 128-bit but are special-cased in the BIU to avoid the need for
* locking in the host:
*
* - They are write-only.
* - The semantics of writing to these registers are such that
* replacing the low 96 bits with zero does not affect functionality.
* - If the host writes to the last dword address of such a register
* (i.e. the high 32 bits) the underlying register will always be
* written. If the collector and the current write together do not
* provide values for all 128 bits of the register, the low 96 bits
* will be written as zero.
* - If the host writes to the address of any other part of such a
* register while the collector already holds values for some other
* register, the write is discarded and the collector maintains its
* current state.
*
* The EF10 architecture exposes very few registers to the host and
* most of them are only 32 bits wide. The only exceptions are the MC
* doorbell register pair, which has its own latching, and
* TX_DESC_UPD, which works in a similar way to the Falcon
* architecture.
*/
#if BITS_PER_LONG == 64
#define EF4_USE_QWORD_IO 1
#endif
#ifdef EF4_USE_QWORD_IO
static inline void _ef4_writeq(struct ef4_nic *efx, __le64 value,
unsigned int reg)
{
__raw_writeq((__force u64)value, efx->membase + reg);
}
static inline __le64 _ef4_readq(struct ef4_nic *efx, unsigned int reg)
{
return (__force __le64)__raw_readq(efx->membase + reg);
}
#endif
static inline void _ef4_writed(struct ef4_nic *efx, __le32 value,
unsigned int reg)
{
__raw_writel((__force u32)value, efx->membase + reg);
}
static inline __le32 _ef4_readd(struct ef4_nic *efx, unsigned int reg)
{
return (__force __le32)__raw_readl(efx->membase + reg);
}
/* Write a normal 128-bit CSR, locking as appropriate. */
static inline void ef4_writeo(struct ef4_nic *efx, const ef4_oword_t *value,
unsigned int reg)
{
unsigned long flags __attribute__ ((unused));
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with " EF4_OWORD_FMT "\n", reg,
EF4_OWORD_VAL(*value));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EF4_USE_QWORD_IO
_ef4_writeq(efx, value->u64[0], reg + 0);
_ef4_writeq(efx, value->u64[1], reg + 8);
#else
_ef4_writed(efx, value->u32[0], reg + 0);
_ef4_writed(efx, value->u32[1], reg + 4);
_ef4_writed(efx, value->u32[2], reg + 8);
_ef4_writed(efx, value->u32[3], reg + 12);
#endif
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void ef4_sram_writeq(struct ef4_nic *efx, void __iomem *membase,
const ef4_qword_t *value, unsigned int index)
{
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
netif_vdbg(efx, hw, efx->net_dev,
"writing SRAM address %x with " EF4_QWORD_FMT "\n",
addr, EF4_QWORD_VAL(*value));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EF4_USE_QWORD_IO
__raw_writeq((__force u64)value->u64[0], membase + addr);
#else
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
static inline void ef4_writed(struct ef4_nic *efx, const ef4_dword_t *value,
unsigned int reg)
{
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with "EF4_DWORD_FMT"\n",
reg, EF4_DWORD_VAL(*value));
/* No lock required */
_ef4_writed(efx, value->u32[0], reg);
}
/* Read a 128-bit CSR, locking as appropriate. */
static inline void ef4_reado(struct ef4_nic *efx, ef4_oword_t *value,
unsigned int reg)
{
unsigned long flags __attribute__ ((unused));
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _ef4_readd(efx, reg + 0);
value->u32[1] = _ef4_readd(efx, reg + 4);
value->u32[2] = _ef4_readd(efx, reg + 8);
value->u32[3] = _ef4_readd(efx, reg + 12);
spin_unlock_irqrestore(&efx->biu_lock, flags);
netif_vdbg(efx, hw, efx->net_dev,
"read from register %x, got " EF4_OWORD_FMT "\n", reg,
EF4_OWORD_VAL(*value));
}
/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void ef4_sram_readq(struct ef4_nic *efx, void __iomem *membase,
ef4_qword_t *value, unsigned int index)
{
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EF4_USE_QWORD_IO
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
netif_vdbg(efx, hw, efx->net_dev,
"read from SRAM address %x, got "EF4_QWORD_FMT"\n",
addr, EF4_QWORD_VAL(*value));
}
/* Read a 32-bit CSR or SRAM */
static inline void ef4_readd(struct ef4_nic *efx, ef4_dword_t *value,
unsigned int reg)
{
value->u32[0] = _ef4_readd(efx, reg);
netif_vdbg(efx, hw, efx->net_dev,
"read from register %x, got "EF4_DWORD_FMT"\n",
reg, EF4_DWORD_VAL(*value));
}
/* Write a 128-bit CSR forming part of a table */
static inline void
ef4_writeo_table(struct ef4_nic *efx, const ef4_oword_t *value,
unsigned int reg, unsigned int index)
{
ef4_writeo(efx, value, reg + index * sizeof(ef4_oword_t));
}
/* Read a 128-bit CSR forming part of a table */
static inline void ef4_reado_table(struct ef4_nic *efx, ef4_oword_t *value,
unsigned int reg, unsigned int index)
{
ef4_reado(efx, value, reg + index * sizeof(ef4_oword_t));
}
/* Page size used as step between per-VI registers */
#define EF4_VI_PAGE_SIZE 0x2000
/* Calculate offset to page-mapped register */
#define EF4_PAGED_REG(page, reg) \
((page) * EF4_VI_PAGE_SIZE + (reg))
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _ef4_writeo_page(struct ef4_nic *efx, ef4_oword_t *value,
unsigned int reg, unsigned int page)
{
reg = EF4_PAGED_REG(page, reg);
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with " EF4_OWORD_FMT "\n", reg,
EF4_OWORD_VAL(*value));
#ifdef EF4_USE_QWORD_IO
_ef4_writeq(efx, value->u64[0], reg + 0);
_ef4_writeq(efx, value->u64[1], reg + 8);
#else
_ef4_writed(efx, value->u32[0], reg + 0);
_ef4_writed(efx, value->u32[1], reg + 4);
_ef4_writed(efx, value->u32[2], reg + 8);
_ef4_writed(efx, value->u32[3], reg + 12);
#endif
}
#define ef4_writeo_page(efx, value, reg, page) \
_ef4_writeo_page(efx, value, \
reg + \
BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
page)
/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
* high bits of RX_DESC_UPD or TX_DESC_UPD)
*/
static inline void
_ef4_writed_page(struct ef4_nic *efx, const ef4_dword_t *value,
unsigned int reg, unsigned int page)
{
ef4_writed(efx, value, EF4_PAGED_REG(page, reg));
}
#define ef4_writed_page(efx, value, reg, page) \
_ef4_writed_page(efx, value, \
reg + \
BUILD_BUG_ON_ZERO((reg) != 0x400 && \
(reg) != 0x420 && \
(reg) != 0x830 && \
(reg) != 0x83c && \
(reg) != 0xa18 && \
(reg) != 0xa1c), \
page)
/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
* in the BIU means that writes to TIMER_COMMAND[0] invalidate the
* collector register.
*/
static inline void _ef4_writed_page_locked(struct ef4_nic *efx,
const ef4_dword_t *value,
unsigned int reg,
unsigned int page)
{
unsigned long flags __attribute__ ((unused));
if (page == 0) {
spin_lock_irqsave(&efx->biu_lock, flags);
ef4_writed(efx, value, EF4_PAGED_REG(page, reg));
spin_unlock_irqrestore(&efx->biu_lock, flags);
} else {
ef4_writed(efx, value, EF4_PAGED_REG(page, reg));
}
}
#define ef4_writed_page_locked(efx, value, reg, page) \
_ef4_writed_page_locked(efx, value, \
reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
page)
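/* Illustrative sketch (not part of the driver): a read-modify-write of
 * a full 128-bit CSR using the accessors above.  ef4_reado() and
 * ef4_writeo() each take biu_lock internally, so the individual
 * accesses are serialised against other CSR accesses, but the RMW as a
 * whole is not atomic; a caller needing that must add its own locking.
 * Assumes the ef4_oword_t type from bitfield.h is in scope, as it is in
 * the driver's .c files.
 */
static inline void ef4_example_rmw(struct ef4_nic *efx, unsigned int reg)
{
	ef4_oword_t tmp;

	ef4_reado(efx, &tmp, reg);	/* locked read of all 4 dwords */
	/* ... modify fields of tmp here (see bitfield.h helpers) ... */
	ef4_writeo(efx, &tmp, reg);	/* locked write of all 4 dwords */
}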
#endif /* EF4_IO_H */

View File

@@ -0,0 +1,323 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
/*
* Useful functions for working with MDIO clause 45 PHYs
*/
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include "net_driver.h"
#include "mdio_10g.h"
#include "workarounds.h"
unsigned ef4_mdio_id_oui(u32 id)
{
unsigned oui = 0;
int i;
/* The bits of the OUI are designated a..x, with a=0 and b variable.
* In the id register c is the MSB but the OUI is conventionally
* written as bytes h..a, p..i, x..q. Reorder the bits accordingly. */
for (i = 0; i < 22; ++i)
if (id & (1 << (i + 10)))
oui |= 1 << (i ^ 7);
return oui;
}
int ef4_mdio_reset_mmd(struct ef4_nic *port, int mmd,
int spins, int spintime)
{
u32 ctrl;
/* Catch callers passing values in the wrong units (or just silly) */
EF4_BUG_ON_PARANOID(spins * spintime >= 5000);
ef4_mdio_write(port, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET);
/* Wait for the reset bit to clear. */
do {
msleep(spintime);
ctrl = ef4_mdio_read(port, mmd, MDIO_CTRL1);
spins--;
} while (spins && (ctrl & MDIO_CTRL1_RESET));
return spins ? spins : -ETIMEDOUT;
}
static int ef4_mdio_check_mmd(struct ef4_nic *efx, int mmd)
{
int status;
if (mmd != MDIO_MMD_AN) {
/* Read MMD STATUS2 to check it is responding. */
status = ef4_mdio_read(efx, mmd, MDIO_STAT2);
if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) {
netif_err(efx, hw, efx->net_dev,
"PHY MMD %d not responding.\n", mmd);
return -EIO;
}
}
return 0;
}
/* This ought to be ridiculous overkill. We expect it to fail rarely */
#define MDIO45_RESET_TIME 1000 /* ms */
#define MDIO45_RESET_ITERS 100
int ef4_mdio_wait_reset_mmds(struct ef4_nic *efx, unsigned int mmd_mask)
{
const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
int tries = MDIO45_RESET_ITERS;
int rc = 0;
int in_reset;
while (tries) {
int mask = mmd_mask;
int mmd = 0;
int stat;
in_reset = 0;
while (mask) {
if (mask & 1) {
stat = ef4_mdio_read(efx, mmd, MDIO_CTRL1);
if (stat < 0) {
netif_err(efx, hw, efx->net_dev,
"failed to read status of"
" MMD %d\n", mmd);
return -EIO;
}
if (stat & MDIO_CTRL1_RESET)
in_reset |= (1 << mmd);
}
mask = mask >> 1;
mmd++;
}
if (!in_reset)
break;
tries--;
msleep(spintime);
}
if (in_reset != 0) {
netif_err(efx, hw, efx->net_dev,
"not all MMDs came out of reset in time."
" MMDs still in reset: %x\n", in_reset);
rc = -ETIMEDOUT;
}
return rc;
}
int ef4_mdio_check_mmds(struct ef4_nic *efx, unsigned int mmd_mask)
{
int mmd = 0, probe_mmd, devs1, devs2;
u32 devices;
/* Historically we have probed the PHYXS to find out what devices are
 * present, but that doesn't work so well if the PHYXS isn't expected
 * to exist; in that case just probe the first MMD in the mask supplied. */
probe_mmd = (mmd_mask & MDIO_DEVS_PHYXS) ? MDIO_MMD_PHYXS :
__ffs(mmd_mask);
/* Check all the expected MMDs are present */
devs1 = ef4_mdio_read(efx, probe_mmd, MDIO_DEVS1);
devs2 = ef4_mdio_read(efx, probe_mmd, MDIO_DEVS2);
if (devs1 < 0 || devs2 < 0) {
netif_err(efx, hw, efx->net_dev,
"failed to read devices present\n");
return -EIO;
}
devices = devs1 | (devs2 << 16);
if ((devices & mmd_mask) != mmd_mask) {
netif_err(efx, hw, efx->net_dev,
"required MMDs not present: got %x, wanted %x\n",
devices, mmd_mask);
return -ENODEV;
}
netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices);
/* Check all required MMDs are responding and happy. */
while (mmd_mask) {
if ((mmd_mask & 1) && ef4_mdio_check_mmd(efx, mmd))
return -EIO;
mmd_mask = mmd_mask >> 1;
mmd++;
}
return 0;
}
bool ef4_mdio_links_ok(struct ef4_nic *efx, unsigned int mmd_mask)
{
/* If the port is in loopback, then we should only consider a subset
* of MMDs */
if (LOOPBACK_INTERNAL(efx))
return true;
else if (LOOPBACK_MASK(efx) & LOOPBACKS_WS)
return false;
else if (ef4_phy_mode_disabled(efx->phy_mode))
return false;
else if (efx->loopback_mode == LOOPBACK_PHYXS)
mmd_mask &= ~(MDIO_DEVS_PHYXS |
MDIO_DEVS_PCS |
MDIO_DEVS_PMAPMD |
MDIO_DEVS_AN);
else if (efx->loopback_mode == LOOPBACK_PCS)
mmd_mask &= ~(MDIO_DEVS_PCS |
MDIO_DEVS_PMAPMD |
MDIO_DEVS_AN);
else if (efx->loopback_mode == LOOPBACK_PMAPMD)
mmd_mask &= ~(MDIO_DEVS_PMAPMD |
MDIO_DEVS_AN);
return mdio45_links_ok(&efx->mdio, mmd_mask);
}
void ef4_mdio_transmit_disable(struct ef4_nic *efx)
{
ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
MDIO_PMA_TXDIS, MDIO_PMD_TXDIS_GLOBAL,
efx->phy_mode & PHY_MODE_TX_DISABLED);
}
void ef4_mdio_phy_reconfigure(struct ef4_nic *efx)
{
ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
MDIO_CTRL1, MDIO_PMA_CTRL1_LOOPBACK,
efx->loopback_mode == LOOPBACK_PMAPMD);
ef4_mdio_set_flag(efx, MDIO_MMD_PCS,
MDIO_CTRL1, MDIO_PCS_CTRL1_LOOPBACK,
efx->loopback_mode == LOOPBACK_PCS);
ef4_mdio_set_flag(efx, MDIO_MMD_PHYXS,
MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK,
efx->loopback_mode == LOOPBACK_PHYXS_WS);
}
static void ef4_mdio_set_mmd_lpower(struct ef4_nic *efx,
int lpower, int mmd)
{
int stat = ef4_mdio_read(efx, mmd, MDIO_STAT1);
netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n",
mmd, lpower);
if (stat & MDIO_STAT1_LPOWERABLE) {
ef4_mdio_set_flag(efx, mmd, MDIO_CTRL1,
MDIO_CTRL1_LPOWER, lpower);
}
}
void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx,
int low_power, unsigned int mmd_mask)
{
int mmd = 0;
mmd_mask &= ~MDIO_DEVS_AN;
while (mmd_mask) {
if (mmd_mask & 1)
ef4_mdio_set_mmd_lpower(efx, low_power, mmd);
mmd_mask = (mmd_mask >> 1);
mmd++;
}
}
/**
* ef4_mdio_set_settings - Set (some of) the PHY settings over MDIO.
* @efx: Efx NIC
* @ecmd: New settings
*/
int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };
efx->phy_op->get_settings(efx, &prev);
if (ecmd->advertising == prev.advertising &&
ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) &&
ecmd->duplex == prev.duplex &&
ecmd->port == prev.port &&
ecmd->autoneg == prev.autoneg)
return 0;
/* We can only change these settings for -T PHYs */
if (prev.port != PORT_TP || ecmd->port != PORT_TP)
return -EINVAL;
/* Check that PHY supports these settings */
if (!ecmd->autoneg ||
(ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
return -EINVAL;
ef4_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
ef4_mdio_an_reconfigure(efx);
return 0;
}
/**
* ef4_mdio_an_reconfigure - Push advertising flags and restart autonegotiation
* @efx: Efx NIC
*/
void ef4_mdio_an_reconfigure(struct ef4_nic *efx)
{
int reg;
WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
/* Set up the base page */
reg = ADVERTISE_CSMA | ADVERTISE_RESV;
if (efx->link_advertising & ADVERTISED_Pause)
reg |= ADVERTISE_PAUSE_CAP;
if (efx->link_advertising & ADVERTISED_Asym_Pause)
reg |= ADVERTISE_PAUSE_ASYM;
ef4_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
/* Set up the (extended) next page */
efx->phy_op->set_npage_adv(efx, efx->link_advertising);
/* Enable and restart AN */
reg = ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP;
ef4_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
}
u8 ef4_mdio_get_pause(struct ef4_nic *efx)
{
BUILD_BUG_ON(EF4_FC_AUTO & (EF4_FC_RX | EF4_FC_TX));
if (!(efx->wanted_fc & EF4_FC_AUTO))
return efx->wanted_fc;
WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
return mii_resolve_flowctrl_fdx(
mii_advertise_flowctrl(efx->wanted_fc),
ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
}
int ef4_mdio_test_alive(struct ef4_nic *efx)
{
int rc;
int devad = __ffs(efx->mdio.mmds);
u16 physid1, physid2;
mutex_lock(&efx->mac_lock);
physid1 = ef4_mdio_read(efx, devad, MDIO_DEVID1);
physid2 = ef4_mdio_read(efx, devad, MDIO_DEVID2);
if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
(physid2 == 0x0000) || (physid2 == 0xffff)) {
netif_err(efx, hw, efx->net_dev,
"no MDIO PHY present with ID %d\n", efx->mdio.prtad);
rc = -EINVAL;
} else {
rc = ef4_mdio_check_mmds(efx, efx->mdio.mmds);
}
mutex_unlock(&efx->mac_lock);
return rc;
}

View File

@@ -0,0 +1,110 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_MDIO_10G_H
#define EF4_MDIO_10G_H
#include <linux/mdio.h>
/*
* Helper functions for doing 10G MDIO as specified in IEEE 802.3 clause 45.
*/
#include "efx.h"
static inline unsigned ef4_mdio_id_rev(u32 id) { return id & 0xf; }
static inline unsigned ef4_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
unsigned ef4_mdio_id_oui(u32 id);
static inline int ef4_mdio_read(struct ef4_nic *efx, int devad, int addr)
{
return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad, devad, addr);
}
static inline void
ef4_mdio_write(struct ef4_nic *efx, int devad, int addr, int value)
{
efx->mdio.mdio_write(efx->net_dev, efx->mdio.prtad, devad, addr, value);
}
static inline u32 ef4_mdio_read_id(struct ef4_nic *efx, int mmd)
{
u16 id_low = ef4_mdio_read(efx, mmd, MDIO_DEVID2);
u16 id_hi = ef4_mdio_read(efx, mmd, MDIO_DEVID1);
return (id_hi << 16) | (id_low);
}
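/* Illustrative sketch (not part of the driver): decoding the 32-bit PHY
 * ID obtained from ef4_mdio_read_id() with the helpers above.  The
 * debug message format is an arbitrary example.
 */
static inline void ef4_mdio_example_log_id(struct ef4_nic *efx, int mmd)
{
	u32 id = ef4_mdio_read_id(efx, mmd);

	netif_dbg(efx, hw, efx->net_dev,
		  "PHY ID: OUI %06x model %02x rev %x\n",
		  ef4_mdio_id_oui(id), ef4_mdio_id_model(id),
		  ef4_mdio_id_rev(id));
}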
static inline bool ef4_mdio_phyxgxs_lane_sync(struct ef4_nic *efx)
{
int i, lane_status;
bool sync;
for (i = 0; i < 2; ++i)
lane_status = ef4_mdio_read(efx, MDIO_MMD_PHYXS,
MDIO_PHYXS_LNSTAT);
sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
if (!sync)
netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n",
lane_status);
return sync;
}
const char *ef4_mdio_mmd_name(int mmd);
/*
* Reset a specific MMD and wait for reset to clear.
* Return number of spins left (>0) on success, -%ETIMEDOUT on failure.
*
* This function will sleep
*/
int ef4_mdio_reset_mmd(struct ef4_nic *efx, int mmd, int spins, int spintime);
/* As ef4_mdio_check_mmd but for multiple MMDs */
int ef4_mdio_check_mmds(struct ef4_nic *efx, unsigned int mmd_mask);
/* Check the link status of specified mmds in bit mask */
bool ef4_mdio_links_ok(struct ef4_nic *efx, unsigned int mmd_mask);
/* Generic transmit disable support through PMAPMD */
void ef4_mdio_transmit_disable(struct ef4_nic *efx);
/* Generic part of reconfigure: set/clear loopback bits */
void ef4_mdio_phy_reconfigure(struct ef4_nic *efx);
/* Set the power state of the specified MMDs */
void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx, int low_power,
unsigned int mmd_mask);
/* Set (some of) the PHY settings over MDIO */
int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd);
/* Push advertising flags and restart autonegotiation */
void ef4_mdio_an_reconfigure(struct ef4_nic *efx);
/* Get pause parameters from AN if available (otherwise return
* requested pause parameters)
*/
u8 ef4_mdio_get_pause(struct ef4_nic *efx);
/* Wait for specified MMDs to exit reset within a timeout */
int ef4_mdio_wait_reset_mmds(struct ef4_nic *efx, unsigned int mmd_mask);
/* Set or clear flag, debouncing */
static inline void
ef4_mdio_set_flag(struct ef4_nic *efx, int devad, int addr,
int mask, bool state)
{
mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
}
/* Liveness self-test for MDIO PHYs */
int ef4_mdio_test_alive(struct ef4_nic *efx);
#endif /* EF4_MDIO_10G_H */

View File

@@ -0,0 +1,133 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
#include "efx.h"
#define to_ef4_mtd_partition(mtd) \
container_of(mtd, struct ef4_mtd_partition, mtd)
/* MTD interface */
static int ef4_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
struct ef4_nic *efx = mtd->priv;
int rc;
rc = efx->type->mtd_erase(mtd, erase->addr, erase->len);
if (rc == 0) {
erase->state = MTD_ERASE_DONE;
} else {
erase->state = MTD_ERASE_FAILED;
erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
}
mtd_erase_callback(erase);
return rc;
}
static void ef4_mtd_sync(struct mtd_info *mtd)
{
struct ef4_mtd_partition *part = to_ef4_mtd_partition(mtd);
struct ef4_nic *efx = mtd->priv;
int rc;
rc = efx->type->mtd_sync(mtd);
if (rc)
pr_err("%s: %s sync failed (%d)\n",
part->name, part->dev_type_name, rc);
}
static void ef4_mtd_remove_partition(struct ef4_mtd_partition *part)
{
int rc;
for (;;) {
rc = mtd_device_unregister(&part->mtd);
if (rc != -EBUSY)
break;
ssleep(1);
}
WARN_ON(rc);
list_del(&part->node);
}
int ef4_mtd_add(struct ef4_nic *efx, struct ef4_mtd_partition *parts,
size_t n_parts, size_t sizeof_part)
{
struct ef4_mtd_partition *part;
size_t i;
for (i = 0; i < n_parts; i++) {
part = (struct ef4_mtd_partition *)((char *)parts +
i * sizeof_part);
part->mtd.writesize = 1;
part->mtd.owner = THIS_MODULE;
part->mtd.priv = efx;
part->mtd.name = part->name;
part->mtd._erase = ef4_mtd_erase;
part->mtd._read = efx->type->mtd_read;
part->mtd._write = efx->type->mtd_write;
part->mtd._sync = ef4_mtd_sync;
efx->type->mtd_rename(part);
if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
/* Add to list in order - ef4_mtd_remove() depends on this */
list_add_tail(&part->node, &efx->mtd_list);
}
return 0;
fail:
while (i--) {
part = (struct ef4_mtd_partition *)((char *)parts +
i * sizeof_part);
ef4_mtd_remove_partition(part);
}
/* Failure is unlikely here, but probably means we're out of memory */
return -ENOMEM;
}
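/* Illustrative sketch (not part of the driver): how a caller might wrap
 * ef4_mtd_add().  struct ef4_example_partition is hypothetical; the
 * real caller embeds struct ef4_mtd_partition as the first member of
 * its own partition type and passes that type's size, which is how
 * ef4_mtd_add() steps through the array with sizeof_part.
 */
struct ef4_example_partition {
	struct ef4_mtd_partition common;	/* must be the first member */
	unsigned int dev_offset;		/* device-specific state */
};

static int __maybe_unused
ef4_example_mtd_probe(struct ef4_nic *efx,
		      struct ef4_example_partition *parts, size_t n_parts)
{
	return ef4_mtd_add(efx, &parts[0].common, n_parts, sizeof(parts[0]));
}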
void ef4_mtd_remove(struct ef4_nic *efx)
{
struct ef4_mtd_partition *parts, *part, *next;
WARN_ON(ef4_dev_registered(efx));
if (list_empty(&efx->mtd_list))
return;
parts = list_first_entry(&efx->mtd_list, struct ef4_mtd_partition,
node);
list_for_each_entry_safe(part, next, &efx->mtd_list, node)
ef4_mtd_remove_partition(part);
kfree(parts);
}
void ef4_mtd_rename(struct ef4_nic *efx)
{
struct ef4_mtd_partition *part;
ASSERT_RTNL();
list_for_each_entry(part, &efx->mtd_list, node)
efx->type->mtd_rename(part);
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,527 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
/**************************************************************************
*
* Generic buffer handling
* These buffers are used for interrupt status, MAC stats, etc.
*
**************************************************************************/
int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
unsigned int len, gfp_t gfp_flags)
{
buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
&buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
return 0;
}
void ef4_nic_free_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer)
{
if (buffer->addr) {
dma_free_coherent(&efx->pci_dev->dev, buffer->len,
buffer->addr, buffer->dma_addr);
buffer->addr = NULL;
}
}
/* Check whether an event is present in the eventq at the current
* read pointer. Only useful for self-test.
*/
bool ef4_nic_event_present(struct ef4_channel *channel)
{
return ef4_event_present(ef4_event(channel, channel->eventq_read_ptr));
}
void ef4_nic_event_test_start(struct ef4_channel *channel)
{
channel->event_test_cpu = -1;
smp_wmb();
channel->efx->type->ev_test_generate(channel);
}
int ef4_nic_irq_test_start(struct ef4_nic *efx)
{
efx->last_irq_cpu = -1;
smp_wmb();
return efx->type->irq_test_generate(efx);
}
/* Hook interrupt handler(s)
* Try MSI and then legacy interrupts.
*/
int ef4_nic_init_interrupt(struct ef4_nic *efx)
{
struct ef4_channel *channel;
unsigned int n_irqs;
int rc;
if (!EF4_INT_MODE_USE_MSI(efx)) {
rc = request_irq(efx->legacy_irq,
efx->type->irq_handle_legacy, IRQF_SHARED,
efx->name, efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook legacy IRQ %d\n",
efx->pci_dev->irq);
goto fail1;
}
return 0;
}
#ifdef CONFIG_RFS_ACCEL
if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
efx->net_dev->rx_cpu_rmap =
alloc_irq_cpu_rmap(efx->n_rx_channels);
if (!efx->net_dev->rx_cpu_rmap) {
rc = -ENOMEM;
goto fail1;
}
}
#endif
/* Hook MSI or MSI-X interrupt */
n_irqs = 0;
ef4_for_each_channel(channel, efx) {
rc = request_irq(channel->irq, efx->type->irq_handle_msi,
IRQF_PROBE_SHARED, /* Not shared */
efx->msi_context[channel->channel].name,
&efx->msi_context[channel->channel]);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq);
goto fail2;
}
++n_irqs;
#ifdef CONFIG_RFS_ACCEL
if (efx->interrupt_mode == EF4_INT_MODE_MSIX &&
channel->channel < efx->n_rx_channels) {
rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
channel->irq);
if (rc)
goto fail2;
}
#endif
}
return 0;
fail2:
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
#endif
ef4_for_each_channel(channel, efx) {
if (n_irqs-- == 0)
break;
free_irq(channel->irq, &efx->msi_context[channel->channel]);
}
fail1:
return rc;
}
void ef4_nic_fini_interrupt(struct ef4_nic *efx)
{
struct ef4_channel *channel;
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
#endif
if (EF4_INT_MODE_USE_MSI(efx)) {
/* Disable MSI/MSI-X interrupts */
ef4_for_each_channel(channel, efx)
free_irq(channel->irq,
&efx->msi_context[channel->channel]);
} else {
/* Disable legacy interrupt */
free_irq(efx->legacy_irq, efx);
}
}
/* Register dump */
#define REGISTER_REVISION_FA 1
#define REGISTER_REVISION_FB 2
#define REGISTER_REVISION_FC 3
#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */
#define REGISTER_REVISION_ED 4
#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */
struct ef4_nic_reg {
u32 offset:24;
u32 min_revision:3, max_revision:3;
};
#define REGISTER(name, arch, min_rev, max_rev) { \
arch ## R_ ## min_rev ## max_rev ## _ ## name, \
REGISTER_REVISION_ ## arch ## min_rev, \
REGISTER_REVISION_ ## arch ## max_rev \
}
#define REGISTER_AA(name) REGISTER(name, F, A, A)
#define REGISTER_AB(name) REGISTER(name, F, A, B)
#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
#define REGISTER_BB(name) REGISTER(name, F, B, B)
#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
static const struct ef4_nic_reg ef4_nic_regs[] = {
REGISTER_AZ(ADR_REGION),
REGISTER_AZ(INT_EN_KER),
REGISTER_BZ(INT_EN_CHAR),
REGISTER_AZ(INT_ADR_KER),
REGISTER_BZ(INT_ADR_CHAR),
/* INT_ACK_KER is WO */
/* INT_ISR0 is RC */
REGISTER_AZ(HW_INIT),
REGISTER_CZ(USR_EV_CFG),
REGISTER_AB(EE_SPI_HCMD),
REGISTER_AB(EE_SPI_HADR),
REGISTER_AB(EE_SPI_HDATA),
REGISTER_AB(EE_BASE_PAGE),
REGISTER_AB(EE_VPD_CFG0),
/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
/* PCIE_CORE_INDIRECT is indirect */
REGISTER_AB(NIC_STAT),
REGISTER_AB(GPIO_CTL),
REGISTER_AB(GLB_CTL),
/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
REGISTER_BZ(DP_CTRL),
REGISTER_AZ(MEM_STAT),
REGISTER_AZ(CS_DEBUG),
REGISTER_AZ(ALTERA_BUILD),
REGISTER_AZ(CSR_SPARE),
REGISTER_AB(PCIE_SD_CTL0123),
REGISTER_AB(PCIE_SD_CTL45),
REGISTER_AB(PCIE_PCS_CTL_STAT),
/* DEBUG_DATA_OUT is not used */
/* DRV_EV is WO */
REGISTER_AZ(EVQ_CTL),
REGISTER_AZ(EVQ_CNT1),
REGISTER_AZ(EVQ_CNT2),
REGISTER_AZ(BUF_TBL_CFG),
REGISTER_AZ(SRM_RX_DC_CFG),
REGISTER_AZ(SRM_TX_DC_CFG),
REGISTER_AZ(SRM_CFG),
/* BUF_TBL_UPD is WO */
REGISTER_AZ(SRM_UPD_EVQ),
REGISTER_AZ(SRAM_PARITY),
REGISTER_AZ(RX_CFG),
REGISTER_BZ(RX_FILTER_CTL),
/* RX_FLUSH_DESCQ is WO */
REGISTER_AZ(RX_DC_CFG),
REGISTER_AZ(RX_DC_PF_WM),
REGISTER_BZ(RX_RSS_TKEY),
/* RX_NODESC_DROP is RC */
REGISTER_AA(RX_SELF_RST),
/* RX_DEBUG, RX_PUSH_DROP are not used */
REGISTER_CZ(RX_RSS_IPV6_REG1),
REGISTER_CZ(RX_RSS_IPV6_REG2),
REGISTER_CZ(RX_RSS_IPV6_REG3),
/* TX_FLUSH_DESCQ is WO */
REGISTER_AZ(TX_DC_CFG),
REGISTER_AA(TX_CHKSM_CFG),
REGISTER_AZ(TX_CFG),
/* TX_PUSH_DROP is not used */
REGISTER_AZ(TX_RESERVED),
REGISTER_BZ(TX_PACE),
/* TX_PACE_DROP_QID is RC */
REGISTER_BB(TX_VLAN),
REGISTER_BZ(TX_IPFIL_PORTEN),
REGISTER_AB(MD_TXD),
REGISTER_AB(MD_RXD),
REGISTER_AB(MD_CS),
REGISTER_AB(MD_PHY_ADR),
REGISTER_AB(MD_ID),
/* MD_STAT is RC */
REGISTER_AB(MAC_STAT_DMA),
REGISTER_AB(MAC_CTRL),
REGISTER_BB(GEN_MODE),
REGISTER_AB(MAC_MC_HASH_REG0),
REGISTER_AB(MAC_MC_HASH_REG1),
REGISTER_AB(GM_CFG1),
REGISTER_AB(GM_CFG2),
/* GM_IPG and GM_HD are not used */
REGISTER_AB(GM_MAX_FLEN),
/* GM_TEST is not used */
REGISTER_AB(GM_ADR1),
REGISTER_AB(GM_ADR2),
REGISTER_AB(GMF_CFG0),
REGISTER_AB(GMF_CFG1),
REGISTER_AB(GMF_CFG2),
REGISTER_AB(GMF_CFG3),
REGISTER_AB(GMF_CFG4),
REGISTER_AB(GMF_CFG5),
REGISTER_BB(TX_SRC_MAC_CTL),
REGISTER_AB(XM_ADR_LO),
REGISTER_AB(XM_ADR_HI),
REGISTER_AB(XM_GLB_CFG),
REGISTER_AB(XM_TX_CFG),
REGISTER_AB(XM_RX_CFG),
REGISTER_AB(XM_MGT_INT_MASK),
REGISTER_AB(XM_FC),
REGISTER_AB(XM_PAUSE_TIME),
REGISTER_AB(XM_TX_PARAM),
REGISTER_AB(XM_RX_PARAM),
/* XM_MGT_INT_MSK (note no 'A') is RC */
REGISTER_AB(XX_PWR_RST),
REGISTER_AB(XX_SD_CTL),
REGISTER_AB(XX_TXDRV_CTL),
/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
/* XX_CORE_STAT is partly RC */
};
struct ef4_nic_reg_table {
u32 offset:24;
u32 min_revision:3, max_revision:3;
u32 step:6, rows:21;
};
#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
offset, \
REGISTER_REVISION_ ## arch ## min_rev, \
REGISTER_REVISION_ ## arch ## max_rev, \
step, rows \
}
#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
REGISTER_TABLE_DIMENSIONS( \
name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
arch, min_rev, max_rev, \
arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
#define REGISTER_TABLE_BB_CZ(name) \
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
FR_BZ_ ## name ## _STEP, \
FR_BB_ ## name ## _ROWS), \
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
FR_BZ_ ## name ## _STEP, \
FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
static const struct ef4_nic_reg_table ef4_nic_reg_tables[] = {
/* DRIVER is not used */
/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
REGISTER_TABLE_BB(TX_IPFIL_TBL),
REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
/* We can't reasonably read all of the buffer table (up to 8MB!).
* However this driver will only use a few entries. Reading
* 1K entries allows for some expansion of queue count and
* size before we need to change the version. */
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
F, A, A, 8, 1024),
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
F, B, Z, 8, 1024),
REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
REGISTER_TABLE_BB_CZ(TIMER_TBL),
REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
/* TX_FILTER_TBL0 is huge and not used by this driver */
REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
REGISTER_TABLE_CZ(MC_TREG_SMEM),
/* MSIX_PBA_TABLE is not mapped */
/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
size_t ef4_nic_get_regs_len(struct ef4_nic *efx)
{
const struct ef4_nic_reg *reg;
const struct ef4_nic_reg_table *table;
size_t len = 0;
for (reg = ef4_nic_regs;
reg < ef4_nic_regs + ARRAY_SIZE(ef4_nic_regs);
reg++)
if (efx->type->revision >= reg->min_revision &&
efx->type->revision <= reg->max_revision)
len += sizeof(ef4_oword_t);
for (table = ef4_nic_reg_tables;
table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables);
table++)
if (efx->type->revision >= table->min_revision &&
efx->type->revision <= table->max_revision)
len += table->rows * min_t(size_t, table->step, 16);
return len;
}
void ef4_nic_get_regs(struct ef4_nic *efx, void *buf)
{
const struct ef4_nic_reg *reg;
const struct ef4_nic_reg_table *table;
for (reg = ef4_nic_regs;
reg < ef4_nic_regs + ARRAY_SIZE(ef4_nic_regs);
reg++) {
if (efx->type->revision >= reg->min_revision &&
efx->type->revision <= reg->max_revision) {
ef4_reado(efx, (ef4_oword_t *)buf, reg->offset);
buf += sizeof(ef4_oword_t);
}
}
for (table = ef4_nic_reg_tables;
table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables);
table++) {
size_t size, i;
if (!(efx->type->revision >= table->min_revision &&
efx->type->revision <= table->max_revision))
continue;
size = min_t(size_t, table->step, 16);
for (i = 0; i < table->rows; i++) {
switch (table->step) {
case 4: /* 32-bit SRAM */
ef4_readd(efx, buf, table->offset + 4 * i);
break;
case 8: /* 64-bit SRAM */
ef4_sram_readq(efx,
efx->membase + table->offset,
buf, i);
break;
case 16: /* 128-bit-readable register */
ef4_reado_table(efx, buf, table->offset, i);
break;
case 32: /* 128-bit register, interleaved */
ef4_reado_table(efx, buf, table->offset, 2 * i);
break;
default:
WARN_ON(1);
return;
}
buf += size;
}
}
}
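/* Illustrative sketch (not part of the driver): how an ethtool
 * get_regs hook might pair the two functions above, sizing the buffer
 * with ef4_nic_get_regs_len() beforehand and reporting the NIC revision
 * as the dump version.  Assumes struct ethtool_regs is in scope via
 * <linux/ethtool.h>.
 */
static void __maybe_unused
ef4_example_get_regs(struct ef4_nic *efx, struct ethtool_regs *regs,
		     void *buf)
{
	regs->version = efx->type->revision;
	ef4_nic_get_regs(efx, buf);
}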
/**
* ef4_nic_describe_stats - Describe supported statistics for ethtool
* @desc: Array of &struct ef4_hw_stat_desc describing the statistics
* @count: Length of the @desc array
* @mask: Bitmask of which elements of @desc are enabled
* @names: Buffer to copy names to, or %NULL. The names are copied
* starting at intervals of %ETH_GSTRING_LEN bytes.
*
* Returns the number of visible statistics, i.e. the number of set
* bits in the first @count bits of @mask for which a name is defined.
*/
size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u8 *names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
if (names) {
strlcpy(names, desc[index].name,
ETH_GSTRING_LEN);
names += ETH_GSTRING_LEN;
}
++visible;
}
}
return visible;
}
/**
* ef4_nic_update_stats - Convert statistics DMA buffer to array of u64
* @desc: Array of &struct ef4_hw_stat_desc describing the DMA buffer
* layout. DMA widths of 0, 16, 32 and 64 are supported; where
* the width is specified as 0 the corresponding element of
* @stats is not updated.
* @count: Length of the @desc array
* @mask: Bitmask of which elements of @desc are enabled
* @stats: Buffer to update with the converted statistics. The length
* of this array must be at least @count.
* @dma_buf: DMA buffer containing hardware statistics
* @accumulate: If set, the converted values will be added rather than
* directly stored to the corresponding elements of @stats
*/
void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
const unsigned long *mask,
u64 *stats, const void *dma_buf, bool accumulate)
{
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].dma_width) {
const void *addr = dma_buf + desc[index].offset;
u64 val;
switch (desc[index].dma_width) {
case 16:
val = le16_to_cpup((__le16 *)addr);
break;
case 32:
val = le32_to_cpup((__le32 *)addr);
break;
case 64:
val = le64_to_cpup((__le64 *)addr);
break;
default:
WARN_ON(1);
val = 0;
break;
}
if (accumulate)
stats[index] += val;
else
stats[index] = val;
}
}
}
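/* Illustrative sketch (not part of the driver): invoking
 * ef4_nic_update_stats() with every statistic enabled.  The desc array
 * and buffers are supplied by the caller; FALCON_STAT_COUNT comes from
 * nic.h.  With accumulate set, decoded values are added to the running
 * totals rather than overwriting them.
 */
static void __maybe_unused
ef4_example_update_stats(const struct ef4_hw_stat_desc *desc,
			 const void *dma_buf, u64 *stats)
{
	DECLARE_BITMAP(mask, FALCON_STAT_COUNT);

	bitmap_fill(mask, FALCON_STAT_COUNT);
	ef4_nic_update_stats(desc, FALCON_STAT_COUNT, mask, stats, dma_buf,
			     true);
}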
void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *rx_nodesc_drops)
{
/* if down, or this is the first update after coming up */
if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
efx->rx_nodesc_drops_while_down +=
*rx_nodesc_drops - efx->rx_nodesc_drops_total;
efx->rx_nodesc_drops_total = *rx_nodesc_drops;
efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}

View File

@@ -0,0 +1,513 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_NIC_H
#define EF4_NIC_H
#include <linux/net_tstamp.h>
#include <linux/i2c-algo-bit.h>
#include "net_driver.h"
#include "efx.h"
enum {
EF4_REV_FALCON_A0 = 0,
EF4_REV_FALCON_A1 = 1,
EF4_REV_FALCON_B0 = 2,
};
static inline int ef4_nic_rev(struct ef4_nic *efx)
{
return efx->type->revision;
}
u32 ef4_farch_fpga_ver(struct ef4_nic *efx);
/* NIC has two interlinked PCI functions for the same port. */
static inline bool ef4_nic_is_dual_func(struct ef4_nic *efx)
{
return ef4_nic_rev(efx) < EF4_REV_FALCON_B0;
}
/* Read the current event from the event queue */
static inline ef4_qword_t *ef4_event(struct ef4_channel *channel,
unsigned int index)
{
return ((ef4_qword_t *) (channel->eventq.buf.addr)) +
(index & channel->eventq_mask);
}
/* See if an event is present
*
* We check both the high and low dword of the event for all ones. We
* wrote all ones when we cleared the event, and no valid event can
* have all ones in either its high or low dwords. This approach is
* robust against reordering.
*
* Note that using a single 64-bit comparison is incorrect; even
* though the CPU read will be atomic, the DMA write may not be.
*/
static inline int ef4_event_present(ef4_qword_t *event)
{
return !(EF4_DWORD_IS_ALL_ONES(event->dword[0]) |
EF4_DWORD_IS_ALL_ONES(event->dword[1]));
}
/* Returns a pointer to the specified transmit descriptor in the TX
* descriptor queue belonging to the specified channel.
*/
static inline ef4_qword_t *
ef4_tx_desc(struct ef4_tx_queue *tx_queue, unsigned int index)
{
return ((ef4_qword_t *) (tx_queue->txd.buf.addr)) + index;
}
/* Get partner of a TX queue, seen as part of the same net core queue */
static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_queue)
{
if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
return tx_queue - EF4_TXQ_TYPE_OFFLOAD;
else
return tx_queue + EF4_TXQ_TYPE_OFFLOAD;
}
/* Report whether this TX queue would be empty for the given write_count.
* May return false negative.
*/
static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
unsigned int write_count)
{
unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
return ((empty_read_count ^ write_count) & ~EF4_EMPTY_COUNT_VALID) == 0;
}
/* Decide whether to push a TX descriptor to the NIC vs merely writing
* the doorbell. This can reduce latency when we are adding a single
* descriptor to an empty queue, but is otherwise pointless. Further,
* Falcon and Siena have hardware bugs (SF bug 33851) that may be
* triggered if we don't check this.
* We use the write_count used for the last doorbell push, to get the
* NIC's view of the tx queue.
*/
static inline bool ef4_nic_may_push_tx_desc(struct ef4_tx_queue *tx_queue,
unsigned int write_count)
{
bool was_empty = __ef4_nic_tx_is_empty(tx_queue, write_count);
tx_queue->empty_read_count = 0;
return was_empty && tx_queue->write_count - write_count == 1;
}
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline ef4_qword_t *
ef4_rx_desc(struct ef4_rx_queue *rx_queue, unsigned int index)
{
return ((ef4_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}
enum {
PHY_TYPE_NONE = 0,
PHY_TYPE_TXC43128 = 1,
PHY_TYPE_88E1111 = 2,
PHY_TYPE_SFX7101 = 3,
PHY_TYPE_QT2022C2 = 4,
PHY_TYPE_PM8358 = 6,
PHY_TYPE_SFT9001A = 8,
PHY_TYPE_QT2025C = 9,
PHY_TYPE_SFT9001B = 10,
};
#define FALCON_XMAC_LOOPBACKS \
((1 << LOOPBACK_XGMII) | \
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI))
/* Alignment of PCIe DMA boundaries (4KB) */
#define EF4_PAGE_SIZE 4096
/* Size and alignment of buffer table entries (same) */
#define EF4_BUF_SIZE EF4_PAGE_SIZE
/* NIC-generic software stats */
enum {
GENERIC_STAT_rx_noskb_drops,
GENERIC_STAT_rx_nodesc_trunc,
GENERIC_STAT_COUNT
};
/**
* struct falcon_board_type - board operations and type information
* @id: Board type id, as found in NVRAM
* @init: Allocate resources and initialise peripheral hardware
* @init_phy: Do board-specific PHY initialisation
* @fini: Shut down hardware and free resources
* @set_id_led: Set state of identifying LED or revert to automatic function
* @monitor: Board-specific health check function
*/
struct falcon_board_type {
u8 id;
int (*init) (struct ef4_nic *nic);
void (*init_phy) (struct ef4_nic *efx);
void (*fini) (struct ef4_nic *nic);
void (*set_id_led) (struct ef4_nic *efx, enum ef4_led_mode mode);
int (*monitor) (struct ef4_nic *nic);
};
/**
* struct falcon_board - board information
* @type: Type of board
* @major: Major rev. ('A', 'B' ...)
* @minor: Minor rev. (0, 1, ...)
* @i2c_adap: I2C adapter for on-board peripherals
* @i2c_data: Data for bit-banging algorithm
* @hwmon_client: I2C client for hardware monitor
* @ioexp_client: I2C client for power/port control
*/
struct falcon_board {
const struct falcon_board_type *type;
int major;
int minor;
struct i2c_adapter i2c_adap;
struct i2c_algo_bit_data i2c_data;
struct i2c_client *hwmon_client, *ioexp_client;
};
/**
* struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
* @device_id: Controller's id for the device
* @size: Size (in bytes)
* @addr_len: Number of address bytes in read/write commands
* @munge_address: Flag whether addresses should be munged.
* Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
* use bit 3 of the command byte as address bit A8, rather
* than having a two-byte address. If this flag is set, then
* commands should be munged in this way.
* @erase_command: Erase command (or 0 if sector erase not needed).
* @erase_size: Erase sector size (in bytes)
* Erase commands affect sectors with this size and alignment.
* This must be a power of two.
* @block_size: Write block size (in bytes).
* Write commands are limited to blocks with this size and alignment.
*/
struct falcon_spi_device {
int device_id;
unsigned int size;
unsigned int addr_len;
unsigned int munge_address:1;
u8 erase_command;
unsigned int erase_size;
unsigned int block_size;
};
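/* Illustrative sketch (not part of the driver): the address munging
 * described for @munge_address above.  For a 9-bit-address part such as
 * the AT25040A, address bit A8 is folded into bit 3 of the command byte
 * and only the low 8 address bits travel in the address byte; with
 * @munge_address clear this is a no-op.
 */
static inline u8
ef4_example_munge_command(const struct falcon_spi_device *spi,
			  u8 command, unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}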
static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
{
return spi->size != 0;
}
enum {
FALCON_STAT_tx_bytes = GENERIC_STAT_COUNT,
FALCON_STAT_tx_packets,
FALCON_STAT_tx_pause,
FALCON_STAT_tx_control,
FALCON_STAT_tx_unicast,
FALCON_STAT_tx_multicast,
FALCON_STAT_tx_broadcast,
FALCON_STAT_tx_lt64,
FALCON_STAT_tx_64,
FALCON_STAT_tx_65_to_127,
FALCON_STAT_tx_128_to_255,
FALCON_STAT_tx_256_to_511,
FALCON_STAT_tx_512_to_1023,
FALCON_STAT_tx_1024_to_15xx,
FALCON_STAT_tx_15xx_to_jumbo,
FALCON_STAT_tx_gtjumbo,
FALCON_STAT_tx_non_tcpudp,
FALCON_STAT_tx_mac_src_error,
FALCON_STAT_tx_ip_src_error,
FALCON_STAT_rx_bytes,
FALCON_STAT_rx_good_bytes,
FALCON_STAT_rx_bad_bytes,
FALCON_STAT_rx_packets,
FALCON_STAT_rx_good,
FALCON_STAT_rx_bad,
FALCON_STAT_rx_pause,
FALCON_STAT_rx_control,
FALCON_STAT_rx_unicast,
FALCON_STAT_rx_multicast,
FALCON_STAT_rx_broadcast,
FALCON_STAT_rx_lt64,
FALCON_STAT_rx_64,
FALCON_STAT_rx_65_to_127,
FALCON_STAT_rx_128_to_255,
FALCON_STAT_rx_256_to_511,
FALCON_STAT_rx_512_to_1023,
FALCON_STAT_rx_1024_to_15xx,
FALCON_STAT_rx_15xx_to_jumbo,
FALCON_STAT_rx_gtjumbo,
FALCON_STAT_rx_bad_lt64,
FALCON_STAT_rx_bad_gtjumbo,
FALCON_STAT_rx_overflow,
FALCON_STAT_rx_symbol_error,
FALCON_STAT_rx_align_error,
FALCON_STAT_rx_length_error,
FALCON_STAT_rx_internal_error,
FALCON_STAT_rx_nodesc_drop_cnt,
FALCON_STAT_COUNT
};
/**
* struct falcon_nic_data - Falcon NIC state
* @pci_dev2: Secondary function of Falcon A
* @board: Board state and functions
* @stats: Hardware statistics
* @stats_disable_count: Nest count for disabling statistics fetches
* @stats_pending: Is there a pending DMA of MAC statistics.
* @stats_timer: A timer for regularly fetching MAC statistics.
* @spi_flash: SPI flash device
* @spi_eeprom: SPI EEPROM device
* @spi_lock: SPI bus lock
* @mdio_lock: MDIO bus lock
* @xmac_poll_required: XMAC link state needs polling
*/
struct falcon_nic_data {
struct pci_dev *pci_dev2;
struct falcon_board board;
u64 stats[FALCON_STAT_COUNT];
unsigned int stats_disable_count;
bool stats_pending;
struct timer_list stats_timer;
struct falcon_spi_device spi_flash;
struct falcon_spi_device spi_eeprom;
struct mutex spi_lock;
struct mutex mdio_lock;
bool xmac_poll_required;
};
static inline struct falcon_board *falcon_board(struct ef4_nic *efx)
{
struct falcon_nic_data *data = efx->nic_data;
return &data->board;
}
struct ethtool_ts_info;
extern const struct ef4_nic_type falcon_a1_nic_type;
extern const struct ef4_nic_type falcon_b0_nic_type;
/**************************************************************************
*
* Externs
*
**************************************************************************
*/
int falcon_probe_board(struct ef4_nic *efx, u16 revision_info);
/* TX data path */
static inline int ef4_nic_probe_tx(struct ef4_tx_queue *tx_queue)
{
return tx_queue->efx->type->tx_probe(tx_queue);
}
static inline void ef4_nic_init_tx(struct ef4_tx_queue *tx_queue)
{
tx_queue->efx->type->tx_init(tx_queue);
}
static inline void ef4_nic_remove_tx(struct ef4_tx_queue *tx_queue)
{
tx_queue->efx->type->tx_remove(tx_queue);
}
static inline void ef4_nic_push_buffers(struct ef4_tx_queue *tx_queue)
{
tx_queue->efx->type->tx_write(tx_queue);
}
/* RX data path */
static inline int ef4_nic_probe_rx(struct ef4_rx_queue *rx_queue)
{
return rx_queue->efx->type->rx_probe(rx_queue);
}
static inline void ef4_nic_init_rx(struct ef4_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_init(rx_queue);
}
static inline void ef4_nic_remove_rx(struct ef4_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_remove(rx_queue);
}
static inline void ef4_nic_notify_rx_desc(struct ef4_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_write(rx_queue);
}
static inline void ef4_nic_generate_fill_event(struct ef4_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_defer_refill(rx_queue);
}
/* Event data path */
static inline int ef4_nic_probe_eventq(struct ef4_channel *channel)
{
return channel->efx->type->ev_probe(channel);
}
static inline int ef4_nic_init_eventq(struct ef4_channel *channel)
{
return channel->efx->type->ev_init(channel);
}
static inline void ef4_nic_fini_eventq(struct ef4_channel *channel)
{
channel->efx->type->ev_fini(channel);
}
static inline void ef4_nic_remove_eventq(struct ef4_channel *channel)
{
channel->efx->type->ev_remove(channel);
}
static inline int
ef4_nic_process_eventq(struct ef4_channel *channel, int quota)
{
return channel->efx->type->ev_process(channel, quota);
}
static inline void ef4_nic_eventq_read_ack(struct ef4_channel *channel)
{
channel->efx->type->ev_read_ack(channel);
}
void ef4_nic_event_test_start(struct ef4_channel *channel);
/* queue operations */
int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue);
void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue);
void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue);
void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue);
void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue);
unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue);
int ef4_farch_ev_probe(struct ef4_channel *channel);
int ef4_farch_ev_init(struct ef4_channel *channel);
void ef4_farch_ev_fini(struct ef4_channel *channel);
void ef4_farch_ev_remove(struct ef4_channel *channel);
int ef4_farch_ev_process(struct ef4_channel *channel, int quota);
void ef4_farch_ev_read_ack(struct ef4_channel *channel);
void ef4_farch_ev_test_generate(struct ef4_channel *channel);
/* filter operations */
int ef4_farch_filter_table_probe(struct ef4_nic *efx);
void ef4_farch_filter_table_restore(struct ef4_nic *efx);
void ef4_farch_filter_table_remove(struct ef4_nic *efx);
void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx);
s32 ef4_farch_filter_insert(struct ef4_nic *efx, struct ef4_filter_spec *spec,
bool replace);
int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
enum ef4_filter_priority priority,
u32 filter_id);
int ef4_farch_filter_get_safe(struct ef4_nic *efx,
enum ef4_filter_priority priority, u32 filter_id,
struct ef4_filter_spec *);
int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
enum ef4_filter_priority priority);
u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
enum ef4_filter_priority priority);
u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx);
s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
enum ef4_filter_priority priority, u32 *buf,
u32 size);
#ifdef CONFIG_RFS_ACCEL
s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
struct ef4_filter_spec *spec);
bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
unsigned int index);
#endif
void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx);
bool ef4_nic_event_present(struct ef4_channel *channel);
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
* asynchronously. If the counters contributing to B are always read
* after those contributing to A, the computed value may be lower than
* the true value by some variable amount, and may decrease between
* subsequent computations.
*
* We should never allow statistics to decrease or to exceed the true
* value. Since the computed value will never be greater than the
* true value, we can achieve this by only storing the computed value
* when it increases.
*/
static inline void ef4_update_diff_stat(u64 *stat, u64 diff)
{
if ((s64)(diff - *stat) > 0)
*stat = diff;
}
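A worked example of the rule above, with invented readings; each call stores the computed value only when it would not decrease the statistic:

	u64 stat = 0;

	ef4_update_diff_stat(&stat, 100);	/* stored: stat = 100 */
	ef4_update_diff_stat(&stat, 98);	/* apparent decrease ignored: stat = 100 */
	ef4_update_diff_stat(&stat, 105);	/* stored: stat = 105 */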
/* Interrupts */
int ef4_nic_init_interrupt(struct ef4_nic *efx);
int ef4_nic_irq_test_start(struct ef4_nic *efx);
void ef4_nic_fini_interrupt(struct ef4_nic *efx);
void ef4_farch_irq_enable_master(struct ef4_nic *efx);
int ef4_farch_irq_test_generate(struct ef4_nic *efx);
void ef4_farch_irq_disable_master(struct ef4_nic *efx);
irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id);
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id);
irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx);
static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
{
return ACCESS_ONCE(channel->event_test_cpu);
}
static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
{
return ACCESS_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
int ef4_nic_flush_queues(struct ef4_nic *efx);
int ef4_farch_fini_dmaq(struct ef4_nic *efx);
void ef4_farch_finish_flr(struct ef4_nic *efx);
void falcon_start_nic_stats(struct ef4_nic *efx);
void falcon_stop_nic_stats(struct ef4_nic *efx);
int falcon_reset_xaui(struct ef4_nic *efx);
void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw);
void ef4_farch_init_common(struct ef4_nic *efx);
void ef4_farch_rx_push_indir_table(struct ef4_nic *efx);
int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
unsigned int len, gfp_t gfp_flags);
void ef4_nic_free_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer);
/* Tests */
struct ef4_farch_register_test {
unsigned address;
ef4_oword_t mask;
};
int ef4_farch_test_registers(struct ef4_nic *efx,
const struct ef4_farch_register_test *regs,
size_t n_regs);
size_t ef4_nic_get_regs_len(struct ef4_nic *efx);
void ef4_nic_get_regs(struct ef4_nic *efx, void *buf);
size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u8 *names);
void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *stat);
#define EF4_MAX_FLUSH_TIME 5000
void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
ef4_qword_t *event);
#endif /* EF4_NIC_H */

View File

@@ -0,0 +1,50 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2007-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_PHY_H
#define EF4_PHY_H
/****************************************************************************
* 10Xpress (SFX7101) PHY
*/
extern const struct ef4_phy_operations falcon_sfx7101_phy_ops;
void tenxpress_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode);
/****************************************************************************
* AMCC/Quake QT202x PHYs
*/
extern const struct ef4_phy_operations falcon_qt202x_phy_ops;
/* These PHYs provide various H/W control states for LEDs */
#define QUAKE_LED_LINK_INVAL (0)
#define QUAKE_LED_LINK_STAT (1)
#define QUAKE_LED_LINK_ACT (2)
#define QUAKE_LED_LINK_ACTSTAT (3)
#define QUAKE_LED_OFF (4)
#define QUAKE_LED_ON (5)
#define QUAKE_LED_LINK_INPUT (6) /* Pin is an input. */
/* What link the LED tracks */
#define QUAKE_LED_TXLINK (0)
#define QUAKE_LED_RXLINK (8)
void falcon_qt202x_set_led(struct ef4_nic *p, int led, int state);
/****************************************************************************
* Transwitch CX4 retimer
*/
extern const struct ef4_phy_operations falcon_txc_phy_ops;
#define TXC_GPIO_DIR_INPUT 0
#define TXC_GPIO_DIR_OUTPUT 1
void falcon_txc_set_gpio_dir(struct ef4_nic *efx, int pin, int dir);
void falcon_txc_set_gpio_val(struct ef4_nic *efx, int pin, int val);
#endif

View File

@@ -0,0 +1,495 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
/*
* Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details
*/
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include "efx.h"
#include "mdio_10g.h"
#include "phy.h"
#include "nic.h"
#define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS | \
MDIO_DEVS_PMAPMD | \
MDIO_DEVS_PHYXS)
#define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) | \
(1 << LOOPBACK_PMAPMD) | \
(1 << LOOPBACK_PHYXS_WS))
/****************************************************************************/
/* Quake-specific MDIO registers */
#define MDIO_QUAKE_LED0_REG (0xD006)
/* QT2025C only */
#define PCS_FW_HEARTBEAT_REG 0xd7ee
#define PCS_FW_HEARTB_LBN 0
#define PCS_FW_HEARTB_WIDTH 8
#define PCS_FW_PRODUCT_CODE_1 0xd7f0
#define PCS_FW_VERSION_1 0xd7f3
#define PCS_FW_BUILD_1 0xd7f6
#define PCS_UC8051_STATUS_REG 0xd7fd
#define PCS_UC_STATUS_LBN 0
#define PCS_UC_STATUS_WIDTH 8
#define PCS_UC_STATUS_FW_SAVE 0x20
#define PMA_PMD_MODE_REG 0xc301
#define PMA_PMD_RXIN_SEL_LBN 6
#define PMA_PMD_FTX_CTRL2_REG 0xc309
#define PMA_PMD_FTX_STATIC_LBN 13
#define PMA_PMD_VEND1_REG 0xc001
#define PMA_PMD_VEND1_LBTXD_LBN 15
#define PCS_VEND1_REG 0xc000
#define PCS_VEND1_LBTXD_LBN 5
void falcon_qt202x_set_led(struct ef4_nic *p, int led, int mode)
{
int addr = MDIO_QUAKE_LED0_REG + led;
ef4_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
}
struct qt202x_phy_data {
enum ef4_phy_mode phy_mode;
bool bug17190_in_bad_state;
unsigned long bug17190_timer;
u32 firmware_ver;
};
#define QT2022C2_MAX_RESET_TIME 500
#define QT2022C2_RESET_WAIT 10
#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
#define QT2025C_HEARTB_WAIT 100
#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
#define QT2025C_FWSTART_WAIT 100
#define BUG17190_INTERVAL (2 * HZ)
static int qt2025c_wait_heartbeat(struct ef4_nic *efx)
{
unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
int reg, old_counter = 0;
/* Wait for firmware heartbeat to start */
for (;;) {
int counter;
reg = ef4_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG);
if (reg < 0)
return reg;
counter = ((reg >> PCS_FW_HEARTB_LBN) &
((1 << PCS_FW_HEARTB_WIDTH) - 1));
if (old_counter == 0)
old_counter = counter;
else if (counter != old_counter)
break;
if (time_after(jiffies, timeout)) {
/* Some cables have EEPROMs that conflict with the
* PHY's on-board EEPROM so it cannot load firmware */
netif_err(efx, hw, efx->net_dev,
"If an SFP+ direct attach cable is"
" connected, please check that it complies"
" with the SFP+ specification\n");
return -ETIMEDOUT;
}
msleep(QT2025C_HEARTB_WAIT);
}
return 0;
}
static int qt2025c_wait_fw_status_good(struct ef4_nic *efx)
{
unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
int reg;
/* Wait for firmware status to look good */
for (;;) {
reg = ef4_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
if (reg < 0)
return reg;
if ((reg &
((1 << PCS_UC_STATUS_WIDTH) - 1) << PCS_UC_STATUS_LBN) >=
PCS_UC_STATUS_FW_SAVE)
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
msleep(QT2025C_FWSTART_WAIT);
}
return 0;
}
static void qt2025c_restart_firmware(struct ef4_nic *efx)
{
/* Restart microcontroller execution of firmware from RAM */
ef4_mdio_write(efx, 3, 0xe854, 0x00c0);
ef4_mdio_write(efx, 3, 0xe854, 0x0040);
msleep(50);
}
static int qt2025c_wait_reset(struct ef4_nic *efx)
{
int rc;
rc = qt2025c_wait_heartbeat(efx);
if (rc != 0)
return rc;
rc = qt2025c_wait_fw_status_good(efx);
if (rc == -ETIMEDOUT) {
/* Bug 17689: occasionally heartbeat starts but firmware status
* code never progresses beyond 0x00. Try again, once, after
* restarting execution of the firmware image. */
netif_dbg(efx, hw, efx->net_dev,
"bashing QT2025C microcontroller\n");
qt2025c_restart_firmware(efx);
rc = qt2025c_wait_heartbeat(efx);
if (rc != 0)
return rc;
rc = qt2025c_wait_fw_status_good(efx);
}
return rc;
}
static void qt2025c_firmware_id(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
u8 firmware_id[9];
size_t i;
for (i = 0; i < sizeof(firmware_id); i++)
firmware_id[i] = ef4_mdio_read(efx, MDIO_MMD_PCS,
PCS_FW_PRODUCT_CODE_1 + i);
netif_info(efx, probe, efx->net_dev,
"QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
(firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
firmware_id[3] >> 4, firmware_id[3] & 0xf,
firmware_id[4], firmware_id[5],
firmware_id[6], firmware_id[7], firmware_id[8]);
phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
((firmware_id[3] & 0x0f) << 16) |
(firmware_id[4] << 8) | firmware_id[5];
}
static void qt2025c_bug17190_workaround(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
/* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
* layers up, but PCS down (no block_lock). If we notice this state
* persisting for a couple of seconds, we switch PMA/PMD loopback
* briefly on and then off again, which is normally sufficient to
* recover it.
*/
if (efx->link_state.up ||
!ef4_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
phy_data->bug17190_in_bad_state = false;
return;
}
if (!phy_data->bug17190_in_bad_state) {
phy_data->bug17190_in_bad_state = true;
phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
return;
}
if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n");
ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_PMA_CTRL1_LOOPBACK, true);
msleep(100);
ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_PMA_CTRL1_LOOPBACK, false);
phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
}
}
static int qt2025c_select_phy_mode(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
struct falcon_board *board = falcon_board(efx);
int reg, rc, i;
u16 phy_op_mode;
/* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
* Self-Configure mode. Don't attempt any switching if we encounter
* older firmware. */
if (phy_data->firmware_ver < 0x02000100)
return 0;
/* In general we will get optimal behaviour in "SFP+ Self-Configure"
* mode; however, that powers down most of the PHY when no module is
* present, so we must use a different mode (any fixed mode will do)
* to be sure that loopbacks will work. */
phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
/* Only change mode if really necessary */
reg = ef4_mdio_read(efx, 1, 0xc319);
if ((reg & 0x0038) == phy_op_mode)
return 0;
netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n",
phy_op_mode);
/* This sequence replicates the register writes configured in the boot
* EEPROM (including the differences between board revisions), except
* that the operating mode is changed, and the PHY is prevented from
* unnecessarily reloading the main firmware image again. */
ef4_mdio_write(efx, 1, 0xc300, 0x0000);
/* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
* STOPs onto the firmware/module I2C bus to reset it, varies across
* board revisions, as the bus is connected to different GPIO/LED
* outputs on the PHY.) */
if (board->major == 0 && board->minor < 2) {
ef4_mdio_write(efx, 1, 0xc303, 0x4498);
for (i = 0; i < 9; i++) {
ef4_mdio_write(efx, 1, 0xc303, 0x4488);
ef4_mdio_write(efx, 1, 0xc303, 0x4480);
ef4_mdio_write(efx, 1, 0xc303, 0x4490);
ef4_mdio_write(efx, 1, 0xc303, 0x4498);
}
} else {
ef4_mdio_write(efx, 1, 0xc303, 0x0920);
ef4_mdio_write(efx, 1, 0xd008, 0x0004);
for (i = 0; i < 9; i++) {
ef4_mdio_write(efx, 1, 0xc303, 0x0900);
ef4_mdio_write(efx, 1, 0xd008, 0x0005);
ef4_mdio_write(efx, 1, 0xc303, 0x0920);
ef4_mdio_write(efx, 1, 0xd008, 0x0004);
}
ef4_mdio_write(efx, 1, 0xc303, 0x4900);
}
ef4_mdio_write(efx, 1, 0xc303, 0x4900);
ef4_mdio_write(efx, 1, 0xc302, 0x0004);
ef4_mdio_write(efx, 1, 0xc316, 0x0013);
ef4_mdio_write(efx, 1, 0xc318, 0x0054);
ef4_mdio_write(efx, 1, 0xc319, phy_op_mode);
ef4_mdio_write(efx, 1, 0xc31a, 0x0098);
ef4_mdio_write(efx, 3, 0x0026, 0x0e00);
ef4_mdio_write(efx, 3, 0x0027, 0x0013);
ef4_mdio_write(efx, 3, 0x0028, 0xa528);
ef4_mdio_write(efx, 1, 0xd006, 0x000a);
ef4_mdio_write(efx, 1, 0xd007, 0x0009);
ef4_mdio_write(efx, 1, 0xd008, 0x0004);
/* This additional write is not present in the boot EEPROM. It
* prevents the PHY's internal boot ROM doing another pointless (and
* slow) reload of the firmware image (the microcontroller's code
* memory is not affected by the microcontroller reset). */
ef4_mdio_write(efx, 1, 0xc317, 0x00ff);
/* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
* restart doesn't reset it. We need to do that ourselves. */
ef4_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
1 << PMA_PMD_RXIN_SEL_LBN, false);
ef4_mdio_write(efx, 1, 0xc300, 0x0002);
msleep(20);
/* Restart microcontroller execution of firmware from RAM */
qt2025c_restart_firmware(efx);
/* Wait for the microcontroller to be ready again */
rc = qt2025c_wait_reset(efx);
if (rc < 0) {
netif_err(efx, hw, efx->net_dev,
"PHY microcontroller reset during mode switch "
"timed out\n");
return rc;
}
return 0;
}
static int qt202x_reset_phy(struct ef4_nic *efx)
{
int rc;
if (efx->phy_type == PHY_TYPE_QT2025C) {
/* Wait for the reset triggered by falcon_reset_hw()
* to complete */
rc = qt2025c_wait_reset(efx);
if (rc < 0)
goto fail;
} else {
/* Reset the PHYXS MMD. This is documented as doing
* a complete soft reset. */
rc = ef4_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
QT2022C2_MAX_RESET_TIME /
QT2022C2_RESET_WAIT,
QT2022C2_RESET_WAIT);
if (rc < 0)
goto fail;
}
/* Wait 250ms for the PHY to complete bootup */
msleep(250);
falcon_board(efx)->type->init_phy(efx);
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n");
return rc;
}
static int qt202x_phy_probe(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data;
phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
phy_data->phy_mode = efx->phy_mode;
phy_data->bug17190_in_bad_state = false;
phy_data->bug17190_timer = 0;
efx->mdio.mmds = QT202X_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
return 0;
}
static int qt202x_phy_init(struct ef4_nic *efx)
{
u32 devid;
int rc;
rc = qt202x_reset_phy(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev, "PHY init failed\n");
return rc;
}
devid = ef4_mdio_read_id(efx, MDIO_MMD_PHYXS);
netif_info(efx, probe, efx->net_dev,
"PHY ID reg %x (OUI %06x model %02x revision %x)\n",
devid, ef4_mdio_id_oui(devid), ef4_mdio_id_model(devid),
ef4_mdio_id_rev(devid));
if (efx->phy_type == PHY_TYPE_QT2025C)
qt2025c_firmware_id(efx);
return 0;
}
static int qt202x_link_ok(struct ef4_nic *efx)
{
return ef4_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
}
static bool qt202x_phy_poll(struct ef4_nic *efx)
{
bool was_up = efx->link_state.up;
efx->link_state.up = qt202x_link_ok(efx);
efx->link_state.speed = 10000;
efx->link_state.fd = true;
efx->link_state.fc = efx->wanted_fc;
if (efx->phy_type == PHY_TYPE_QT2025C)
qt2025c_bug17190_workaround(efx);
return efx->link_state.up != was_up;
}
static int qt202x_phy_reconfigure(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
if (efx->phy_type == PHY_TYPE_QT2025C) {
int rc = qt2025c_select_phy_mode(efx);
if (rc)
return rc;
/* There are several different register bits which can
* disable TX (and save power) on direct-attach cables
* or optical transceivers, varying somewhat between
* firmware versions. Only 'static mode' appears to
* cover everything. */
mdio_set_flag(
&efx->mdio, efx->mdio.prtad, MDIO_MMD_PMAPMD,
PMA_PMD_FTX_CTRL2_REG, 1 << PMA_PMD_FTX_STATIC_LBN,
efx->phy_mode & PHY_MODE_TX_DISABLED ||
efx->phy_mode & PHY_MODE_LOW_POWER ||
efx->loopback_mode == LOOPBACK_PCS ||
efx->loopback_mode == LOOPBACK_PMAPMD);
} else {
/* Reset the PHY when moving from tx off to tx on */
if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
(phy_data->phy_mode & PHY_MODE_TX_DISABLED))
qt202x_reset_phy(efx);
ef4_mdio_transmit_disable(efx);
}
ef4_mdio_phy_reconfigure(efx);
phy_data->phy_mode = efx->phy_mode;
return 0;
}
static void qt202x_phy_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
static void qt202x_phy_remove(struct ef4_nic *efx)
{
/* Free the context block */
kfree(efx->phy_data);
efx->phy_data = NULL;
}
static int qt202x_phy_get_module_info(struct ef4_nic *efx,
struct ethtool_modinfo *modinfo)
{
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
return 0;
}
static int qt202x_phy_get_module_eeprom(struct ef4_nic *efx,
struct ethtool_eeprom *ee, u8 *data)
{
int mmd, reg_base, rc, i;
if (efx->phy_type == PHY_TYPE_QT2025C) {
mmd = MDIO_MMD_PCS;
reg_base = 0xd000;
} else {
mmd = MDIO_MMD_PMAPMD;
reg_base = 0x8007;
}
for (i = 0; i < ee->len; i++) {
rc = ef4_mdio_read(efx, mmd, reg_base + ee->offset + i);
if (rc < 0)
return rc;
data[i] = rc;
}
return 0;
}
const struct ef4_phy_operations falcon_qt202x_phy_ops = {
.probe = qt202x_phy_probe,
.init = qt202x_phy_init,
.reconfigure = qt202x_phy_reconfigure,
.poll = qt202x_phy_poll,
.fini = ef4_port_dummy_op_void,
.remove = qt202x_phy_remove,
.get_settings = qt202x_phy_get_settings,
.set_settings = ef4_mdio_set_settings,
.test_alive = ef4_mdio_test_alive,
.get_module_eeprom = qt202x_phy_get_module_eeprom,
.get_module_info = qt202x_phy_get_module_info,
};

View File

@@ -0,0 +1,974 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
/* Preferred number of descriptors to fill at once */
#define EF4_RX_PREFERRED_BATCH 8U
/* Number of RX buffers to recycle pages for. When creating the RX page recycle
* ring, this number is divided by the number of buffers per page to calculate
* the number of pages to store in the RX page recycle ring.
*/
#define EF4_RECYCLE_RING_SIZE_IOMMU 4096
#define EF4_RECYCLE_RING_SIZE_NOIOMMU (2 * EF4_RX_PREFERRED_BATCH)
/* Size of buffer allocated for skb header area. */
#define EF4_SKB_HEADERS 128u
/* This is the percentage fill level below which new RX descriptors
* will be added to the RX descriptor ring.
*/
static unsigned int rx_refill_threshold;
/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EF4_RX_MAX_FRAGS DIV_ROUND_UP(EF4_MAX_FRAME_LEN(EF4_MAX_MTU), \
EF4_RX_USR_BUF_SIZE)
/*
* RX maximum head room required.
*
* This must be at least 1 to prevent overflow, plus one packet-worth
* to allow pipelined receives.
*/
#define EF4_RXD_HEAD_ROOM (1 + EF4_RX_MAX_FRAGS)
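To make the head-room arithmetic concrete (both constants assumed for illustration; the real values depend on the maximum MTU and receive buffer size):

/* Assuming EF4_MAX_FRAME_LEN(EF4_MAX_MTU) ~= 9018 bytes and
 * EF4_RX_USR_BUF_SIZE = 2048 bytes:
 *   EF4_RX_MAX_FRAGS  = DIV_ROUND_UP(9018, 2048) = 5
 *   EF4_RXD_HEAD_ROOM = 1 + 5                    = 6
 */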
static inline u8 *ef4_rx_buf_va(struct ef4_rx_buffer *buf)
{
return page_address(buf->page) + buf->page_offset;
}
static inline u32 ef4_rx_buf_hash(struct ef4_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
const u8 *data = eh + efx->rx_packet_hash_offset;
return (u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
(u32)data[3] << 24;
#endif
}
static inline struct ef4_rx_buffer *
ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
{
if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
return ef4_rx_buffer(rx_queue, 0);
else
return rx_buf + 1;
}
static inline void ef4_sync_rx_buffer(struct ef4_nic *efx,
struct ef4_rx_buffer *rx_buf,
unsigned int len)
{
dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
DMA_FROM_DEVICE);
}
void ef4_rx_config_page_split(struct ef4_nic *efx)
{
efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
EF4_RX_BUF_ALIGNMENT);
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
((PAGE_SIZE - sizeof(struct ef4_rx_page_state)) /
efx->rx_page_buf_step);
efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
efx->rx_bufs_per_page;
efx->rx_pages_per_batch = DIV_ROUND_UP(EF4_RX_PREFERRED_BATCH,
efx->rx_bufs_per_page);
}
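One possible outcome of the page-split calculation, with all numbers assumed for illustration:

/* With PAGE_SIZE = 4096, rx_buffer_order = 0, an 8-byte
 * struct ef4_rx_page_state, and rx_page_buf_step aligning to 1920:
 *   rx_bufs_per_page   = (4096 - 8) / 1920  = 2
 *   rx_buffer_truesize = 4096 / 2           = 2048
 *   rx_pages_per_batch = DIV_ROUND_UP(8, 2) = 4
 */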
/* Check the RX page recycle ring for a page that can be reused. */
static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
{
struct ef4_nic *efx = rx_queue->efx;
struct page *page;
struct ef4_rx_page_state *state;
unsigned index;
index = rx_queue->page_remove & rx_queue->page_ptr_mask;
page = rx_queue->page_ring[index];
if (page == NULL)
return NULL;
rx_queue->page_ring[index] = NULL;
/* page_remove cannot exceed page_add. */
if (rx_queue->page_remove != rx_queue->page_add)
++rx_queue->page_remove;
/* If page_count is 1 then we hold the only reference to this page. */
if (page_count(page) == 1) {
++rx_queue->page_recycle_count;
return page;
} else {
state = page_address(page);
dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE);
put_page(page);
++rx_queue->page_recycle_failed;
}
return NULL;
}
/**
* ef4_init_rx_buffers - create EF4_RX_PREFERRED_BATCH page-based RX buffers
*
* @rx_queue: Efx RX queue
* @atomic: Perform atomic (GFP_ATOMIC) rather than blocking allocations
*
* This allocates a batch of pages, maps them for DMA, and populates
* struct ef4_rx_buffers for each one. Return a negative error code or
* 0 on success. If a single page can be used for multiple buffers,
* then the page will either be inserted fully, or not at all.
*/
static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
{
struct ef4_nic *efx = rx_queue->efx;
struct ef4_rx_buffer *rx_buf;
struct page *page;
unsigned int page_offset;
struct ef4_rx_page_state *state;
dma_addr_t dma_addr;
unsigned index, count;
count = 0;
do {
page = ef4_reuse_page(rx_queue);
if (page == NULL) {
page = alloc_pages(__GFP_COLD | __GFP_COMP |
(atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
return -ENOMEM;
dma_addr =
dma_map_page(&efx->pci_dev->dev, page, 0,
PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
dma_addr))) {
__free_pages(page, efx->rx_buffer_order);
return -EIO;
}
state = page_address(page);
state->dma_addr = dma_addr;
} else {
state = page_address(page);
dma_addr = state->dma_addr;
}
dma_addr += sizeof(struct ef4_rx_page_state);
page_offset = sizeof(struct ef4_rx_page_state);
do {
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = ef4_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
rx_buf->page = page;
rx_buf->page_offset = page_offset + efx->rx_ip_align;
rx_buf->len = efx->rx_dma_len;
rx_buf->flags = 0;
++rx_queue->added_count;
get_page(page);
dma_addr += efx->rx_page_buf_step;
page_offset += efx->rx_page_buf_step;
} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;
} while (++count < efx->rx_pages_per_batch);
return 0;
}
/* Unmap a DMA-mapped page. This function is only called for the final RX
* buffer in a page.
*/
static void ef4_unmap_rx_buffer(struct ef4_nic *efx,
struct ef4_rx_buffer *rx_buf)
{
struct page *page = rx_buf->page;
if (page) {
struct ef4_rx_page_state *state = page_address(page);
dma_unmap_page(&efx->pci_dev->dev,
state->dma_addr,
PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE);
}
}
static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
struct ef4_rx_buffer *rx_buf,
unsigned int num_bufs)
{
do {
if (rx_buf->page) {
put_page(rx_buf->page);
rx_buf->page = NULL;
}
rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
} while (--num_bufs);
}
/* Attempt to recycle the page if there is an RX recycle ring; the page can
* only be added if this is the final RX buffer, to prevent pages being used in
* the descriptor ring and appearing in the recycle ring simultaneously.
*/
static void ef4_recycle_rx_page(struct ef4_channel *channel,
struct ef4_rx_buffer *rx_buf)
{
struct page *page = rx_buf->page;
struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
struct ef4_nic *efx = rx_queue->efx;
unsigned index;
/* Only recycle the page after processing the final buffer. */
if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
return;
index = rx_queue->page_add & rx_queue->page_ptr_mask;
if (rx_queue->page_ring[index] == NULL) {
unsigned read_index = rx_queue->page_remove &
rx_queue->page_ptr_mask;
/* The next slot in the recycle ring is available, but
* increment page_remove if the read pointer currently
* points here.
*/
if (read_index == index)
++rx_queue->page_remove;
rx_queue->page_ring[index] = page;
++rx_queue->page_add;
return;
}
++rx_queue->page_recycle_full;
ef4_unmap_rx_buffer(efx, rx_buf);
put_page(rx_buf->page);
}
static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
struct ef4_rx_buffer *rx_buf)
{
/* Release the page reference we hold for the buffer. */
if (rx_buf->page)
put_page(rx_buf->page);
/* If this is the last buffer in a page, unmap and free it. */
if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
ef4_free_rx_buffers(rx_queue, rx_buf, 1);
}
rx_buf->page = NULL;
}
/* Recycle the pages that are used by buffers that have just been received. */
static void ef4_recycle_rx_pages(struct ef4_channel *channel,
struct ef4_rx_buffer *rx_buf,
unsigned int n_frags)
{
struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
do {
ef4_recycle_rx_page(channel, rx_buf);
rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
} while (--n_frags);
}
static void ef4_discard_rx_packet(struct ef4_channel *channel,
struct ef4_rx_buffer *rx_buf,
unsigned int n_frags)
{
struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
ef4_recycle_rx_pages(channel, rx_buf, n_frags);
ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
}
/**
* ef4_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
* @atomic: Perform atomic (GFP_ATOMIC) rather than blocking allocations
*
* This will aim to fill the RX descriptor queue up to
* @rx_queue->max_fill. If there is insufficient atomic
* memory to do so, a slow fill will be scheduled.
*
* The caller must provide serialisation (none is used here). In practice,
* this means this function must run from the NAPI handler, or be called
* when NAPI is disabled.
*/
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
{
struct ef4_nic *efx = rx_queue->efx;
unsigned int fill_level, batch_size;
int space, rc = 0;
if (!rx_queue->refill_enabled)
return;
/* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
if (fill_level >= rx_queue->fast_fill_trigger)
goto out;
/* Record minimum fill level */
if (unlikely(fill_level < rx_queue->min_fill)) {
if (fill_level)
rx_queue->min_fill = fill_level;
}
batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
space = rx_queue->max_fill - fill_level;
EF4_BUG_ON_PARANOID(space < batch_size);
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filling descriptor ring from"
" level %d to level %d\n",
ef4_rx_queue_index(rx_queue), fill_level,
rx_queue->max_fill);
do {
rc = ef4_init_rx_buffers(rx_queue, atomic);
if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */
if (rx_queue->added_count == rx_queue->removed_count)
ef4_schedule_slow_fill(rx_queue);
goto out;
}
} while ((space -= batch_size) >= batch_size);
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filled descriptor ring "
"to level %d\n", ef4_rx_queue_index(rx_queue),
rx_queue->added_count - rx_queue->removed_count);
out:
if (rx_queue->notified_count != rx_queue->added_count)
ef4_nic_notify_rx_desc(rx_queue);
}
void ef4_rx_slow_fill(unsigned long context)
{
struct ef4_rx_queue *rx_queue = (struct ef4_rx_queue *)context;
/* Post an event to cause NAPI to run and refill the queue */
ef4_nic_generate_fill_event(rx_queue);
++rx_queue->slow_fill_count;
}
static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
struct ef4_rx_buffer *rx_buf,
int len)
{
struct ef4_nic *efx = rx_queue->efx;
unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
if (likely(len <= max_len))
return;
/* The packet must be discarded, but this is only a fatal error
* if the caller indicated it was
*/
rx_buf->flags |= EF4_RX_PKT_DISCARD;
if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) {
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
" RX queue %d seriously overlength "
"RX event (0x%x > 0x%x+0x%x). Leaking\n",
ef4_rx_queue_index(rx_queue), len, max_len,
efx->type->rx_buffer_padding);
ef4_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
" RX queue %d overlength RX event "
"(0x%x > 0x%x)\n",
ef4_rx_queue_index(rx_queue), len, max_len);
}
ef4_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Pass a received packet up through GRO. GRO can handle pages
* regardless of checksum state and skbs with a good checksum.
*/
static void
ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
struct ef4_nic *efx = channel->efx;
struct sk_buff *skb;
skb = napi_get_frags(napi);
if (unlikely(!skb)) {
struct ef4_rx_queue *rx_queue;
rx_queue = ef4_channel_get_rx_queue(channel);
ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
return;
}
if (efx->net_dev->features & NETIF_F_RXHASH)
skb_set_hash(skb, ef4_rx_buf_hash(efx, eh),
PKT_HASH_TYPE_L3);
skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
for (;;) {
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buf->page, rx_buf->page_offset,
rx_buf->len);
rx_buf->page = NULL;
skb->len += rx_buf->len;
if (skb_shinfo(skb)->nr_frags == n_frags)
break;
rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
}
skb->data_len = skb->len;
skb->truesize += n_frags * efx->rx_buffer_truesize;
skb_record_rx_queue(skb, channel->rx_queue.core_index);
gro_result = napi_gro_frags(napi);
if (gro_result != GRO_DROP)
channel->irq_mod_score += 2;
}
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *ef4_rx_mk_skb(struct ef4_channel *channel,
struct ef4_rx_buffer *rx_buf,
unsigned int n_frags,
u8 *eh, int hdr_len)
{
struct ef4_nic *efx = channel->efx;
struct sk_buff *skb;
/* Allocate an SKB to store the headers */
skb = netdev_alloc_skb(efx->net_dev,
efx->rx_ip_align + efx->rx_prefix_size +
hdr_len);
if (unlikely(skb == NULL)) {
atomic_inc(&efx->n_rx_noskb_drops);
return NULL;
}
EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);
memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
efx->rx_prefix_size + hdr_len);
skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
__skb_put(skb, hdr_len);
/* Append the remaining page(s) onto the frag list */
if (rx_buf->len > hdr_len) {
rx_buf->page_offset += hdr_len;
rx_buf->len -= hdr_len;
for (;;) {
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buf->page, rx_buf->page_offset,
rx_buf->len);
rx_buf->page = NULL;
skb->len += rx_buf->len;
skb->data_len += rx_buf->len;
if (skb_shinfo(skb)->nr_frags == n_frags)
break;
rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
}
} else {
__free_pages(rx_buf->page, efx->rx_buffer_order);
rx_buf->page = NULL;
n_frags = 0;
}
skb->truesize += n_frags * efx->rx_buffer_truesize;
/* Move past the ethernet header */
skb->protocol = eth_type_trans(skb, efx->net_dev);
skb_mark_napi_id(skb, &channel->napi_str);
return skb;
}
void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags)
{
struct ef4_nic *efx = rx_queue->efx;
struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
struct ef4_rx_buffer *rx_buf;
rx_queue->rx_packets++;
rx_buf = ef4_rx_buffer(rx_queue, index);
rx_buf->flags |= flags;
/* Validate the number of fragments and completed length */
if (n_frags == 1) {
if (!(flags & EF4_RX_PKT_PREFIX_LEN))
ef4_rx_packet__check_len(rx_queue, rx_buf, len);
} else if (unlikely(n_frags > EF4_RX_MAX_FRAGS) ||
unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
unlikely(len > n_frags * efx->rx_dma_len) ||
unlikely(!efx->rx_scatter)) {
/* If this isn't an explicit discard request, either
* the hardware or the driver is broken.
*/
WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
rx_buf->flags |= EF4_RX_PKT_DISCARD;
}
netif_vdbg(efx, rx_status, efx->net_dev,
"RX queue %d received ids %x-%x len %d %s%s\n",
ef4_rx_queue_index(rx_queue), index,
(index + n_frags - 1) & rx_queue->ptr_mask, len,
(rx_buf->flags & EF4_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
(rx_buf->flags & EF4_RX_PKT_DISCARD) ? " [DISCARD]" : "");
/* Discard packet, if instructed to do so. Process the
* previous receive first.
*/
if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
ef4_rx_flush_packet(channel);
ef4_discard_rx_packet(channel, rx_buf, n_frags);
return;
}
if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
rx_buf->len = len;
/* Release and/or sync the DMA mapping - assumes all RX buffers
* consumed in-order per RX queue.
*/
ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
*/
prefetch(ef4_rx_buf_va(rx_buf));
rx_buf->page_offset += efx->rx_prefix_size;
rx_buf->len -= efx->rx_prefix_size;
if (n_frags > 1) {
/* Release/sync DMA mapping for additional fragments.
* Fix length for last fragment.
*/
unsigned int tail_frags = n_frags - 1;
for (;;) {
rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
if (--tail_frags == 0)
break;
ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
}
rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
}
/* All fragments have been DMA-synced, so recycle pages. */
rx_buf = ef4_rx_buffer(rx_queue, index);
ef4_recycle_rx_pages(channel, rx_buf, n_frags);
/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
*/
ef4_rx_flush_packet(channel);
channel->rx_pkt_n_frags = n_frags;
channel->rx_pkt_index = index;
}
static void ef4_rx_deliver(struct ef4_channel *channel, u8 *eh,
struct ef4_rx_buffer *rx_buf,
unsigned int n_frags)
{
struct sk_buff *skb;
u16 hdr_len = min_t(u16, rx_buf->len, EF4_SKB_HEADERS);
skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
if (unlikely(skb == NULL)) {
struct ef4_rx_queue *rx_queue;
rx_queue = ef4_channel_get_rx_queue(channel);
ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
return;
}
skb_record_rx_queue(skb, channel->rx_queue.core_index);
/* Set the SKB flags */
skb_checksum_none_assert(skb);
if (likely(rx_buf->flags & EF4_RX_PKT_CSUMMED))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (channel->type->receive_skb)
if (channel->type->receive_skb(channel, skb))
return;
/* Pass the packet up */
netif_receive_skb(skb);
}
/* Handle a received packet. Second half: Touches packet payload. */
void __ef4_rx_packet(struct ef4_channel *channel)
{
struct ef4_nic *efx = channel->efx;
struct ef4_rx_buffer *rx_buf =
ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
u8 *eh = ef4_rx_buf_va(rx_buf);
/* Read length from the prefix if necessary. This already
* excludes the length of the prefix itself.
*/
if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
rx_buf->len = le16_to_cpup((__le16 *)
(eh + efx->rx_packet_len_offset));
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
struct ef4_rx_queue *rx_queue;
ef4_loopback_rx_packet(efx, eh, rx_buf->len);
rx_queue = ef4_channel_get_rx_queue(channel);
ef4_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags);
goto out;
}
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;
if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb &&
!ef4_channel_busy_polling(channel))
ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
else
ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
channel->rx_pkt_n_frags = 0;
}
int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue)
{
struct ef4_nic *efx = rx_queue->efx;
unsigned int entries;
int rc;
/* Create the smallest power-of-two aligned ring */
entries = max(roundup_pow_of_two(efx->rxq_entries), EF4_MIN_DMAQ_SIZE);
EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
rx_queue->ptr_mask = entries - 1;
netif_dbg(efx, probe, efx->net_dev,
"creating RX queue %d size %#x mask %#x\n",
ef4_rx_queue_index(rx_queue), efx->rxq_entries,
rx_queue->ptr_mask);
/* Allocate RX buffers */
rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
GFP_KERNEL);
if (!rx_queue->buffer)
return -ENOMEM;
rc = ef4_nic_probe_rx(rx_queue);
if (rc) {
kfree(rx_queue->buffer);
rx_queue->buffer = NULL;
}
return rc;
}
static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
struct ef4_rx_queue *rx_queue)
{
unsigned int bufs_in_recycle_ring, page_ring_size;
/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
#else
if (iommu_present(&pci_bus_type))
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
else
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */
page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
sizeof(*rx_queue->page_ring), GFP_KERNEL);
rx_queue->page_ptr_mask = page_ring_size - 1;
}
void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
{
struct ef4_nic *efx = rx_queue->efx;
unsigned int max_fill, trigger, max_trigger;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"initialising RX queue %d\n", ef4_rx_queue_index(rx_queue));
/* Initialise ptr fields */
rx_queue->added_count = 0;
rx_queue->notified_count = 0;
rx_queue->removed_count = 0;
rx_queue->min_fill = -1U;
ef4_init_rx_recycle_ring(efx, rx_queue);
rx_queue->page_remove = 0;
rx_queue->page_add = rx_queue->page_ptr_mask + 1;
rx_queue->page_recycle_count = 0;
rx_queue->page_recycle_failed = 0;
rx_queue->page_recycle_full = 0;
/* Initialise limit fields */
max_fill = efx->rxq_entries - EF4_RXD_HEAD_ROOM;
max_trigger =
max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
if (rx_refill_threshold != 0) {
trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
if (trigger > max_trigger)
trigger = max_trigger;
} else {
trigger = max_trigger;
}
rx_queue->max_fill = max_fill;
rx_queue->fast_fill_trigger = trigger;
rx_queue->refill_enabled = true;
/* Set up RX descriptor ring */
ef4_nic_init_rx(rx_queue);
}
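A worked example of the limit calculation above, with assumed values consistent with the page-split sketch earlier:

/* With rxq_entries = 512, EF4_RXD_HEAD_ROOM = 6, a refill batch of
 * 4 pages * 2 buffers = 8, and rx_refill_threshold = 90:
 *   max_fill    = 512 - 6          = 506
 *   max_trigger = 506 - 8          = 498
 *   trigger     = 506 * 90 / 100   = 455  (below max_trigger, so kept)
 */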
void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
{
int i;
struct ef4_nic *efx = rx_queue->efx;
struct ef4_rx_buffer *rx_buf;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));
del_timer_sync(&rx_queue->slow_fill);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
for (i = rx_queue->removed_count; i < rx_queue->added_count;
i++) {
unsigned index = i & rx_queue->ptr_mask;
rx_buf = ef4_rx_buffer(rx_queue, index);
ef4_fini_rx_buffer(rx_queue, rx_buf);
}
}
/* Unmap and release the pages in the recycle ring. Remove the ring. */
for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
struct page *page = rx_queue->page_ring[i];
struct ef4_rx_page_state *state;
if (page == NULL)
continue;
state = page_address(page);
dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE);
put_page(page);
}
kfree(rx_queue->page_ring);
rx_queue->page_ring = NULL;
}
void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue)
{
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"destroying RX queue %d\n", ef4_rx_queue_index(rx_queue));
ef4_nic_remove_rx(rx_queue);
kfree(rx_queue->buffer);
rx_queue->buffer = NULL;
}
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)");
#ifdef CONFIG_RFS_ACCEL
int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct ef4_nic *efx = netdev_priv(net_dev);
struct ef4_channel *channel;
struct ef4_filter_spec spec;
struct flow_keys fk;
int rc;
if (flow_id == RPS_FLOW_ID_INVALID)
return -EINVAL;
if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
return -EPROTONOSUPPORT;
if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
return -EPROTONOSUPPORT;
ef4_filter_init_rx(&spec, EF4_FILTER_PRI_HINT,
efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
rxq_index);
spec.match_flags =
EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
spec.ether_type = fk.basic.n_proto;
spec.ip_proto = fk.basic.ip_proto;
if (fk.basic.n_proto == htons(ETH_P_IP)) {
spec.rem_host[0] = fk.addrs.v4addrs.src;
spec.loc_host[0] = fk.addrs.v4addrs.dst;
} else {
memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
}
spec.rem_port = fk.ports.src;
spec.loc_port = fk.ports.dst;
rc = efx->type->filter_rfs_insert(efx, &spec);
if (rc < 0)
return rc;
/* Remember this so we can check whether to expire the filter later */
channel = ef4_get_channel(efx, rxq_index);
channel->rps_flow_id[rc] = flow_id;
++channel->rfs_filters_added;
if (spec.ether_type == htons(ETH_P_IP))
netif_info(efx, rx_status, efx->net_dev,
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
ntohs(spec.loc_port), rxq_index, flow_id, rc);
else
netif_info(efx, rx_status, efx->net_dev,
"steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
ntohs(spec.loc_port), rxq_index, flow_id, rc);
return rc;
}
bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned int quota)
{
bool (*expire_one)(struct ef4_nic *efx, u32 flow_id, unsigned int index);
unsigned int channel_idx, index, size;
u32 flow_id;
if (!spin_trylock_bh(&efx->filter_lock))
return false;
expire_one = efx->type->filter_rfs_expire_one;
channel_idx = efx->rps_expire_channel;
index = efx->rps_expire_index;
size = efx->type->max_rx_ip_filters;
while (quota--) {
struct ef4_channel *channel = ef4_get_channel(efx, channel_idx);
flow_id = channel->rps_flow_id[index];
if (flow_id != RPS_FLOW_ID_INVALID &&
expire_one(efx, flow_id, index)) {
netif_info(efx, rx_status, efx->net_dev,
"expired filter %d [queue %u flow %u]\n",
index, channel_idx, flow_id);
channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
}
if (++index == size) {
if (++channel_idx == efx->n_channels)
channel_idx = 0;
index = 0;
}
}
efx->rps_expire_channel = channel_idx;
efx->rps_expire_index = index;
spin_unlock_bh(&efx->filter_lock);
return true;
}
#endif /* CONFIG_RFS_ACCEL */
/**
* ef4_filter_is_mc_recipient - test whether spec is a multicast recipient
* @spec: Specification to test
*
* Return: %true if the specification is a non-drop RX filter that
* matches a local MAC address I/G bit value of 1 or matches a local
* IPv4 or IPv6 address value in the respective multicast address
* range. Otherwise %false.
*/
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec)
{
if (!(spec->flags & EF4_FILTER_FLAG_RX) ||
spec->dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
return false;
if (spec->match_flags &
(EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG) &&
is_multicast_ether_addr(spec->loc_mac))
return true;
if ((spec->match_flags &
(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) ==
(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) {
if (spec->ether_type == htons(ETH_P_IP) &&
ipv4_is_multicast(spec->loc_host[0]))
return true;
if (spec->ether_type == htons(ETH_P_IPV6) &&
((const u8 *)spec->loc_host)[0] == 0xff)
return true;
}
return false;
}

View File

@@ -0,0 +1,808 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
/* IRQ latency can be enormous because:
* - All IRQs may be disabled on a CPU for a *long* time by e.g. a
* slow serial console or an old IDE driver doing error recovery
* - The PREEMPT_RT patches mostly deal with this, but also allow a
* tasklet or normal task to be given higher priority than our IRQ
* threads
* Try to avoid blaming the hardware for this.
*/
#define IRQ_TIMEOUT HZ
/*
* Loopback test packet structure
*
* The self-test should stress every RSS vector, and unfortunately
* Falcon only performs RSS on TCP/UDP packets.
*/
struct ef4_loopback_payload {
struct ethhdr header;
struct iphdr ip;
struct udphdr udp;
__be16 iteration;
char msg[64];
} __packed;
/* Loopback test source MAC address */
static const u8 payload_source[ETH_ALEN] __aligned(2) = {
0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};
static const char payload_msg[] =
"Hello world! This is an Efx loopback test in progress!";
/* Interrupt mode names */
static const unsigned int ef4_interrupt_mode_max = EF4_INT_MODE_MAX;
static const char *const ef4_interrupt_mode_names[] = {
[EF4_INT_MODE_MSIX] = "MSI-X",
[EF4_INT_MODE_MSI] = "MSI",
[EF4_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
STRING_TABLE_LOOKUP(efx->interrupt_mode, ef4_interrupt_mode)
/**
* struct ef4_loopback_state - persistent state during a loopback selftest
* @flush: Drop all packets in ef4_loopback_rx_packet
* @packet_count: Number of packets being used in this test
* @skbs: An array of skbs transmitted
* @offload_csum: Checksums are being offloaded
* @rx_good: RX good packet count
* @rx_bad: RX bad packet count
* @payload: Payload used in tests
*/
struct ef4_loopback_state {
bool flush;
int packet_count;
struct sk_buff **skbs;
bool offload_csum;
atomic_t rx_good;
atomic_t rx_bad;
struct ef4_loopback_payload payload;
};
/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000
/**************************************************************************
*
* MII, NVRAM and register tests
*
**************************************************************************/
static int ef4_test_phy_alive(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
int rc = 0;
if (efx->phy_op->test_alive) {
rc = efx->phy_op->test_alive(efx);
tests->phy_alive = rc ? -1 : 1;
}
return rc;
}
static int ef4_test_nvram(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
int rc = 0;
if (efx->type->test_nvram) {
rc = efx->type->test_nvram(efx);
if (rc == -EPERM)
rc = 0;
else
tests->nvram = rc ? -1 : 1;
}
return rc;
}
/**************************************************************************
*
* Interrupt and event queue testing
*
**************************************************************************/
/* Test generation and receipt of interrupts */
static int ef4_test_interrupts(struct ef4_nic *efx,
struct ef4_self_tests *tests)
{
unsigned long timeout, wait;
int cpu;
int rc;
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
rc = ef4_nic_irq_test_start(efx);
if (rc == -ENOTSUPP) {
netif_dbg(efx, drv, efx->net_dev,
"direct interrupt testing not supported\n");
tests->interrupt = 0;
return 0;
}
timeout = jiffies + IRQ_TIMEOUT;
wait = 1;
/* Wait for arrival of test interrupt. */
netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
do {
schedule_timeout_uninterruptible(wait);
cpu = ef4_nic_irq_test_irq_cpu(efx);
if (cpu >= 0)
goto success;
wait *= 2;
} while (time_before(jiffies, timeout));
netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
return -ETIMEDOUT;
success:
netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
INT_MODE(efx), cpu);
tests->interrupt = 1;
return 0;
}
/* Test generation and receipt of interrupting events */
static int ef4_test_eventq_irq(struct ef4_nic *efx,
struct ef4_self_tests *tests)
{
struct ef4_channel *channel;
unsigned int read_ptr[EF4_MAX_CHANNELS];
unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
unsigned long timeout, wait;
BUILD_BUG_ON(EF4_MAX_CHANNELS > BITS_PER_LONG);
ef4_for_each_channel(channel, efx) {
read_ptr[channel->channel] = channel->eventq_read_ptr;
set_bit(channel->channel, &dma_pend);
set_bit(channel->channel, &int_pend);
ef4_nic_event_test_start(channel);
}
timeout = jiffies + IRQ_TIMEOUT;
wait = 1;
/* Wait for arrival of interrupts. NAPI processing may or may
* not complete in time, but we can cope in any case.
*/
do {
schedule_timeout_uninterruptible(wait);
ef4_for_each_channel(channel, efx) {
ef4_stop_eventq(channel);
if (channel->eventq_read_ptr !=
read_ptr[channel->channel]) {
set_bit(channel->channel, &napi_ran);
clear_bit(channel->channel, &dma_pend);
clear_bit(channel->channel, &int_pend);
} else {
if (ef4_nic_event_present(channel))
clear_bit(channel->channel, &dma_pend);
if (ef4_nic_event_test_irq_cpu(channel) >= 0)
clear_bit(channel->channel, &int_pend);
}
ef4_start_eventq(channel);
}
wait *= 2;
} while ((dma_pend || int_pend) && time_before(jiffies, timeout));
ef4_for_each_channel(channel, efx) {
bool dma_seen = !test_bit(channel->channel, &dma_pend);
bool int_seen = !test_bit(channel->channel, &int_pend);
tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
if (dma_seen && int_seen) {
netif_dbg(efx, drv, efx->net_dev,
"channel %d event queue passed (with%s NAPI)\n",
channel->channel,
test_bit(channel->channel, &napi_ran) ?
"" : "out");
} else {
/* Report failure and whether either interrupt or DMA
* worked
*/
netif_err(efx, drv, efx->net_dev,
"channel %d timed out waiting for event queue\n",
channel->channel);
if (int_seen)
netif_err(efx, drv, efx->net_dev,
"channel %d saw interrupt "
"during event queue test\n",
channel->channel);
if (dma_seen)
netif_err(efx, drv, efx->net_dev,
"channel %d event was generated, but "
"failed to trigger an interrupt\n",
channel->channel);
}
}
return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}
static int ef4_test_phy(struct ef4_nic *efx, struct ef4_self_tests *tests,
unsigned flags)
{
int rc;
if (!efx->phy_op->run_tests)
return 0;
mutex_lock(&efx->mac_lock);
rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
if (rc == -EPERM)
rc = 0;
else
netif_info(efx, drv, efx->net_dev,
"%s phy selftest\n", rc ? "Failed" : "Passed");
return rc;
}
/**************************************************************************
*
* Loopback testing
 * NB: only one loopback test can run at a time.
*
**************************************************************************/
/* Loopback test RX callback
* This is called for each received packet during loopback testing.
*/
void ef4_loopback_rx_packet(struct ef4_nic *efx,
const char *buf_ptr, int pkt_len)
{
struct ef4_loopback_state *state = efx->loopback_selftest;
struct ef4_loopback_payload *received;
struct ef4_loopback_payload *payload;
BUG_ON(!buf_ptr);
/* If we are just flushing, then drop the packet */
if ((state == NULL) || state->flush)
return;
payload = &state->payload;
received = (struct ef4_loopback_payload *) buf_ptr;
received->ip.saddr = payload->ip.saddr;
if (state->offload_csum)
received->ip.check = payload->ip.check;
/* Check that header exists */
if (pkt_len < sizeof(received->header)) {
netif_err(efx, drv, efx->net_dev,
"saw runt RX packet (length %d) in %s loopback "
"test\n", pkt_len, LOOPBACK_MODE(efx));
goto err;
}
/* Check that the ethernet header exists */
if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
netif_err(efx, drv, efx->net_dev,
"saw non-loopback RX packet in %s loopback test\n",
LOOPBACK_MODE(efx));
goto err;
}
/* Check packet length */
if (pkt_len != sizeof(*payload)) {
netif_err(efx, drv, efx->net_dev,
"saw incorrect RX packet length %d (wanted %d) in "
"%s loopback test\n", pkt_len, (int)sizeof(*payload),
LOOPBACK_MODE(efx));
goto err;
}
/* Check that IP header matches */
if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
netif_err(efx, drv, efx->net_dev,
"saw corrupted IP header in %s loopback test\n",
LOOPBACK_MODE(efx));
goto err;
}
/* Check that msg and padding matches */
if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
netif_err(efx, drv, efx->net_dev,
"saw corrupted RX packet in %s loopback test\n",
LOOPBACK_MODE(efx));
goto err;
}
/* Check that iteration matches */
if (received->iteration != payload->iteration) {
netif_err(efx, drv, efx->net_dev,
"saw RX packet from iteration %d (wanted %d) in "
"%s loopback test\n", ntohs(received->iteration),
ntohs(payload->iteration), LOOPBACK_MODE(efx));
goto err;
}
/* Increase correct RX count */
netif_vdbg(efx, drv, efx->net_dev,
"got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
atomic_inc(&state->rx_good);
return;
err:
#ifdef DEBUG
if (atomic_read(&state->rx_bad) == 0) {
netif_err(efx, drv, efx->net_dev, "received packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
buf_ptr, pkt_len, 0);
netif_err(efx, drv, efx->net_dev, "expected packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
&state->payload, sizeof(state->payload), 0);
}
#endif
atomic_inc(&state->rx_bad);
}
/* Initialise an ef4_selftest_state for a new iteration */
static void ef4_iterate_state(struct ef4_nic *efx)
{
struct ef4_loopback_state *state = efx->loopback_selftest;
struct net_device *net_dev = efx->net_dev;
struct ef4_loopback_payload *payload = &state->payload;
/* Initialise the layerII header */
ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
payload->header.h_proto = htons(ETH_P_IP);
/* saddr set later and used as incrementing count */
payload->ip.daddr = htonl(INADDR_LOOPBACK);
payload->ip.ihl = 5;
payload->ip.check = (__force __sum16) htons(0xdead);
payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
payload->ip.version = IPVERSION;
payload->ip.protocol = IPPROTO_UDP;
/* Initialise udp header */
payload->udp.source = 0;
payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
sizeof(struct iphdr));
payload->udp.check = 0; /* checksum ignored */
/* Fill out payload */
payload->iteration = htons(ntohs(payload->iteration) + 1);
memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
/* Fill out remaining state members */
atomic_set(&state->rx_good, 0);
atomic_set(&state->rx_bad, 0);
smp_wmb();
}
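/* Illustration, not driver code: the length fields above must stay
 * consistent -- ip.tot_len covers everything after the Ethernet header and
 * udp.len everything after the IP header. A minimal sketch of that
 * arithmetic, assuming a 64-byte msg[] (the real size comes from
 * payload_msg); compile it separately if you want to run it.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const int eth_hlen = 14, ip_hlen = 20, udp_hlen = 8;
	const int iter_len = 2, msg_len = 64;	/* msg_len assumed */
	int total = eth_hlen + ip_hlen + udp_hlen + iter_len + msg_len;

	printf("ip.tot_len = %d\n", total - eth_hlen);
	printf("udp.len    = %d\n", total - eth_hlen - ip_hlen);
	return 0;
}
#endif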
static int ef4_begin_loopback(struct ef4_tx_queue *tx_queue)
{
struct ef4_nic *efx = tx_queue->efx;
struct ef4_loopback_state *state = efx->loopback_selftest;
struct ef4_loopback_payload *payload;
struct sk_buff *skb;
int i;
netdev_tx_t rc;
/* Transmit N copies of buffer */
for (i = 0; i < state->packet_count; i++) {
/* Allocate an skb, holding an extra reference for
* transmit completion counting */
skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
if (!skb)
return -ENOMEM;
state->skbs[i] = skb;
skb_get(skb);
		/* Copy the payload in, incrementing the source address to
		 * exercise the RSS vectors */
payload = ((struct ef4_loopback_payload *)
skb_put(skb, sizeof(state->payload)));
memcpy(payload, &state->payload, sizeof(state->payload));
payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
/* Ensure everything we've written is visible to the
* interrupt handler. */
smp_wmb();
netif_tx_lock_bh(efx->net_dev);
rc = ef4_enqueue_skb(tx_queue, skb);
netif_tx_unlock_bh(efx->net_dev);
if (rc != NETDEV_TX_OK) {
netif_err(efx, drv, efx->net_dev,
"TX queue %d could not transmit packet %d of "
"%d in %s loopback test\n", tx_queue->queue,
i + 1, state->packet_count,
LOOPBACK_MODE(efx));
/* Defer cleaning up the other skbs for the caller */
kfree_skb(skb);
return -EPIPE;
}
}
return 0;
}
static int ef4_poll_loopback(struct ef4_nic *efx)
{
struct ef4_loopback_state *state = efx->loopback_selftest;
return atomic_read(&state->rx_good) == state->packet_count;
}
static int ef4_end_loopback(struct ef4_tx_queue *tx_queue,
struct ef4_loopback_self_tests *lb_tests)
{
struct ef4_nic *efx = tx_queue->efx;
struct ef4_loopback_state *state = efx->loopback_selftest;
struct sk_buff *skb;
int tx_done = 0, rx_good, rx_bad;
int i, rc = 0;
netif_tx_lock_bh(efx->net_dev);
	/* Count the number of TX completions and decrement the refcnt. Any
	 * skbs not already completed will be freed when the queue is flushed */
for (i = 0; i < state->packet_count; i++) {
skb = state->skbs[i];
if (skb && !skb_shared(skb))
++tx_done;
dev_kfree_skb(skb);
}
netif_tx_unlock_bh(efx->net_dev);
/* Check TX completion and received packet counts */
rx_good = atomic_read(&state->rx_good);
rx_bad = atomic_read(&state->rx_bad);
if (tx_done != state->packet_count) {
/* Don't free the skbs; they will be picked up on TX
* overflow or channel teardown.
*/
netif_err(efx, drv, efx->net_dev,
"TX queue %d saw only %d out of an expected %d "
"TX completion events in %s loopback test\n",
tx_queue->queue, tx_done, state->packet_count,
LOOPBACK_MODE(efx));
rc = -ETIMEDOUT;
		/* Fall through so we see the RX errors as well */
}
/* We may always be up to a flush away from our desired packet total */
if (rx_good != state->packet_count) {
netif_dbg(efx, drv, efx->net_dev,
"TX queue %d saw only %d out of an expected %d "
"received packets in %s loopback test\n",
tx_queue->queue, rx_good, state->packet_count,
LOOPBACK_MODE(efx));
rc = -ETIMEDOUT;
/* Fall through */
}
/* Update loopback test structure */
lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
lb_tests->tx_done[tx_queue->queue] += tx_done;
lb_tests->rx_good += rx_good;
lb_tests->rx_bad += rx_bad;
return rc;
}
static int
ef4_test_loopback(struct ef4_tx_queue *tx_queue,
struct ef4_loopback_self_tests *lb_tests)
{
struct ef4_nic *efx = tx_queue->efx;
struct ef4_loopback_state *state = efx->loopback_selftest;
int i, begin_rc, end_rc;
for (i = 0; i < 3; i++) {
/* Determine how many packets to send */
state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
state->skbs = kcalloc(state->packet_count,
sizeof(state->skbs[0]), GFP_KERNEL);
if (!state->skbs)
return -ENOMEM;
state->flush = false;
netif_dbg(efx, drv, efx->net_dev,
"TX queue %d testing %s loopback with %d packets\n",
tx_queue->queue, LOOPBACK_MODE(efx),
state->packet_count);
ef4_iterate_state(efx);
begin_rc = ef4_begin_loopback(tx_queue);
/* This will normally complete very quickly, but be
* prepared to wait much longer. */
msleep(1);
if (!ef4_poll_loopback(efx)) {
msleep(LOOPBACK_TIMEOUT_MS);
ef4_poll_loopback(efx);
}
end_rc = ef4_end_loopback(tx_queue, lb_tests);
kfree(state->skbs);
if (begin_rc || end_rc) {
/* Wait a while to ensure there are no packets
* floating around after a failure. */
schedule_timeout_uninterruptible(HZ / 10);
return begin_rc ? begin_rc : end_rc;
}
}
netif_dbg(efx, drv, efx->net_dev,
"TX queue %d passed %s loopback test with a burst length "
"of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
state->packet_count);
return 0;
}
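/* Illustration, not driver code: the loop above tries three bursts whose
 * sizes grow as 1 << (i << 2), i.e. 1, 16 and 256 packets, each capped at a
 * third of the TX ring. A minimal sketch, assuming a 1024-entry ring;
 * compile it separately if you want to run it.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const int txq_entries = 1024;	/* assumed ring size */
	int i;

	for (i = 0; i < 3; i++) {
		int cap = txq_entries / 3;
		int burst = 1 << (i << 2);

		printf("iteration %d: %d packets\n", i,
		       burst < cap ? burst : cap);
	}
	return 0;
}
#endif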
/* Wait for link up. On Falcon, we would prefer to rely on ef4_monitor, but
* any contention on the mac lock (via e.g. ef4_mac_mcast_work) causes it
* to delay and retry. Therefore, it's safer to just poll directly. Wait
* for link up and any faults to dissipate. */
static int ef4_wait_for_link(struct ef4_nic *efx)
{
struct ef4_link_state *link_state = &efx->link_state;
int count, link_up_count = 0;
bool link_up;
for (count = 0; count < 40; count++) {
schedule_timeout_uninterruptible(HZ / 10);
if (efx->type->monitor != NULL) {
mutex_lock(&efx->mac_lock);
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
}
mutex_lock(&efx->mac_lock);
link_up = link_state->up;
if (link_up)
link_up = !efx->type->check_mac_fault(efx);
mutex_unlock(&efx->mac_lock);
if (link_up) {
if (++link_up_count == 2)
return 0;
} else {
link_up_count = 0;
}
}
return -ETIMEDOUT;
}
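/* Illustration, not driver code: ef4_wait_for_link() above only reports
 * success after two consecutive good polls, so one glitch-free sample is
 * not enough. A minimal sketch of that debounce, with a stand-in poll that
 * comes up at poll 5 and glitches once at poll 6; compile it separately if
 * you want to run it.
 */
#if 0
#include <stdio.h>

static int poll_link(int t)	/* stand-in for the MAC/PHY poll */
{
	return t >= 5 && t != 6;
}

int main(void)
{
	int t, link_up_count = 0;

	for (t = 0; t < 40; t++) {
		if (poll_link(t)) {
			if (++link_up_count == 2) {
				printf("link stable at poll %d\n", t);
				return 0;
			}
		} else {
			link_up_count = 0;
		}
	}
	printf("timed out\n");
	return 1;
}
#endif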
static int ef4_test_loopbacks(struct ef4_nic *efx, struct ef4_self_tests *tests,
unsigned int loopback_modes)
{
enum ef4_loopback_mode mode;
struct ef4_loopback_state *state;
struct ef4_channel *channel =
ef4_get_channel(efx, efx->tx_channel_offset);
struct ef4_tx_queue *tx_queue;
int rc = 0;
/* Set the port loopback_selftest member. From this point on
* all received packets will be dropped. Mark the state as
* "flushing" so all inflight packets are dropped */
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
BUG_ON(efx->loopback_selftest);
state->flush = true;
efx->loopback_selftest = state;
/* Test all supported loopback modes */
for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
if (!(loopback_modes & (1 << mode)))
continue;
/* Move the port into the specified loopback mode. */
state->flush = true;
mutex_lock(&efx->mac_lock);
efx->loopback_mode = mode;
rc = __ef4_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"unable to move into %s loopback\n",
LOOPBACK_MODE(efx));
goto out;
}
rc = ef4_wait_for_link(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"loopback %s never came up\n",
LOOPBACK_MODE(efx));
goto out;
}
/* Test all enabled types of TX queue */
ef4_for_each_channel_tx_queue(tx_queue, channel) {
state->offload_csum = (tx_queue->queue &
EF4_TXQ_TYPE_OFFLOAD);
rc = ef4_test_loopback(tx_queue,
&tests->loopback[mode]);
if (rc)
goto out;
}
}
out:
/* Remove the flush. The caller will remove the loopback setting */
state->flush = true;
efx->loopback_selftest = NULL;
wmb();
kfree(state);
if (rc == -EPERM)
rc = 0;
return rc;
}
/**************************************************************************
*
* Entry point
*
*************************************************************************/
int ef4_selftest(struct ef4_nic *efx, struct ef4_self_tests *tests,
unsigned flags)
{
enum ef4_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode;
int rc_test = 0, rc_reset, rc;
ef4_selftest_async_cancel(efx);
/* Online (i.e. non-disruptive) testing
* This checks interrupt generation, event delivery and PHY presence. */
rc = ef4_test_phy_alive(efx, tests);
if (rc && !rc_test)
rc_test = rc;
rc = ef4_test_nvram(efx, tests);
if (rc && !rc_test)
rc_test = rc;
rc = ef4_test_interrupts(efx, tests);
if (rc && !rc_test)
rc_test = rc;
rc = ef4_test_eventq_irq(efx, tests);
if (rc && !rc_test)
rc_test = rc;
if (rc_test)
return rc_test;
if (!(flags & ETH_TEST_FL_OFFLINE))
return ef4_test_phy(efx, tests, flags);
/* Offline (i.e. disruptive) testing
* This checks MAC and PHY loopback on the specified port. */
/* Detach the device so the kernel doesn't transmit during the
* loopback test and the watchdog timeout doesn't fire.
*/
ef4_device_detach_sync(efx);
if (efx->type->test_chip) {
rc_reset = efx->type->test_chip(efx, tests);
if (rc_reset) {
netif_err(efx, hw, efx->net_dev,
"Unable to recover from chip test\n");
ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
return rc_reset;
}
if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
rc_test = -EIO;
}
/* Ensure that the phy is powered and out of loopback
* for the bist and loopback tests */
mutex_lock(&efx->mac_lock);
efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE;
__ef4_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
rc = ef4_test_phy(efx, tests, flags);
if (rc && !rc_test)
rc_test = rc;
rc = ef4_test_loopbacks(efx, tests, efx->loopback_modes);
if (rc && !rc_test)
rc_test = rc;
/* restore the PHY to the previous state */
mutex_lock(&efx->mac_lock);
efx->phy_mode = phy_mode;
efx->loopback_mode = loopback_mode;
__ef4_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
netif_device_attach(efx->net_dev);
return rc_test;
}
void ef4_selftest_async_start(struct ef4_nic *efx)
{
struct ef4_channel *channel;
ef4_for_each_channel(channel, efx)
ef4_nic_event_test_start(channel);
schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
}
void ef4_selftest_async_cancel(struct ef4_nic *efx)
{
cancel_delayed_work_sync(&efx->selftest_work);
}
void ef4_selftest_async_work(struct work_struct *data)
{
struct ef4_nic *efx = container_of(data, struct ef4_nic,
selftest_work.work);
struct ef4_channel *channel;
int cpu;
ef4_for_each_channel(channel, efx) {
cpu = ef4_nic_event_test_irq_cpu(channel);
if (cpu < 0)
netif_err(efx, ifup, efx->net_dev,
"channel %d failed to trigger an interrupt\n",
channel->channel);
else
netif_dbg(efx, ifup, efx->net_dev,
"channel %d triggered interrupt on CPU %d\n",
channel->channel, cpu);
}
}

View File

@@ -0,0 +1,55 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_SELFTEST_H
#define EF4_SELFTEST_H
#include "net_driver.h"
/*
* Self tests
*/
struct ef4_loopback_self_tests {
int tx_sent[EF4_TXQ_TYPES];
int tx_done[EF4_TXQ_TYPES];
int rx_good;
int rx_bad;
};
#define EF4_MAX_PHY_TESTS 20
/* Efx self test results
* For fields which are not counters, 1 indicates success and -1
* indicates failure; 0 indicates test could not be run.
*/
struct ef4_self_tests {
/* online tests */
int phy_alive;
int nvram;
int interrupt;
int eventq_dma[EF4_MAX_CHANNELS];
int eventq_int[EF4_MAX_CHANNELS];
/* offline tests */
int memory;
int registers;
int phy_ext[EF4_MAX_PHY_TESTS];
struct ef4_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
void ef4_loopback_rx_packet(struct ef4_nic *efx, const char *buf_ptr,
int pkt_len);
int ef4_selftest(struct ef4_nic *efx, struct ef4_self_tests *tests,
unsigned flags);
void ef4_selftest_async_start(struct ef4_nic *efx);
void ef4_selftest_async_cancel(struct ef4_nic *efx);
void ef4_selftest_async_work(struct work_struct *data);
#endif /* EF4_SELFTEST_H */

View File

@@ -0,0 +1,494 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2007-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/delay.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "efx.h"
#include "mdio_10g.h"
#include "nic.h"
#include "phy.h"
#include "workarounds.h"
/* We expect these MMDs to be in the package. */
#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
MDIO_DEVS_PCS | \
MDIO_DEVS_PHYXS | \
MDIO_DEVS_AN)
#define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
(1 << LOOPBACK_PCS) | \
(1 << LOOPBACK_PMAPMD) | \
(1 << LOOPBACK_PHYXS_WS))
/* We complain if we fail to see the link partner as 10G capable this many
* times in a row (must be > 1 as sampling the autoneg. registers is racy)
*/
#define MAX_BAD_LP_TRIES (5)
/* Extended control register */
#define PMA_PMD_XCONTROL_REG 49152
#define PMA_PMD_EXT_GMII_EN_LBN 1
#define PMA_PMD_EXT_GMII_EN_WIDTH 1
#define PMA_PMD_EXT_CLK_OUT_LBN 2
#define PMA_PMD_EXT_CLK_OUT_WIDTH 1
#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
#define PMA_PMD_EXT_CLK312_WIDTH 1
#define PMA_PMD_EXT_LPOWER_LBN 12
#define PMA_PMD_EXT_LPOWER_WIDTH 1
#define PMA_PMD_EXT_ROBUST_LBN 14
#define PMA_PMD_EXT_ROBUST_WIDTH 1
#define PMA_PMD_EXT_SSR_LBN 15
#define PMA_PMD_EXT_SSR_WIDTH 1
/* extended status register */
#define PMA_PMD_XSTATUS_REG 49153
#define PMA_PMD_XSTAT_MDIX_LBN 14
#define PMA_PMD_XSTAT_FLP_LBN (12)
/* LED control register */
#define PMA_PMD_LED_CTRL_REG 49159
#define PMA_PMA_LED_ACTIVITY_LBN (3)
/* LED function override register */
#define PMA_PMD_LED_OVERR_REG 49161
/* Bit positions for different LEDs (there are more but not wired on SFE4001) */
#define PMA_PMD_LED_LINK_LBN (0)
#define PMA_PMD_LED_SPEED_LBN (2)
#define PMA_PMD_LED_TX_LBN (4)
#define PMA_PMD_LED_RX_LBN (6)
/* Override settings */
#define PMA_PMD_LED_AUTO (0) /* H/W control */
#define PMA_PMD_LED_ON (1)
#define PMA_PMD_LED_OFF (2)
#define PMA_PMD_LED_FLASH (3)
#define PMA_PMD_LED_MASK 3
/* All LEDs under hardware control */
/* Green and Amber under hardware control, Red off */
#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
#define PMA_PMD_SPEED_ENABLE_REG 49192
#define PMA_PMD_100TX_ADV_LBN 1
#define PMA_PMD_100TX_ADV_WIDTH 1
#define PMA_PMD_1000T_ADV_LBN 2
#define PMA_PMD_1000T_ADV_WIDTH 1
#define PMA_PMD_10000T_ADV_LBN 3
#define PMA_PMD_10000T_ADV_WIDTH 1
#define PMA_PMD_SPEED_LBN 4
#define PMA_PMD_SPEED_WIDTH 4
/* Misc register defines */
#define PCS_CLOCK_CTRL_REG 55297
#define PLL312_RST_N_LBN 2
#define PCS_SOFT_RST2_REG 55302
#define SERDES_RST_N_LBN 13
#define XGXS_RST_N_LBN 12
#define PCS_TEST_SELECT_REG 55303 /* PRM 10.5.8 */
#define CLK312_EN_LBN 3
/* PHYXS registers */
#define PHYXS_XCONTROL_REG 49152
#define PHYXS_RESET_LBN 15
#define PHYXS_RESET_WIDTH 1
#define PHYXS_TEST1 (49162)
#define LOOPBACK_NEAR_LBN (8)
#define LOOPBACK_NEAR_WIDTH (1)
/* Boot status register */
#define PCS_BOOT_STATUS_REG 53248
#define PCS_BOOT_FATAL_ERROR_LBN 0
#define PCS_BOOT_PROGRESS_LBN 1
#define PCS_BOOT_PROGRESS_WIDTH 2
#define PCS_BOOT_PROGRESS_INIT 0
#define PCS_BOOT_PROGRESS_WAIT_MDIO 1
#define PCS_BOOT_PROGRESS_CHECKSUM 2
#define PCS_BOOT_PROGRESS_JUMP 3
#define PCS_BOOT_DOWNLOAD_WAIT_LBN 3
#define PCS_BOOT_CODE_STARTED_LBN 4
/* 100M/1G PHY registers */
#define GPHY_XCONTROL_REG 49152
#define GPHY_ISOLATE_LBN 10
#define GPHY_ISOLATE_WIDTH 1
#define GPHY_DUPLEX_LBN 8
#define GPHY_DUPLEX_WIDTH 1
#define GPHY_LOOPBACK_NEAR_LBN 14
#define GPHY_LOOPBACK_NEAR_WIDTH 1
#define C22EXT_STATUS_REG 49153
#define C22EXT_STATUS_LINK_LBN 2
#define C22EXT_STATUS_LINK_WIDTH 1
#define C22EXT_MSTSLV_CTRL 49161
#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8
#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9
#define C22EXT_MSTSLV_STATUS 49162
#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10
#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11
/* Time to wait between powering down the LNPGA and turning off the power
* rails */
#define LNPGA_PDOWN_WAIT (HZ / 5)
struct tenxpress_phy_data {
enum ef4_loopback_mode loopback_mode;
enum ef4_phy_mode phy_mode;
int bad_lp_tries;
};
static int tenxpress_init(struct ef4_nic *efx)
{
/* Enable 312.5 MHz clock */
ef4_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
1 << CLK312_EN_LBN);
/* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
1 << PMA_PMA_LED_ACTIVITY_LBN, true);
ef4_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
SFX7101_PMA_PMD_LED_DEFAULT);
return 0;
}
static int tenxpress_phy_probe(struct ef4_nic *efx)
{
struct tenxpress_phy_data *phy_data;
/* Allocate phy private storage */
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
phy_data->phy_mode = efx->phy_mode;
efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45;
efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
ADVERTISED_10000baseT_Full);
return 0;
}
static int tenxpress_phy_init(struct ef4_nic *efx)
{
int rc;
falcon_board(efx)->type->init_phy(efx);
if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
rc = ef4_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
return rc;
rc = ef4_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
return rc;
}
rc = tenxpress_init(efx);
if (rc < 0)
return rc;
/* Reinitialise flow control settings */
ef4_link_set_wanted_fc(efx, efx->wanted_fc);
ef4_mdio_an_reconfigure(efx);
schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
/* Let XGXS and SerDes out of reset */
falcon_reset_xaui(efx);
return 0;
}
/* Perform a "special software reset" on the PHY. The caller is
* responsible for saving and restoring the PHY hardware registers
* properly, and masking/unmasking LASI */
static int tenxpress_special_reset(struct ef4_nic *efx)
{
int rc, reg;
/* The XGMAC clock is driven from the SFX7101 312MHz clock, so
* a special software reset can glitch the XGMAC sufficiently for stats
* requests to fail. */
falcon_stop_nic_stats(efx);
/* Initiate reset */
reg = ef4_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
reg |= (1 << PMA_PMD_EXT_SSR_LBN);
ef4_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
mdelay(200);
/* Wait for the blocks to come out of reset */
rc = ef4_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
goto out;
/* Try and reconfigure the device */
rc = tenxpress_init(efx);
if (rc < 0)
goto out;
/* Wait for the XGXS state machine to churn */
mdelay(10);
out:
falcon_start_nic_stats(efx);
return rc;
}
static void sfx7101_check_bad_lp(struct ef4_nic *efx, bool link_ok)
{
struct tenxpress_phy_data *pd = efx->phy_data;
bool bad_lp;
int reg;
if (link_ok) {
bad_lp = false;
} else {
/* Check that AN has started but not completed. */
reg = ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_STAT1);
if (!(reg & MDIO_AN_STAT1_LPABLE))
return; /* LP status is unknown */
bad_lp = !(reg & MDIO_AN_STAT1_COMPLETE);
if (bad_lp)
pd->bad_lp_tries++;
}
/* Nothing to do if all is well and was previously so. */
if (!pd->bad_lp_tries)
return;
/* Use the RX (red) LED as an error indicator once we've seen AN
* failure several times in a row, and also log a message. */
if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
reg = ef4_mdio_read(efx, MDIO_MMD_PMAPMD,
PMA_PMD_LED_OVERR_REG);
reg &= ~(PMA_PMD_LED_MASK << PMA_PMD_LED_RX_LBN);
if (!bad_lp) {
reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
} else {
reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN;
netif_err(efx, link, efx->net_dev,
"appears to be plugged into a port"
" that is not 10GBASE-T capable. The PHY"
" supports 10GBASE-T ONLY, so no link can"
" be established\n");
}
ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
PMA_PMD_LED_OVERR_REG, reg);
pd->bad_lp_tries = bad_lp;
}
}
static bool sfx7101_link_ok(struct ef4_nic *efx)
{
return ef4_mdio_links_ok(efx,
MDIO_DEVS_PMAPMD |
MDIO_DEVS_PCS |
MDIO_DEVS_PHYXS);
}
static void tenxpress_ext_loopback(struct ef4_nic *efx)
{
ef4_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
1 << LOOPBACK_NEAR_LBN,
efx->loopback_mode == LOOPBACK_PHYXS);
}
static void tenxpress_low_power(struct ef4_nic *efx)
{
ef4_mdio_set_mmds_lpower(
efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
TENXPRESS_REQUIRED_DEVS);
}
static int tenxpress_phy_reconfigure(struct ef4_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
bool phy_mode_change, loop_reset;
if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
phy_data->phy_mode = efx->phy_mode;
return 0;
}
phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
phy_data->phy_mode != PHY_MODE_NORMAL);
loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, LOOPBACKS_EXTERNAL(efx)) ||
LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
if (loop_reset || phy_mode_change) {
tenxpress_special_reset(efx);
falcon_reset_xaui(efx);
}
tenxpress_low_power(efx);
ef4_mdio_transmit_disable(efx);
ef4_mdio_phy_reconfigure(efx);
tenxpress_ext_loopback(efx);
ef4_mdio_an_reconfigure(efx);
phy_data->loopback_mode = efx->loopback_mode;
phy_data->phy_mode = efx->phy_mode;
return 0;
}
static void
tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd);
/* Poll for link state changes */
static bool tenxpress_phy_poll(struct ef4_nic *efx)
{
struct ef4_link_state old_state = efx->link_state;
efx->link_state.up = sfx7101_link_ok(efx);
efx->link_state.speed = 10000;
efx->link_state.fd = true;
efx->link_state.fc = ef4_mdio_get_pause(efx);
sfx7101_check_bad_lp(efx, efx->link_state.up);
return !ef4_link_state_equal(&efx->link_state, &old_state);
}
static void sfx7101_phy_fini(struct ef4_nic *efx)
{
int reg;
/* Power down the LNPGA */
reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
ef4_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
/* Waiting here ensures that the board fini, which can turn
* off the power to the PHY, won't get run until the LNPGA
* powerdown has been given long enough to complete. */
schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
}
static void tenxpress_phy_remove(struct ef4_nic *efx)
{
kfree(efx->phy_data);
efx->phy_data = NULL;
}
/* Override the RX, TX and link LEDs */
void tenxpress_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
int reg;
switch (mode) {
case EF4_LED_OFF:
reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) |
(PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) |
(PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN);
break;
case EF4_LED_ON:
reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) |
(PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) |
(PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
break;
default:
reg = SFX7101_PMA_PMD_LED_DEFAULT;
break;
}
ef4_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
}
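/* Illustration, not driver code: each LED in the override register gets a
 * 2-bit field at the LBN positions defined earlier in this file, so a whole
 * override word is just ORed shifts. A minimal sketch computing the
 * EF4_LED_ON word; compile it separately if you want to run it.
 */
#if 0
#include <stdio.h>

#define LED_LINK_LBN	0
#define LED_TX_LBN	4
#define LED_RX_LBN	6
#define LED_ON		1

int main(void)
{
	int reg = (LED_ON << LED_TX_LBN) |
		  (LED_ON << LED_RX_LBN) |
		  (LED_ON << LED_LINK_LBN);

	printf("EF4_LED_ON override word: %#x\n", reg);	/* 0x51 */
	return 0;
}
#endif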
static const char *const sfx7101_test_names[] = {
"bist"
};
static const char *sfx7101_test_name(struct ef4_nic *efx, unsigned int index)
{
if (index < ARRAY_SIZE(sfx7101_test_names))
return sfx7101_test_names[index];
return NULL;
}
static int
sfx7101_run_tests(struct ef4_nic *efx, int *results, unsigned flags)
{
int rc;
if (!(flags & ETH_TEST_FL_OFFLINE))
return 0;
/* BIST is automatically run after a special software reset */
rc = tenxpress_special_reset(efx);
results[0] = rc ? -1 : 1;
ef4_mdio_an_reconfigure(efx);
return rc;
}
static void
tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
u32 adv = 0, lpa = 0;
int reg;
reg = ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
adv |= ADVERTISED_10000baseT_Full;
reg = ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
if (reg & MDIO_AN_10GBT_STAT_LP10G)
lpa |= ADVERTISED_10000baseT_Full;
mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
/* In loopback, the PHY automatically brings up the correct interface,
* but doesn't advertise the correct speed. So override it */
if (LOOPBACK_EXTERNAL(efx))
ethtool_cmd_speed_set(ecmd, SPEED_10000);
}
static int tenxpress_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
if (!ecmd->autoneg)
return -EINVAL;
return ef4_mdio_set_settings(efx, ecmd);
}
static void sfx7101_set_npage_adv(struct ef4_nic *efx, u32 advertising)
{
ef4_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
MDIO_AN_10GBT_CTRL_ADV10G,
advertising & ADVERTISED_10000baseT_Full);
}
const struct ef4_phy_operations falcon_sfx7101_phy_ops = {
.probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
.poll = tenxpress_phy_poll,
.fini = sfx7101_phy_fini,
.remove = tenxpress_phy_remove,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
.test_alive = ef4_mdio_test_alive,
.test_name = sfx7101_test_name,
.run_tests = sfx7101_run_tests,
};

View File

@@ -0,0 +1,649 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"
static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
struct ef4_tx_buffer *buffer)
{
unsigned int index = ef4_tx_queue_get_insert_index(tx_queue);
struct ef4_buffer *page_buf =
&tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)];
unsigned int offset =
((index << EF4_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
if (unlikely(!page_buf->addr) &&
ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
GFP_ATOMIC))
return NULL;
buffer->dma_addr = page_buf->dma_addr + offset;
buffer->unmap_len = 0;
return (u8 *)page_buf->addr + offset;
}
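/* Illustration, not driver code: the shifts above carve each DMA page into
 * 2^(PAGE_SHIFT - EF4_TX_CB_ORDER) copy buffers, one per descriptor index.
 * A minimal sketch of that addressing, assuming 4 KiB pages and
 * EF4_TX_CB_ORDER == 7 (128-byte buffers; the real value is defined
 * elsewhere); compile it separately if you want to run it.
 */
#if 0
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1u << PAGE_SHIFT)
#define CB_ORDER	7
#define NET_IP_ALIGN	2

int main(void)
{
	unsigned int index;

	for (index = 30; index < 34; index++)	/* crosses a page boundary */
		printf("desc %2u -> page %u offset %4u\n", index,
		       index >> (PAGE_SHIFT - CB_ORDER),
		       ((index << CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1));
	return 0;
}
#endif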
u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
struct ef4_tx_buffer *buffer, size_t len)
{
if (len > EF4_TX_CB_SIZE)
return NULL;
return ef4_tx_get_copy_buffer(tx_queue, buffer);
}
static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
struct ef4_tx_buffer *buffer,
unsigned int *pkts_compl,
unsigned int *bytes_compl)
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
if (buffer->flags & EF4_TX_BUF_MAP_SINGLE)
dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
else
dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
buffer->unmap_len = 0;
}
if (buffer->flags & EF4_TX_BUF_SKB) {
(*pkts_compl)++;
(*bytes_compl) += buffer->skb->len;
dev_consume_skb_any((struct sk_buff *)buffer->skb);
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n",
tx_queue->queue, tx_queue->read_count);
}
buffer->len = 0;
buffer->flags = 0;
}
unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx)
{
/* This is probably too much since we don't have any TSO support;
* it's a left-over from when we had Software TSO. But it's safer
* to leave it as-is than try to determine a new bound.
*/
/* Header and payload descriptor for each output segment, plus
* one for every input fragment boundary within a segment
*/
unsigned int max_descs = EF4_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
/* Possibly one more per segment for the alignment workaround,
* or for option descriptors
*/
if (EF4_WORKAROUND_5391(efx))
max_descs += EF4_TSO_MAX_SEGS;
/* Possibly more for PCIe page boundaries within input fragments */
if (PAGE_SIZE > EF4_PAGE_SIZE)
max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
DIV_ROUND_UP(GSO_MAX_SIZE, EF4_PAGE_SIZE));
return max_descs;
}
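/* Illustration, not driver code: the bound above is simple arithmetic once
 * the constants are known. A minimal sketch, assuming EF4_TSO_MAX_SEGS ==
 * 100 and MAX_SKB_FRAGS == 17 (both defined elsewhere), that the alignment
 * workaround applies and that PAGE_SIZE == EF4_PAGE_SIZE; compile it
 * separately if you want to run it.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const unsigned int tso_max_segs = 100, max_skb_frags = 17;
	unsigned int max_descs = tso_max_segs * 2 + max_skb_frags;	/* 217 */

	max_descs += tso_max_segs;	/* alignment workaround: 317 */
	printf("worst-case descriptors per skb: %u\n", max_descs);
	return 0;
}
#endif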
static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
{
/* We need to consider both queues that the net core sees as one */
struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(txq1);
struct ef4_nic *efx = txq1->efx;
unsigned int fill_level;
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
if (likely(fill_level < efx->txq_stop_thresh))
return;
/* We used the stale old_read_count above, which gives us a
* pessimistic estimate of the fill level (which may even
* validly be >= efx->txq_entries). Now try again using
* read_count (more likely to be a cache miss).
*
* If we read read_count and then conditionally stop the
* queue, it is possible for the completion path to race with
* us and complete all outstanding descriptors in the middle,
* after which there will be no more completions to wake it.
* Therefore we stop the queue first, then read read_count
* (with a memory barrier to ensure the ordering), then
* restart the queue if the fill level turns out to be low
* enough.
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
EF4_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
if (likely(fill_level < efx->txq_stop_thresh)) {
smp_mb();
if (likely(!efx->loopback_selftest))
netif_tx_start_queue(txq1->core_txq);
}
}
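/* Illustration, not driver code: the fill-level subtractions above rely on
 * unsigned wraparound, so insert_count - read_count stays correct even
 * after the 32-bit counters wrap. A minimal sketch; compile it separately
 * if you want to run it.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int insert_count = 5;		/* wrapped past UINT_MAX */
	unsigned int read_count = 0xfffffffbu;	/* not yet wrapped */

	printf("fill level = %u\n", insert_count - read_count);	/* 10 */
	return 0;
}
#endif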
static int ef4_enqueue_skb_copy(struct ef4_tx_queue *tx_queue,
struct sk_buff *skb)
{
unsigned int min_len = tx_queue->tx_min_size;
unsigned int copy_len = skb->len;
struct ef4_tx_buffer *buffer;
u8 *copy_buffer;
int rc;
EF4_BUG_ON_PARANOID(copy_len > EF4_TX_CB_SIZE);
buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
copy_buffer = ef4_tx_get_copy_buffer(tx_queue, buffer);
if (unlikely(!copy_buffer))
return -ENOMEM;
rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
EF4_WARN_ON_PARANOID(rc);
if (unlikely(copy_len < min_len)) {
memset(copy_buffer + copy_len, 0, min_len - copy_len);
buffer->len = min_len;
} else {
buffer->len = copy_len;
}
buffer->skb = skb;
buffer->flags = EF4_TX_BUF_SKB;
++tx_queue->insert_count;
return rc;
}
static struct ef4_tx_buffer *ef4_tx_map_chunk(struct ef4_tx_queue *tx_queue,
dma_addr_t dma_addr,
size_t len)
{
const struct ef4_nic_type *nic_type = tx_queue->efx->type;
struct ef4_tx_buffer *buffer;
unsigned int dma_len;
/* Map the fragment taking account of NIC-dependent DMA limits. */
do {
buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
buffer->len = dma_len;
buffer->dma_addr = dma_addr;
buffer->flags = EF4_TX_BUF_CONT;
len -= dma_len;
dma_addr += dma_len;
++tx_queue->insert_count;
} while (len);
return buffer;
}
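/* Illustration, not driver code: ef4_tx_map_chunk() above keeps asking the
 * NIC how much of the remaining fragment fits in one descriptor. A minimal
 * sketch where a stand-in for tx_limit_len() refuses to cross a 4 KiB
 * boundary (the real limit is NIC-dependent); compile it separately if you
 * want to run it.
 */
#if 0
#include <stdio.h>

static unsigned int limit_len(unsigned long long dma_addr, unsigned int len)
{
	unsigned int limit = 4096 - (dma_addr & 4095);

	return len < limit ? len : limit;
}

int main(void)
{
	unsigned long long dma_addr = 0x1f80;	/* assumed start address */
	unsigned int len = 10000;

	while (len) {
		unsigned int dma_len = limit_len(dma_addr, len);

		printf("desc: addr %#llx len %u\n", dma_addr, dma_len);
		len -= dma_len;
		dma_addr += dma_len;
	}
	return 0;
}
#endif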
/* Map all data from an SKB for DMA and create descriptors on the queue.
*/
static int ef4_tx_map_data(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
struct ef4_nic *efx = tx_queue->efx;
struct device *dma_dev = &efx->pci_dev->dev;
unsigned int frag_index, nr_frags;
dma_addr_t dma_addr, unmap_addr;
unsigned short dma_flags;
size_t len, unmap_len;
nr_frags = skb_shinfo(skb)->nr_frags;
frag_index = 0;
/* Map header data. */
len = skb_headlen(skb);
dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
dma_flags = EF4_TX_BUF_MAP_SINGLE;
unmap_len = len;
unmap_addr = dma_addr;
if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
return -EIO;
/* Add descriptors for each fragment. */
do {
struct ef4_tx_buffer *buffer;
skb_frag_t *fragment;
buffer = ef4_tx_map_chunk(tx_queue, dma_addr, len);
/* The final descriptor for a fragment is responsible for
* unmapping the whole fragment.
*/
buffer->flags = EF4_TX_BUF_CONT | dma_flags;
buffer->unmap_len = unmap_len;
buffer->dma_offset = buffer->dma_addr - unmap_addr;
if (frag_index >= nr_frags) {
/* Store SKB details with the final buffer for
* the completion.
*/
buffer->skb = skb;
buffer->flags = EF4_TX_BUF_SKB | dma_flags;
return 0;
}
/* Move on to the next fragment. */
fragment = &skb_shinfo(skb)->frags[frag_index++];
len = skb_frag_size(fragment);
dma_addr = skb_frag_dma_map(dma_dev, fragment,
0, len, DMA_TO_DEVICE);
dma_flags = 0;
unmap_len = len;
unmap_addr = dma_addr;
if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
return -EIO;
} while (1);
}
/* Remove buffers put into a tx_queue. None of the buffers must have
* an skb attached.
*/
static void ef4_enqueue_unwind(struct ef4_tx_queue *tx_queue)
{
struct ef4_tx_buffer *buffer;
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
--tx_queue->insert_count;
buffer = __ef4_tx_queue_get_insert_buffer(tx_queue);
ef4_dequeue_buffer(tx_queue, buffer, NULL, NULL);
}
}
/*
* Add a socket buffer to a TX queue
*
* This maps all fragments of a socket buffer for DMA and adds them to
* the TX queue. The queue's insert pointer will be incremented by
* the number of fragments in the socket buffer.
*
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
*
* This function is split out from ef4_hard_start_xmit to allow the
* loopback test to direct packets via specific TX queues.
*
* Returns NETDEV_TX_OK.
* You must hold netif_tx_lock() to call this function.
*/
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
bool data_mapped = false;
unsigned int skb_len;
skb_len = skb->len;
EF4_WARN_ON_PARANOID(skb_is_gso(skb));
if (skb_len < tx_queue->tx_min_size ||
(skb->data_len && skb_len <= EF4_TX_CB_SIZE)) {
/* Pad short packets or coalesce short fragmented packets. */
if (ef4_enqueue_skb_copy(tx_queue, skb))
goto err;
tx_queue->cb_packets++;
data_mapped = true;
}
/* Map for DMA and create descriptors if we haven't done so already. */
if (!data_mapped && (ef4_tx_map_data(tx_queue, skb)))
goto err;
/* Update BQL */
netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
/* Pass off to hardware */
if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);
/* There could be packets left on the partner queue if those
* SKBs had skb->xmit_more set. If we do not push those they
* could be left for a long time and cause a netdev watchdog.
*/
if (txq2->xmit_more_available)
ef4_nic_push_buffers(txq2);
ef4_nic_push_buffers(tx_queue);
} else {
tx_queue->xmit_more_available = skb->xmit_more;
}
tx_queue->tx_packets++;
ef4_tx_maybe_stop_queue(tx_queue);
return NETDEV_TX_OK;
err:
ef4_enqueue_unwind(tx_queue);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/* Remove packets from the TX queue
*
* This removes packets from the TX queue, up to and including the
* specified index.
*/
static void ef4_dequeue_buffers(struct ef4_tx_queue *tx_queue,
unsigned int index,
unsigned int *pkts_compl,
unsigned int *bytes_compl)
{
struct ef4_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
stop_index = (index + 1) & tx_queue->ptr_mask;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
while (read_ptr != stop_index) {
struct ef4_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
if (!(buffer->flags & EF4_TX_BUF_OPTION) &&
unlikely(buffer->len == 0)) {
netif_err(efx, tx_err, efx->net_dev,
"TX queue %d spurious TX completion id %x\n",
tx_queue->queue, read_ptr);
ef4_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return;
}
ef4_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
}
}
/* Initiate a packet transmission. We use one channel per CPU
* (sharing when we have more CPUs than channels). On Falcon, the TX
* completion events will be directed back to the CPU that transmitted
* the packet, which should be cache-efficient.
*
* Context: non-blocking.
* Note that returning anything other than NETDEV_TX_OK will cause the
* OS to free the skb.
*/
netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
struct ef4_nic *efx = netdev_priv(net_dev);
struct ef4_tx_queue *tx_queue;
unsigned index, type;
EF4_WARN_ON_PARANOID(!netif_device_present(net_dev));
index = skb_get_queue_mapping(skb);
type = skb->ip_summed == CHECKSUM_PARTIAL ? EF4_TXQ_TYPE_OFFLOAD : 0;
if (index >= efx->n_tx_channels) {
index -= efx->n_tx_channels;
type |= EF4_TXQ_TYPE_HIGHPRI;
}
tx_queue = ef4_get_tx_queue(efx, index, type);
return ef4_enqueue_skb(tx_queue, skb);
}
void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue)
{
struct ef4_nic *efx = tx_queue->efx;
/* Must be inverse of queue lookup in ef4_hard_start_xmit() */
tx_queue->core_txq =
netdev_get_tx_queue(efx->net_dev,
tx_queue->queue / EF4_TXQ_TYPES +
((tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
efx->n_tx_channels : 0));
}
int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
struct tc_to_netdev *ntc)
{
struct ef4_nic *efx = netdev_priv(net_dev);
struct ef4_channel *channel;
struct ef4_tx_queue *tx_queue;
unsigned tc, num_tc;
int rc;
if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
num_tc = ntc->tc;
if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)
return -EINVAL;
if (num_tc == net_dev->num_tc)
return 0;
for (tc = 0; tc < num_tc; tc++) {
net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
}
if (num_tc > net_dev->num_tc) {
/* Initialise high-priority queues as necessary */
ef4_for_each_channel(channel, efx) {
ef4_for_each_possible_channel_tx_queue(tx_queue,
channel) {
if (!(tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI))
continue;
if (!tx_queue->buffer) {
rc = ef4_probe_tx_queue(tx_queue);
if (rc)
return rc;
}
if (!tx_queue->initialised)
ef4_init_tx_queue(tx_queue);
ef4_init_tx_queue_core_txq(tx_queue);
}
}
} else {
/* Reduce number of classes before number of queues */
net_dev->num_tc = num_tc;
}
rc = netif_set_real_num_tx_queues(net_dev,
max_t(int, num_tc, 1) *
efx->n_tx_channels);
if (rc)
return rc;
/* Do not destroy high-priority queues when they become
* unused. We would have to flush them first, and it is
* fairly difficult to flush a subset of TX queues. Leave
* it to ef4_fini_channels().
*/
net_dev->num_tc = num_tc;
return 0;
}
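/* Illustration, not driver code: the loop above lays traffic classes out
 * as contiguous blocks of n_tx_channels queues, which is what the inverse
 * lookup in ef4_hard_start_xmit() depends on. A minimal sketch, assuming
 * 4 TX channels and 2 classes; compile it separately if you want to run it.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const unsigned int n_tx_channels = 4, num_tc = 2;
	unsigned int tc;

	for (tc = 0; tc < num_tc; tc++)
		printf("tc %u -> txq offset %u, count %u\n", tc,
		       tc * n_tx_channels, n_tx_channels);
	return 0;
}
#endif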
void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
{
unsigned fill_level;
struct ef4_nic *efx = tx_queue->efx;
struct ef4_tx_queue *txq2;
unsigned int pkts_compl = 0, bytes_compl = 0;
EF4_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
ef4_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
if (pkts_compl > 1)
++tx_queue->merge_events;
/* See if we need to restart the netif queue. This memory
* barrier ensures that we write read_count (inside
* ef4_dequeue_buffers()) before reading the queue status.
*/
smp_mb();
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
likely(efx->port_enabled) &&
likely(netif_device_present(efx->net_dev))) {
txq2 = ef4_tx_queue_partner(tx_queue);
fill_level = max(tx_queue->insert_count - tx_queue->read_count,
txq2->insert_count - txq2->read_count);
if (fill_level <= efx->txq_wake_thresh)
netif_tx_wake_queue(tx_queue->core_txq);
}
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
tx_queue->read_count | EF4_EMPTY_COUNT_VALID;
}
}
}
static unsigned int ef4_tx_cb_page_count(struct ef4_tx_queue *tx_queue)
{
return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EF4_TX_CB_ORDER);
}
int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue)
{
struct ef4_nic *efx = tx_queue->efx;
unsigned int entries;
int rc;
/* Create the smallest power-of-two aligned ring */
entries = max(roundup_pow_of_two(efx->txq_entries), EF4_MIN_DMAQ_SIZE);
EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
tx_queue->ptr_mask = entries - 1;
netif_dbg(efx, probe, efx->net_dev,
"creating TX queue %d size %#x mask %#x\n",
tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
/* Allocate software ring */
tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
GFP_KERNEL);
if (!tx_queue->buffer)
return -ENOMEM;
tx_queue->cb_page = kcalloc(ef4_tx_cb_page_count(tx_queue),
sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
if (!tx_queue->cb_page) {
rc = -ENOMEM;
goto fail1;
}
/* Allocate hardware ring */
rc = ef4_nic_probe_tx(tx_queue);
if (rc)
goto fail2;
return 0;
fail2:
kfree(tx_queue->cb_page);
tx_queue->cb_page = NULL;
fail1:
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
return rc;
}
void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue)
{
struct ef4_nic *efx = tx_queue->efx;
netif_dbg(efx, drv, efx->net_dev,
"initialising TX queue %d\n", tx_queue->queue);
tx_queue->insert_count = 0;
tx_queue->write_count = 0;
tx_queue->old_write_count = 0;
tx_queue->read_count = 0;
tx_queue->old_read_count = 0;
tx_queue->empty_read_count = 0 | EF4_EMPTY_COUNT_VALID;
tx_queue->xmit_more_available = false;
	/* Some older hardware requires TX writes larger than 32 bytes. */
tx_queue->tx_min_size = EF4_WORKAROUND_15592(efx) ? 33 : 0;
/* Set up TX descriptor ring */
ef4_nic_init_tx(tx_queue);
tx_queue->initialised = true;
}
void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue)
{
struct ef4_tx_buffer *buffer;
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"shutting down TX queue %d\n", tx_queue->queue);
if (!tx_queue->buffer)
return;
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
ef4_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
++tx_queue->read_count;
}
tx_queue->xmit_more_available = false;
netdev_tx_reset_queue(tx_queue->core_txq);
}
void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue)
{
int i;
if (!tx_queue->buffer)
return;
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"destroying TX queue %d\n", tx_queue->queue);
ef4_nic_remove_tx(tx_queue);
if (tx_queue->cb_page) {
for (i = 0; i < ef4_tx_cb_page_count(tx_queue); i++)
ef4_nic_free_buffer(tx_queue->efx,
&tx_queue->cb_page[i]);
kfree(tx_queue->cb_page);
tx_queue->cb_page = NULL;
}
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
}

View File

@@ -0,0 +1,27 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2015 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_TX_H
#define EF4_TX_H
#include <linux/types.h>
/* Driver internal tx-path related declarations. */
unsigned int ef4_tx_limit_len(struct ef4_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
struct ef4_tx_buffer *buffer, size_t len);
int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb,
bool *data_mapped);
#endif /* EF4_TX_H */

View File

@@ -0,0 +1,560 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
/*
* Driver for Transwitch/Mysticom CX4 retimer
* see www.transwitch.com, part is TXC-43128
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include "efx.h"
#include "mdio_10g.h"
#include "phy.h"
#include "nic.h"
/* We expect these MMDs to be in the package */
#define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \
MDIO_DEVS_PMAPMD | \
MDIO_DEVS_PHYXS)
#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \
(1 << LOOPBACK_PMAPMD) | \
(1 << LOOPBACK_PHYXS_WS))
/**************************************************************************
*
* Compile-time config
*
**************************************************************************
*/
#define TXCNAME "TXC43128"
/* Total length of time we'll wait for the PHY to come out of reset (ms) */
#define TXC_MAX_RESET_TIME 500
/* Interval between checks (ms) */
#define TXC_RESET_WAIT 10
/* How long to run BIST (us) */
#define TXC_BIST_DURATION 50
/**************************************************************************
*
* Register definitions
*
**************************************************************************
*/
/* Command register */
#define TXC_GLRGS_GLCMD 0xc004
/* Useful bits in command register */
/* Lane power-down */
#define TXC_GLCMD_L01PD_LBN 5
#define TXC_GLCMD_L23PD_LBN 6
/* Limited SW reset: preserves configuration but
* initiates a logic reset. Self-clearing */
#define TXC_GLCMD_LMTSWRST_LBN 14
/* Signal Quality Control */
#define TXC_GLRGS_GSGQLCTL 0xc01a
/* Enable bit */
#define TXC_GSGQLCT_SGQLEN_LBN 15
/* Lane selection */
#define TXC_GSGQLCT_LNSL_LBN 13
#define TXC_GSGQLCT_LNSL_WIDTH 2
/* Analog TX control */
#define TXC_ALRGS_ATXCTL 0xc040
/* Lane power-down */
#define TXC_ATXCTL_TXPD3_LBN 15
#define TXC_ATXCTL_TXPD2_LBN 14
#define TXC_ATXCTL_TXPD1_LBN 13
#define TXC_ATXCTL_TXPD0_LBN 12
/* Amplitude on lanes 0, 1 */
#define TXC_ALRGS_ATXAMP0 0xc041
/* Amplitude on lanes 2, 3 */
#define TXC_ALRGS_ATXAMP1 0xc042
/* Bit position of value for lane 0 (or 2) */
#define TXC_ATXAMP_LANE02_LBN 3
/* Bit position of value for lane 1 (or 3) */
#define TXC_ATXAMP_LANE13_LBN 11
#define TXC_ATXAMP_1280_mV 0
#define TXC_ATXAMP_1200_mV 8
#define TXC_ATXAMP_1120_mV 12
#define TXC_ATXAMP_1060_mV 14
#define TXC_ATXAMP_0820_mV 25
#define TXC_ATXAMP_0720_mV 26
#define TXC_ATXAMP_0580_mV 27
#define TXC_ATXAMP_0440_mV 28
#define TXC_ATXAMP_0820_BOTH \
((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \
| (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))
#define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */
/* Preemphasis on lanes 0, 1 */
#define TXC_ALRGS_ATXPRE0 0xc043
/* Preemphasis on lanes 2, 3 */
#define TXC_ALRGS_ATXPRE1 0xc044
#define TXC_ATXPRE_NONE 0
#define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */
#define TXC_ALRGS_ARXCTL 0xc045
/* Lane power-down */
#define TXC_ARXCTL_RXPD3_LBN 15
#define TXC_ARXCTL_RXPD2_LBN 14
#define TXC_ARXCTL_RXPD1_LBN 13
#define TXC_ARXCTL_RXPD0_LBN 12
/* Main control */
#define TXC_MRGS_CTL 0xc340
/* Bits in main control */
#define TXC_MCTL_RESET_LBN 15 /* Self clear */
#define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */
#define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */
/* GPIO output */
#define TXC_GPIO_OUTPUT 0xc346
#define TXC_GPIO_DIR 0xc348
/* Vendor-specific BIST registers */
#define TXC_BIST_CTL 0xc280
#define TXC_BIST_TXFRMCNT 0xc281
#define TXC_BIST_RX0FRMCNT 0xc282
#define TXC_BIST_RX1FRMCNT 0xc283
#define TXC_BIST_RX2FRMCNT 0xc284
#define TXC_BIST_RX3FRMCNT 0xc285
#define TXC_BIST_RX0ERRCNT 0xc286
#define TXC_BIST_RX1ERRCNT 0xc287
#define TXC_BIST_RX2ERRCNT 0xc288
#define TXC_BIST_RX3ERRCNT 0xc289
/* BIST type (controls bit pattern in test) */
#define TXC_BIST_CTRL_TYPE_LBN 10
#define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */
#define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */
#define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */
#define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */
/* Set this to 1 for 10 bit and 0 for 8 bit */
#define TXC_BIST_CTRL_B10EN_LBN 12
/* Enable BIST (write 0 to disable) */
#define TXC_BIST_CTRL_ENAB_LBN 13
/* Stop BIST (self-clears when stop complete) */
#define TXC_BIST_CTRL_STOP_LBN 14
/* Start BIST (cleared by writing 1 to STOP) */
#define TXC_BIST_CTRL_STRT_LBN 15
/* Mt. Diablo test configuration */
#define TXC_MTDIABLO_CTRL 0xc34f
#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10
struct txc43128_data {
unsigned long bug10934_timer;
enum ef4_phy_mode phy_mode;
enum ef4_loopback_mode loopback_mode;
};
/* The PHY sometimes needs a reset to bring the link back up. So long as
* it reports link down, we reset it every 5 seconds.
*/
#define BUG10934_RESET_INTERVAL (5 * HZ)
/* Perform a reset that doesn't clear configuration changes */
static void txc_reset_logic(struct ef4_nic *efx);
/* Set the output value of a gpio */
void falcon_txc_set_gpio_val(struct ef4_nic *efx, int pin, int on)
{
ef4_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
}
/* Set up the GPIO direction register */
void falcon_txc_set_gpio_dir(struct ef4_nic *efx, int pin, int dir)
{
ef4_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
}
/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
 * global reset (it's less clear what reset of other MMDs does). */
static int txc_reset_phy(struct ef4_nic *efx)
{
int rc = ef4_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
TXC_RESET_WAIT);
if (rc < 0)
goto fail;
/* Check that all the MMDs we expect are present and responding. */
rc = ef4_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
if (rc < 0)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n");
return rc;
}
/* Run a single BIST on one MMD */
static int txc_bist_one(struct ef4_nic *efx, int mmd, int test)
{
int ctrl, bctl;
int lane;
int rc = 0;
/* Set PMA to test into loopback using Mt Diablo reg as per app note */
ctrl = ef4_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
ef4_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
/* The BIST app. note lists these as 3 distinct steps. */
/* Set the BIST type */
bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
ef4_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* Set the BSTEN bit in the BIST Control register to enable */
bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
ef4_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* Set the BSTRT bit in the BIST Control register */
ef4_mdio_write(efx, mmd, TXC_BIST_CTL,
bctl | (1 << TXC_BIST_CTRL_STRT_LBN));
/* Wait. */
udelay(TXC_BIST_DURATION);
/* Set the BSTOP bit in the BIST Control register */
bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
ef4_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* The STOP bit should go off when things have stopped */
while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
bctl = ef4_mdio_read(efx, mmd, TXC_BIST_CTL);
	/* Check that all the error counts are 0 and all the frame counts
	 * are non-zero */
for (lane = 0; lane < 4; lane++) {
int count = ef4_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
if (count != 0) {
netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
"Lane %d had %d errs\n", lane, count);
rc = -EIO;
}
count = ef4_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
if (count == 0) {
netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
"Lane %d got 0 frames\n", lane);
rc = -EIO;
}
}
if (rc == 0)
netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");
/* Disable BIST */
ef4_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
/* Turn off loopback */
ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
ef4_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
return rc;
}
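/* Illustration, not driver code: the app-note sequence above is three
 * writes to the same BIST control register -- select the pattern, set the
 * enable bit, then set the start bit -- followed by a stop that must be
 * polled until it self-clears. A minimal sketch with a stub in place of
 * ef4_mdio_write(); compile it separately if you want to run it.
 */
#if 0
#include <stdio.h>

#define BIST_CTL	0xc280
#define TYPE_LBN	10
#define ENAB_LBN	13
#define STRT_LBN	15

static void mdio_write(int reg, int val)	/* stand-in */
{
	printf("write %#x -> reg %#x\n", val, reg);
}

int main(void)
{
	int bctl = 0 << TYPE_LBN;	/* 1: select test pattern */

	mdio_write(BIST_CTL, bctl);
	bctl |= 1 << ENAB_LBN;		/* 2: enable BIST */
	mdio_write(BIST_CTL, bctl);
	mdio_write(BIST_CTL, bctl | (1 << STRT_LBN));	/* 3: start */
	return 0;
}
#endif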
static int txc_bist(struct ef4_nic *efx)
{
return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
}
/* Push the non-configurable defaults into the PHY. This must be
* done after every full reset */
static void txc_apply_defaults(struct ef4_nic *efx)
{
int mctrl;
/* Turn amplitude down and preemphasis off on the host side
* (PHY<->MAC) as this is believed less likely to upset Falcon
* and no adverse effects have been noted. It probably also
* saves a picowatt or two */
/* Turn off preemphasis */
ef4_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
ef4_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
/* Turn down the amplitude */
ef4_mdio_write(efx, MDIO_MMD_PHYXS,
TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
ef4_mdio_write(efx, MDIO_MMD_PHYXS,
TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);
	/* Set the line side amplitude and preemphasis to the databook
	 * defaults as an erratum causes them to be 0 on at least some
	 * PHY revisions */
ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);
/* Set up the LEDs */
mctrl = ef4_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
/* Set the Green and Red LEDs to their default modes */
mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
ef4_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
/* Databook recommends doing this after configuration changes */
txc_reset_logic(efx);
falcon_board(efx)->type->init_phy(efx);
}
static int txc43128_phy_probe(struct ef4_nic *efx)
{
struct txc43128_data *phy_data;
/* Allocate phy private storage */
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
phy_data->phy_mode = efx->phy_mode;
efx->mdio.mmds = TXC_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
return 0;
}

/* Initialisation entry point for this PHY driver */
static int txc43128_phy_init(struct ef4_nic *efx)
{
int rc;
rc = txc_reset_phy(efx);
if (rc < 0)
return rc;
rc = txc_bist(efx);
if (rc < 0)
return rc;
txc_apply_defaults(efx);
return 0;
}

/* Set the lane power down state in the global registers */
static void txc_glrgs_lane_power(struct ef4_nic *efx, int mmd)
{
int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
int ctl = ef4_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
ctl &= ~pd;
else
ctl |= pd;
ef4_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
}

/* Set the lane power down state in the analog control registers */
static void txc_analog_lane_power(struct ef4_nic *efx, int mmd)
{
int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
| (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
| (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
int txctl = ef4_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
int rxctl = ef4_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
txctl &= ~txpd;
rxctl &= ~rxpd;
} else {
txctl |= txpd;
rxctl |= rxpd;
}
ef4_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
ef4_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
}
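
/* Push the current PHY_MODE_LOW_POWER state out to every MMD and to the
 * per-lane power-down controls */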
static void txc_set_power(struct ef4_nic *efx)
{
/* According to the data book, all the MMDs can do low power */
ef4_mdio_set_mmds_lpower(efx,
!!(efx->phy_mode & PHY_MODE_LOW_POWER),
TXC_REQUIRED_DEVS);
/* Global register bank is in PCS, PHY XS. These control the host
* side and line side settings respectively. */
txc_glrgs_lane_power(efx, MDIO_MMD_PCS);
txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS);
/* Analog register bank in PMA/PMD, PHY XS */
txc_analog_lane_power(efx, MDIO_MMD_PMAPMD);
txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
}
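
/* Start a logic reset on one MMD and poll (~50us) for the self-clearing
 * reset bit to drop */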
static void txc_reset_logic_mmd(struct ef4_nic *efx, int mmd)
{
int val = ef4_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
int tries = 50;
val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
ef4_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
while (--tries) {
val = ef4_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
break;
udelay(1);
}
if (!tries)
netif_info(efx, hw, efx->net_dev,
TXCNAME " Logic reset timed out!\n");
}

/* Perform a logic reset. This preserves the configuration registers
* and is needed for some configuration changes to take effect */
static void txc_reset_logic(struct ef4_nic *efx)
{
/* The data sheet claims we can do the logic reset on either the
* PCS or the PHYXS and the result is a reset of both host- and
* line-side logic. */
txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
}
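
/* The link is reported up only if all the required MMDs see link */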
static bool txc43128_phy_read_link(struct ef4_nic *efx)
{
return ef4_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
}

static int txc43128_phy_reconfigure(struct ef4_nic *efx)
{
struct txc43128_data *phy_data = efx->phy_data;
enum ef4_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);
if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
txc_reset_phy(efx);
txc_apply_defaults(efx);
falcon_reset_xaui(efx);
mode_change &= ~PHY_MODE_TX_DISABLED;
}
ef4_mdio_transmit_disable(efx);
ef4_mdio_phy_reconfigure(efx);
if (mode_change & PHY_MODE_LOW_POWER)
txc_set_power(efx);
	/* The data sheet claims this is required after every reconfiguration
	 * (note at end of 7.1), but we mustn't do it when nothing changes as
	 * it glitches the link, and reconfigure gets called on link change,
	 * so we'd get an IRQ storm on link up. */
if (loop_change || mode_change)
txc_reset_logic(efx);
phy_data->phy_mode = efx->phy_mode;
phy_data->loopback_mode = efx->loopback_mode;
return 0;
}

static void txc43128_phy_fini(struct ef4_nic *efx)
{
/* Disable link events */
ef4_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}

static void txc43128_phy_remove(struct ef4_nic *efx)
{
kfree(efx->phy_data);
efx->phy_data = NULL;
}

/* Periodic callback: this exists mainly to poll link status as we
* don't use LASI interrupts */
static bool txc43128_phy_poll(struct ef4_nic *efx)
{
struct txc43128_data *data = efx->phy_data;
bool was_up = efx->link_state.up;
efx->link_state.up = txc43128_phy_read_link(efx);
efx->link_state.speed = 10000;
efx->link_state.fd = true;
efx->link_state.fc = efx->wanted_fc;
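
	/* Workaround for Solarflare bug 10934 (going by the identifier
	 * names): if the link stays down outside of loopback, reset the
	 * PHY logic every BUG10934_RESET_INTERVAL in case it has wedged. */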
if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) {
data->bug10934_timer = jiffies;
} else {
if (time_after_eq(jiffies, (data->bug10934_timer +
BUG10934_RESET_INTERVAL))) {
data->bug10934_timer = jiffies;
txc_reset_logic(efx);
}
}
return efx->link_state.up != was_up;
}

static const char *const txc43128_test_names[] = {
"bist"
};

static const char *txc43128_test_name(struct ef4_nic *efx, unsigned int index)
{
if (index < ARRAY_SIZE(txc43128_test_names))
return txc43128_test_names[index];
return NULL;
}

static int txc43128_run_tests(struct ef4_nic *efx, int *results, unsigned flags)
{
int rc;
if (!(flags & ETH_TEST_FL_OFFLINE))
return 0;
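
	/* BIST needs a freshly reset PHY; reapply the defaults afterwards,
	 * since a full reset clobbers them */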
rc = txc_reset_phy(efx);
if (rc < 0)
return rc;
rc = txc_bist(efx);
txc_apply_defaults(efx);
results[0] = rc ? -1 : 1;
return rc;
}

static void txc43128_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
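
/* PHY operations table. This is hooked up by the Falcon PHY probe code
 * elsewhere in the driver (not part of this hunk), presumably along the
 * lines of: efx->phy_op = &falcon_txc_phy_ops; */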
const struct ef4_phy_operations falcon_txc_phy_ops = {
.probe = txc43128_phy_probe,
.init = txc43128_phy_init,
.reconfigure = txc43128_phy_reconfigure,
.poll = txc43128_phy_poll,
.fini = txc43128_phy_fini,
.remove = txc43128_phy_remove,
.get_settings = txc43128_get_settings,
.set_settings = ef4_mdio_set_settings,
.test_alive = ef4_mdio_test_alive,
.run_tests = txc43128_run_tests,
.test_name = txc43128_test_name,
};

View File

@@ -0,0 +1,44 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EF4_WORKAROUNDS_H
#define EF4_WORKAROUNDS_H

/*
* Hardware workarounds.
* Bug numbers are from Solarflare's Bugzilla.
 */

#define EF4_WORKAROUND_FALCON_A(efx) (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1)
#define EF4_WORKAROUND_FALCON_AB(efx) (ef4_nic_rev(efx) <= EF4_REV_FALCON_B0)
#define EF4_WORKAROUND_10G(efx) 1
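
/* Each numbered workaround below maps a Solarflare bug onto one of the
 * revision predicates above; callers test them as EF4_WORKAROUND_nnnn(efx). */
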
/* Bit-bashed I2C reads cause performance drop */
#define EF4_WORKAROUND_7884 EF4_WORKAROUND_10G
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EF4_WORKAROUND_15592 EF4_WORKAROUND_FALCON_AB
/* Spurious parity errors in TSORT buffers */
#define EF4_WORKAROUND_5129 EF4_WORKAROUND_FALCON_A
/* Unaligned read request >512 bytes after aligning may break TSORT */
#define EF4_WORKAROUND_5391 EF4_WORKAROUND_FALCON_A
/* iSCSI parsing errors */
#define EF4_WORKAROUND_5583 EF4_WORKAROUND_FALCON_A
/* RX events go missing */
#define EF4_WORKAROUND_5676 EF4_WORKAROUND_FALCON_A
/* RX_RESET on A1 */
#define EF4_WORKAROUND_6555 EF4_WORKAROUND_FALCON_A
/* Increase filter depth to avoid RX_RESET */
#define EF4_WORKAROUND_7244 EF4_WORKAROUND_FALCON_A
/* Flushes may never complete */
#define EF4_WORKAROUND_7803 EF4_WORKAROUND_FALCON_AB
/* Leak overlength packets rather than free */
#define EF4_WORKAROUND_8071 EF4_WORKAROUND_FALCON_A

#endif /* EF4_WORKAROUNDS_H */