sfc: Move the Solarflare drivers

Move the Solarflare drivers into drivers/net/ethernet/sfc/ and
make the necessary Kconfig and Makefile changes.

CC: Steve Hodgson <shodgson@solarflare.com>
CC: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Author: Jeff Kirsher
Date:   2011-05-13 00:17:42 -07:00
Parent: e689cf4a04
Commit: 874aeea5d0
42 changed files with 5 additions and 6 deletions

drivers/net/ethernet/sfc/Kconfig

@@ -0,0 +1,21 @@
config SFC
tristate "Solarflare SFC4000/SFC9000-family support"
depends on PCI && INET
select MDIO
select CRC32
select I2C
select I2C_ALGOBIT
---help---
This driver supports 10-gigabit Ethernet cards based on
the Solarflare SFC4000 and SFC9000-family controllers.
To compile this driver as a module, choose M here. The module
will be called sfc.
config SFC_MTD
bool "Solarflare SFC4000/SFC9000-family MTD support"
depends on SFC && MTD && !(SFC=y && MTD=m)
default y
---help---
This exposes the on-board flash memory as MTD devices (e.g.
/dev/mtd1). This makes it possible to upload new firmware
to the NIC.

drivers/net/ethernet/sfc/Makefile

@@ -0,0 +1,8 @@
sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
falcon_xmac.o mcdi_mac.o \
selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
mcdi.o mcdi_phy.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
obj-$(CONFIG_SFC) += sfc.o

drivers/net/ethernet/sfc/bitfield.h

@@ -0,0 +1,538 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2009 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_BITFIELD_H
#define EFX_BITFIELD_H
/*
* Efx bitfield access
*
* Efx NICs make extensive use of bitfields up to 128 bits
* wide. Since there is no native 128-bit datatype on most systems,
* and since 64-bit datatypes are inefficient on 32-bit systems and
* vice versa, we wrap accesses in a way that uses the most efficient
* datatype.
*
* The NICs are PCI devices and therefore little-endian. Since most
* of the quantities that we deal with are DMAed to/from host memory,
* we define our datatypes (efx_oword_t, efx_qword_t and
* efx_dword_t) to be little-endian.
*/
/* Lowest bit numbers and widths */
#define EFX_DUMMY_FIELD_LBN 0
#define EFX_DUMMY_FIELD_WIDTH 0
#define EFX_DWORD_0_LBN 0
#define EFX_DWORD_0_WIDTH 32
#define EFX_DWORD_1_LBN 32
#define EFX_DWORD_1_WIDTH 32
#define EFX_DWORD_2_LBN 64
#define EFX_DWORD_2_WIDTH 32
#define EFX_DWORD_3_LBN 96
#define EFX_DWORD_3_WIDTH 32
#define EFX_QWORD_0_LBN 0
#define EFX_QWORD_0_WIDTH 64
/* Specified attribute (e.g. LBN) of the specified field */
#define EFX_VAL(field, attribute) field ## _ ## attribute
/* Low bit number of the specified field */
#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
/* Bit width of the specified field */
#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
/* High bit number of the specified field */
#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
/* Mask equal in width to the specified field.
*
* For example, a field with width 5 would have a mask of 0x1f.
*
* The maximum width mask that can be generated is 64 bits.
*/
#define EFX_MASK64(width) \
((width) == 64 ? ~((u64) 0) : \
(((((u64) 1) << (width))) - 1))
/* Mask equal in width to the specified field.
*
* For example, a field with width 5 would have a mask of 0x1f.
*
* The maximum width mask that can be generated is 32 bits. Use
* EFX_MASK64 for higher width fields.
*/
#define EFX_MASK32(width) \
((width) == 32 ? ~((u32) 0) : \
(((((u32) 1) << (width))) - 1))
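/* Illustration of the mask macros above (example values, not from the
 * original header):
 *	EFX_MASK64(5)  == 0x1f		(five low-order bits set)
 *	EFX_MASK32(32) == 0xffffffff	(special-cased: a plain
 *					 (u32) 1 << 32 would be an
 *					 undefined full-width shift)
 */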
/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
typedef union efx_dword {
__le32 u32[1];
} efx_dword_t;
/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
typedef union efx_qword {
__le64 u64[1];
__le32 u32[2];
efx_dword_t dword[2];
} efx_qword_t;
/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
typedef union efx_oword {
__le64 u64[2];
efx_qword_t qword[2];
__le32 u32[4];
efx_dword_t dword[4];
} efx_oword_t;
/* Format string and value expanders for printk */
#define EFX_DWORD_FMT "%08x"
#define EFX_QWORD_FMT "%08x:%08x"
#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
#define EFX_DWORD_VAL(dword) \
((unsigned int) le32_to_cpu((dword).u32[0]))
#define EFX_QWORD_VAL(qword) \
((unsigned int) le32_to_cpu((qword).u32[1])), \
((unsigned int) le32_to_cpu((qword).u32[0]))
#define EFX_OWORD_VAL(oword) \
((unsigned int) le32_to_cpu((oword).u32[3])), \
((unsigned int) le32_to_cpu((oword).u32[2])), \
((unsigned int) le32_to_cpu((oword).u32[1])), \
((unsigned int) le32_to_cpu((oword).u32[0]))
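/* Sketch of how the format and value expanders pair up: EFX_OWORD_FMT
 * supplies four %08x conversions and EFX_OWORD_VAL the four matching
 * arguments. The surrounding call is illustrative only:
 *
 *	efx_oword_t reg;
 *	... reg filled in, e.g. from a register read ...
 *	pr_debug("register = " EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
 */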
/*
* Extract bit field portion [low,high) from the native-endian element
* which contains bits [min,max).
*
* For example, suppose "element" represents the high 32 bits of a
* 64-bit value, and we wish to extract the bits belonging to the bit
* field occupying bits 28-45 of this 64-bit value.
*
* Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
*
* ( element ) << 4
*
* The result will contain the relevant bits filled in in the range
* [0,high-low), with garbage in bits [high-low+1,...).
*/
#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
(((low > max) || (high < min)) ? 0 : \
((low > min) ? \
((native_element) >> (low - min)) : \
((native_element) << (min - low))))
/*
* Extract bit field portion [low,high) from the 64-bit little-endian
* element which contains bits [min,max)
*/
#define EFX_EXTRACT64(element, min, max, low, high) \
EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
/*
* Extract bit field portion [low,high) from the 32-bit little-endian
* element which contains bits [min,max)
*/
#define EFX_EXTRACT32(element, min, max, low, high) \
EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
#define EFX_EXTRACT_OWORD64(oword, low, high) \
((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
EFX_MASK64(high + 1 - low))
#define EFX_EXTRACT_QWORD64(qword, low, high) \
(EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
EFX_MASK64(high + 1 - low))
#define EFX_EXTRACT_OWORD32(oword, low, high) \
((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
EFX_MASK32(high + 1 - low))
#define EFX_EXTRACT_QWORD32(qword, low, high) \
((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
EFX_MASK32(high + 1 - low))
#define EFX_EXTRACT_DWORD(dword, low, high) \
(EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
EFX_MASK32(high + 1 - low))
#define EFX_OWORD_FIELD64(oword, field) \
EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field))
#define EFX_QWORD_FIELD64(qword, field) \
EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field))
#define EFX_OWORD_FIELD32(oword, field) \
EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field))
#define EFX_QWORD_FIELD32(qword, field) \
EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field))
#define EFX_DWORD_FIELD(dword, field) \
EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field))
#define EFX_OWORD_IS_ZERO64(oword) \
(((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
#define EFX_QWORD_IS_ZERO64(qword) \
(((qword).u64[0]) == (__force __le64) 0)
#define EFX_OWORD_IS_ZERO32(oword) \
(((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
== (__force __le32) 0)
#define EFX_QWORD_IS_ZERO32(qword) \
(((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
#define EFX_DWORD_IS_ZERO(dword) \
(((dword).u32[0]) == (__force __le32) 0)
#define EFX_OWORD_IS_ALL_ONES64(oword) \
(((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
#define EFX_QWORD_IS_ALL_ONES64(qword) \
((qword).u64[0] == ~((__force __le64) 0))
#define EFX_OWORD_IS_ALL_ONES32(oword) \
(((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
== ~((__force __le32) 0))
#define EFX_QWORD_IS_ALL_ONES32(qword) \
(((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
#define EFX_DWORD_IS_ALL_ONES(dword) \
((dword).u32[0] == ~((__force __le32) 0))
#if BITS_PER_LONG == 64
#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64
#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64
#else
#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32
#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32
#endif
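/* Sketch of reading a named field, assuming a hypothetical field
 * defined in the usual _LBN/_WIDTH pattern:
 *
 *	#define EXAMPLE_FIELD_LBN 12
 *	#define EXAMPLE_FIELD_WIDTH 4
 *
 *	efx_oword_t reg;	// filled in from hardware or DMA
 *	unsigned int v = EFX_OWORD_FIELD(reg, EXAMPLE_FIELD);
 *
 * This extracts bits 12..15 of the 128-bit value, already masked to
 * the field width and shifted down to bit 0.
 */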
/*
* Construct bit field portion
*
* Creates the portion of the bit field [low,high) that lies within
* the range [min,max).
*/
#define EFX_INSERT_NATIVE64(min, max, low, high, value) \
(((low > max) || (high < min)) ? 0 : \
((low > min) ? \
(((u64) (value)) << (low - min)) : \
(((u64) (value)) >> (min - low))))
#define EFX_INSERT_NATIVE32(min, max, low, high, value) \
(((low > max) || (high < min)) ? 0 : \
((low > min) ? \
(((u32) (value)) << (low - min)) : \
(((u32) (value)) >> (min - low))))
#define EFX_INSERT_NATIVE(min, max, low, high, value) \
((((max - min) >= 32) || ((high - low) >= 32)) ? \
EFX_INSERT_NATIVE64(min, max, low, high, value) : \
EFX_INSERT_NATIVE32(min, max, low, high, value))
/*
* Construct bit field portion
*
* Creates the portion of the named bit field that lies within the
* range [min,max).
*/
#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \
EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field), value)
/*
* Construct bit field
*
* Creates the portion of the named bit fields that lie within the
* range [min,max).
*/
#define EFX_INSERT_FIELDS_NATIVE(min, max, \
field1, value1, \
field2, value2, \
field3, value3, \
field4, value4, \
field5, value5, \
field6, value6, \
field7, value7, \
field8, value8, \
field9, value9, \
field10, value10) \
(EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
#define EFX_INSERT_FIELDS64(...) \
cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
#define EFX_INSERT_FIELDS32(...) \
cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
#define EFX_POPULATE_OWORD64(oword, ...) do { \
(oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
(oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
} while (0)
#define EFX_POPULATE_QWORD64(qword, ...) do { \
(qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
} while (0)
#define EFX_POPULATE_OWORD32(oword, ...) do { \
(oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
(oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
(oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
(oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
} while (0)
#define EFX_POPULATE_QWORD32(qword, ...) do { \
(qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
(qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
} while (0)
#define EFX_POPULATE_DWORD(dword, ...) do { \
(dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
} while (0)
#if BITS_PER_LONG == 64
#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
#else
#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
#endif
/* Populate an octword field with various numbers of arguments */
#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
#define EFX_POPULATE_OWORD_9(oword, ...) \
EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_8(oword, ...) \
EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_7(oword, ...) \
EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_6(oword, ...) \
EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_5(oword, ...) \
EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_4(oword, ...) \
EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_3(oword, ...) \
EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_2(oword, ...) \
EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_1(oword, ...) \
EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_ZERO_OWORD(oword) \
EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
#define EFX_SET_OWORD(oword) \
EFX_POPULATE_OWORD_4(oword, \
EFX_DWORD_0, 0xffffffff, \
EFX_DWORD_1, 0xffffffff, \
EFX_DWORD_2, 0xffffffff, \
EFX_DWORD_3, 0xffffffff)
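/* Each numbered wrapper pads the argument list with EFX_DUMMY_FIELD
 * pairs until EFX_POPULATE_OWORD sees ten field/value pairs, so a
 * caller names only the fields it cares about and all other bits are
 * zeroed. Sketch with hypothetical field names:
 *
 *	EFX_POPULATE_OWORD_2(reg,
 *			     EXAMPLE_FIELD_A, 1,
 *			     EXAMPLE_FIELD_B, 0x3);
 */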
/* Populate a quadword field with various numbers of arguments */
#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
#define EFX_POPULATE_QWORD_9(qword, ...) \
EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_8(qword, ...) \
EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_7(qword, ...) \
EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_6(qword, ...) \
EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_5(qword, ...) \
EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_4(qword, ...) \
EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_3(qword, ...) \
EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_2(qword, ...) \
EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_1(qword, ...) \
EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_ZERO_QWORD(qword) \
EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
#define EFX_SET_QWORD(qword) \
EFX_POPULATE_QWORD_2(qword, \
EFX_DWORD_0, 0xffffffff, \
EFX_DWORD_1, 0xffffffff)
/* Populate a dword field with various numbers of arguments */
#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
#define EFX_POPULATE_DWORD_9(dword, ...) \
EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_8(dword, ...) \
EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_7(dword, ...) \
EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_6(dword, ...) \
EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_5(dword, ...) \
EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_4(dword, ...) \
EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_3(dword, ...) \
EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_2(dword, ...) \
EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_1(dword, ...) \
EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_ZERO_DWORD(dword) \
EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
#define EFX_SET_DWORD(dword) \
EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
/*
* Modify a named field within an already-populated structure. Used
* for read-modify-write operations.
*
*/
#define EFX_INVERT_OWORD(oword) do { \
(oword).u64[0] = ~((oword).u64[0]); \
(oword).u64[1] = ~((oword).u64[1]); \
} while (0)
#define EFX_AND_OWORD(oword, from, mask) \
do { \
(oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
(oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
} while (0)
#define EFX_OR_OWORD(oword, from, mask) \
do { \
(oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
(oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
} while (0)
#define EFX_INSERT64(min, max, low, high, value) \
cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value))
#define EFX_INSERT32(min, max, low, high, value) \
cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
#define EFX_INPLACE_MASK64(min, max, low, high) \
EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low))
#define EFX_INPLACE_MASK32(min, max, low, high) \
EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
#define EFX_SET_OWORD64(oword, low, high, value) do { \
(oword).u64[0] = (((oword).u64[0] \
& ~EFX_INPLACE_MASK64(0, 63, low, high)) \
| EFX_INSERT64(0, 63, low, high, value)); \
(oword).u64[1] = (((oword).u64[1] \
& ~EFX_INPLACE_MASK64(64, 127, low, high)) \
| EFX_INSERT64(64, 127, low, high, value)); \
} while (0)
#define EFX_SET_QWORD64(qword, low, high, value) do { \
(qword).u64[0] = (((qword).u64[0] \
& ~EFX_INPLACE_MASK64(0, 63, low, high)) \
| EFX_INSERT64(0, 63, low, high, value)); \
} while (0)
#define EFX_SET_OWORD32(oword, low, high, value) do { \
(oword).u32[0] = (((oword).u32[0] \
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
| EFX_INSERT32(0, 31, low, high, value)); \
(oword).u32[1] = (((oword).u32[1] \
& ~EFX_INPLACE_MASK32(32, 63, low, high)) \
| EFX_INSERT32(32, 63, low, high, value)); \
(oword).u32[2] = (((oword).u32[2] \
& ~EFX_INPLACE_MASK32(64, 95, low, high)) \
| EFX_INSERT32(64, 95, low, high, value)); \
(oword).u32[3] = (((oword).u32[3] \
& ~EFX_INPLACE_MASK32(96, 127, low, high)) \
| EFX_INSERT32(96, 127, low, high, value)); \
} while (0)
#define EFX_SET_QWORD32(qword, low, high, value) do { \
(qword).u32[0] = (((qword).u32[0] \
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
| EFX_INSERT32(0, 31, low, high, value)); \
(qword).u32[1] = (((qword).u32[1] \
& ~EFX_INPLACE_MASK32(32, 63, low, high)) \
| EFX_INSERT32(32, 63, low, high, value)); \
} while (0)
#define EFX_SET_DWORD32(dword, low, high, value) do { \
(dword).u32[0] = (((dword).u32[0] \
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
| EFX_INSERT32(0, 31, low, high, value)); \
} while (0)
#define EFX_SET_OWORD_FIELD64(oword, field, value) \
EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field), value)
#define EFX_SET_QWORD_FIELD64(qword, field, value) \
EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field), value)
#define EFX_SET_OWORD_FIELD32(oword, field, value) \
EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field), value)
#define EFX_SET_QWORD_FIELD32(qword, field, value) \
EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field), value)
#define EFX_SET_DWORD_FIELD(dword, field, value) \
EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \
EFX_HIGH_BIT(field), value)
#if BITS_PER_LONG == 64
#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
#else
#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
#endif
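/* These setters are the core of the driver's read-modify-write idiom,
 * used throughout this commit (see falcon_xmac.c below). A minimal
 * sketch, with illustrative register/field names:
 *
 *	efx_oword_t reg;
 *	efx_reado(efx, &reg, FR_AB_EXAMPLE_REG);
 *	EFX_SET_OWORD_FIELD(reg, EXAMPLE_FIELD, 1);
 *	efx_writeo(efx, &reg, FR_AB_EXAMPLE_REG);
 */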
/* Used to avoid compiler warnings about shift range exceeding width
* of the data types when dma_addr_t is only 32 bits wide.
*/
#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
#define EFX_DMA_TYPE_WIDTH(width) \
(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
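/* Worked example: on a platform where dma_addr_t is 32 bits wide,
 * DMA_ADDR_T_WIDTH == 32 and EFX_DMA_TYPE_WIDTH(46) == 32, so a
 * nominally 46-bit DMA field never produces a shift count beyond the
 * width of the type. */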
/* Static initialiser */
#define EFX_OWORD32(a, b, c, d) \
{ .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
cpu_to_le32(c), cpu_to_le32(d) } }
#endif /* EFX_BITFIELD_H */

File diff suppressed because it is too large

drivers/net/ethernet/sfc/efx.h

@@ -0,0 +1,147 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_EFX_H
#define EFX_EFX_H
#include "net_driver.h"
#include "filter.h"
/* PCI IDs */
#define EFX_VENDID_SFC 0x1924
#define FALCON_A_P_DEVID 0x0703
#define FALCON_A_S_DEVID 0x6703
#define FALCON_B_P_DEVID 0x0710
#define BETHPAGE_A_P_DEVID 0x0803
#define SIENA_A_P_DEVID 0x0813
/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
#define EFX_MEM_BAR 2
/* TX */
extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
extern netdev_tx_t
efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
/* RX */
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_rx_strategy(struct efx_channel *channel);
extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
extern void efx_rx_slow_fill(unsigned long context);
extern void __efx_rx_packet(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf, bool checksummed);
extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard);
extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
#define EFX_MIN_DMAQ_SIZE 512UL
#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL
/* The smallest [rt]xq_entries that the driver supports. Callers of
* efx_wake_queue() assume that they can subsequently send at least one
* skb. Falcon/A1 may require up to three descriptors per skb_frag. */
#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
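/* Worked example, assuming MAX_SKB_FRAGS == 18 (its value with 4 KiB
 * pages in kernels of this era): roundup_pow_of_two(2 * 3 * 18) ==
 * roundup_pow_of_two(108) == 128 ring entries. */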
/* Filters */
extern int efx_probe_filters(struct efx_nic *efx);
extern void efx_restore_filters(struct efx_nic *efx);
extern void efx_remove_filters(struct efx_nic *efx);
extern int efx_filter_insert_filter(struct efx_nic *efx,
struct efx_filter_spec *spec,
bool replace);
extern int efx_filter_remove_filter(struct efx_nic *efx,
struct efx_filter_spec *spec);
extern void efx_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority);
#ifdef CONFIG_RFS_ACCEL
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
static inline void efx_filter_rfs_expire(struct efx_channel *channel)
{
if (channel->rfs_filters_added >= 60 &&
__efx_filter_rfs_expire(channel->efx, 100))
channel->rfs_filters_added -= 60;
}
#define efx_filter_rfs_enabled() 1
#else
static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
#define efx_filter_rfs_enabled() 0
#endif
/* Channels */
extern void efx_process_channel_now(struct efx_channel *channel);
extern int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
/* Ports */
extern int efx_reconfigure_port(struct efx_nic *efx);
extern int __efx_reconfigure_port(struct efx_nic *efx);
/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;
/* Reset handling */
extern int efx_reset(struct efx_nic *efx, enum reset_type method);
extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
int rx_usecs, bool rx_adaptive);
/* Dummy PHY ops for PHY drivers */
extern int efx_port_dummy_op_int(struct efx_nic *efx);
extern void efx_port_dummy_op_void(struct efx_nic *efx);
/* MTD */
#ifdef CONFIG_SFC_MTD
extern int efx_mtd_probe(struct efx_nic *efx);
extern void efx_mtd_rename(struct efx_nic *efx);
extern void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif
static inline void efx_schedule_channel(struct efx_channel *channel)
{
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d scheduling NAPI poll on CPU%d\n",
channel->channel, raw_smp_processor_id());
channel->work_pending = true;
napi_schedule(&channel->napi_str);
}
extern void efx_link_status_changed(struct efx_nic *efx);
extern void efx_link_set_advertising(struct efx_nic *efx, u32);
extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
#endif /* EFX_EFX_H */

drivers/net/ethernet/sfc/enum.h

@@ -0,0 +1,167 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2007-2009 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_ENUM_H
#define EFX_ENUM_H
/**
* enum efx_loopback_mode - loopback modes
* @LOOPBACK_NONE: no loopback
* @LOOPBACK_DATA: data path loopback
* @LOOPBACK_GMAC: loopback within GMAC
* @LOOPBACK_XGMII: loopback after XMAC
* @LOOPBACK_XGXS: loopback within BPX after XGXS
* @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
* @LOOPBACK_GMII: loopback within BPX after GMAC
* @LOOPBACK_SGMII: loopback within BPX within SGMII
* @LOOPBACK_XGBR: loopback within BPX within XGBR
* @LOOPBACK_XFI: loopback within BPX before XFI serdes
* @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
* @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
* @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
* @LOOPBACK_XFI_FAR: loopback after XFI serdes
* @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
* @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
* @LOOPBACK_PCS: loopback within 10G PHY at PCS level
* @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
* @LOOPBACK_XPORT: cross port loopback
* @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
* @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
* @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
* @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
* @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
* @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
* @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
* @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
*/
/* Please keep up-to-date w.r.t the following two #defines */
enum efx_loopback_mode {
LOOPBACK_NONE = 0,
LOOPBACK_DATA = 1,
LOOPBACK_GMAC = 2,
LOOPBACK_XGMII = 3,
LOOPBACK_XGXS = 4,
LOOPBACK_XAUI = 5,
LOOPBACK_GMII = 6,
LOOPBACK_SGMII = 7,
LOOPBACK_XGBR = 8,
LOOPBACK_XFI = 9,
LOOPBACK_XAUI_FAR = 10,
LOOPBACK_GMII_FAR = 11,
LOOPBACK_SGMII_FAR = 12,
LOOPBACK_XFI_FAR = 13,
LOOPBACK_GPHY = 14,
LOOPBACK_PHYXS = 15,
LOOPBACK_PCS = 16,
LOOPBACK_PMAPMD = 17,
LOOPBACK_XPORT = 18,
LOOPBACK_XGMII_WS = 19,
LOOPBACK_XAUI_WS = 20,
LOOPBACK_XAUI_WS_FAR = 21,
LOOPBACK_XAUI_WS_NEAR = 22,
LOOPBACK_GMII_WS = 23,
LOOPBACK_XFI_WS = 24,
LOOPBACK_XFI_WS_FAR = 25,
LOOPBACK_PHYXS_WS = 26,
LOOPBACK_MAX
};
#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
/* These loopbacks occur within the controller */
#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \
(1 << LOOPBACK_GMAC) | \
(1 << LOOPBACK_XGMII)| \
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI) | \
(1 << LOOPBACK_GMII) | \
(1 << LOOPBACK_SGMII) | \
(1 << LOOPBACK_XGBR) | \
(1 << LOOPBACK_XFI) | \
(1 << LOOPBACK_XAUI_FAR) | \
(1 << LOOPBACK_GMII_FAR) | \
(1 << LOOPBACK_SGMII_FAR) | \
(1 << LOOPBACK_XFI_FAR) | \
(1 << LOOPBACK_XGMII_WS) | \
(1 << LOOPBACK_XAUI_WS) | \
(1 << LOOPBACK_XAUI_WS_FAR) | \
(1 << LOOPBACK_XAUI_WS_NEAR) | \
(1 << LOOPBACK_GMII_WS) | \
(1 << LOOPBACK_XFI_WS) | \
(1 << LOOPBACK_XFI_WS_FAR))
#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \
(1 << LOOPBACK_XAUI_WS) | \
(1 << LOOPBACK_XAUI_WS_FAR) | \
(1 << LOOPBACK_XAUI_WS_NEAR) | \
(1 << LOOPBACK_GMII_WS) | \
(1 << LOOPBACK_XFI_WS) | \
(1 << LOOPBACK_XFI_WS_FAR) | \
(1 << LOOPBACK_PHYXS_WS))
#define LOOPBACKS_EXTERNAL(_efx) \
((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \
~(1 << LOOPBACK_NONE))
#define LOOPBACK_MASK(_efx) \
(1 << (_efx)->loopback_mode)
#define LOOPBACK_INTERNAL(_efx) \
(!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
#define LOOPBACK_EXTERNAL(_efx) \
(!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
#define LOOPBACK_CHANGED(_from, _to, _mask) \
(!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
#define LOOPBACK_OUT_OF(_from, _to, _mask) \
((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
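/* Illustration: with efx->loopback_mode == LOOPBACK_XGXS (value 4),
 * LOOPBACK_MASK(_efx) == 1 << 4, which is set in LOOPBACKS_INTERNAL,
 * so LOOPBACK_INTERNAL(_efx) evaluates to 1 and LOOPBACK_EXTERNAL(_efx)
 * to 0. */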
/*****************************************************************************/
/**
* enum reset_type - reset types
*
* %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
* %RESET_TYPE_DISABLE specify the method/scope of the reset. The
* other values specify reasons, for which efx_schedule_reset() will
* choose a method.
*
* Reset methods are numbered in order of increasing scope.
*
* @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
* @RESET_TYPE_ALL: reset everything but PCI core blocks
* @RESET_TYPE_WORLD: reset everything, save & restore PCI config
* @RESET_TYPE_DISABLE: disable NIC
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
* @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
* @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
*/
enum reset_type {
RESET_TYPE_INVISIBLE = 0,
RESET_TYPE_ALL = 1,
RESET_TYPE_WORLD = 2,
RESET_TYPE_DISABLE = 3,
RESET_TYPE_MAX_METHOD,
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
RESET_TYPE_RX_RECOVERY,
RESET_TYPE_RX_DESC_FETCH,
RESET_TYPE_TX_DESC_FETCH,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
RESET_TYPE_MAX,
};
#endif /* EFX_ENUM_H */

File diff suppressed because it is too large

File diff suppressed because it is too large

drivers/net/ethernet/sfc/falcon_boards.c

@@ -0,0 +1,776 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2007-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/rtnetlink.h>
#include "net_driver.h"
#include "phy.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"
/* Macros for unpacking the board revision */
/* The revision info is in host byte order. */
#define FALCON_BOARD_TYPE(_rev) (_rev >> 8)
#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf)
/* Board types */
#define FALCON_BOARD_SFE4001 0x01
#define FALCON_BOARD_SFE4002 0x02
#define FALCON_BOARD_SFE4003 0x03
#define FALCON_BOARD_SFN4112F 0x52
/* Board temperature is about 15°C above ambient when air flow is
* limited. The maximum acceptable ambient temperature varies
* depending on the PHY specifications but the critical temperature
* above which we should shut down to avoid damage is 80°C. */
#define FALCON_BOARD_TEMP_BIAS 15
#define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS)
/* SFC4000 datasheet says: 'The maximum permitted junction temperature
* is 125°C; the thermal design of the environment for the SFC4000
* should aim to keep this well below 100°C.' */
#define FALCON_JUNC_TEMP_MIN 0
#define FALCON_JUNC_TEMP_MAX 90
#define FALCON_JUNC_TEMP_CRIT 125
/*****************************************************************************
* Support for LM87 sensor chip used on several boards
*/
#define LM87_REG_TEMP_HW_INT_LOCK 0x13
#define LM87_REG_TEMP_HW_EXT_LOCK 0x14
#define LM87_REG_TEMP_HW_INT 0x17
#define LM87_REG_TEMP_HW_EXT 0x18
#define LM87_REG_TEMP_EXT1 0x26
#define LM87_REG_TEMP_INT 0x27
#define LM87_REG_ALARMS1 0x41
#define LM87_REG_ALARMS2 0x42
#define LM87_IN_LIMITS(nr, _min, _max) \
0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
#define LM87_AIN_LIMITS(nr, _min, _max) \
0x3B + (nr), _max, 0x1A + (nr), _min
#define LM87_TEMP_INT_LIMITS(_min, _max) \
0x39, _max, 0x3A, _min
#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
0x37, _max, 0x38, _min
#define LM87_ALARM_TEMP_INT 0x10
#define LM87_ALARM_TEMP_EXT1 0x20
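/* Each *_LIMITS helper expands to two register/value pairs - the
 * high-limit register with the max, then the low-limit register with
 * the min - which is what lets them sit directly in the flat,
 * zero-terminated reg/value tables passed to efx_poke_lm87() below.
 * For instance, LM87_IN_LIMITS(0, 0x7c, 0x99) expands to:
 *	0x2B, 0x99,	(high limit for input 0)
 *	0x2C, 0x7c	(low limit for input 0)
 */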
#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
{
while (*reg_values) {
u8 reg = *reg_values++;
u8 value = *reg_values++;
int rc = i2c_smbus_write_byte_data(client, reg, value);
if (rc)
return rc;
}
return 0;
}
static const u8 falcon_lm87_common_regs[] = {
LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
0
};
static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
const u8 *reg_values)
{
struct falcon_board *board = falcon_board(efx);
struct i2c_client *client = i2c_new_device(&board->i2c_adap, info);
int rc;
if (!client)
return -EIO;
/* Read-to-clear alarm/interrupt status */
i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
rc = efx_poke_lm87(client, reg_values);
if (rc)
goto err;
rc = efx_poke_lm87(client, falcon_lm87_common_regs);
if (rc)
goto err;
board->hwmon_client = client;
return 0;
err:
i2c_unregister_device(client);
return rc;
}
static void efx_fini_lm87(struct efx_nic *efx)
{
i2c_unregister_device(falcon_board(efx)->hwmon_client);
}
static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
{
struct i2c_client *client = falcon_board(efx)->hwmon_client;
bool temp_crit, elec_fault, is_failure;
u16 alarms;
s32 reg;
/* If link is up then do not monitor temperature */
if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
return 0;
reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
if (reg < 0)
return reg;
alarms = reg;
reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
if (reg < 0)
return reg;
alarms |= reg << 8;
alarms &= mask;
temp_crit = false;
if (alarms & LM87_ALARM_TEMP_INT) {
reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
if (reg < 0)
return reg;
if (reg > FALCON_BOARD_TEMP_CRIT)
temp_crit = true;
}
if (alarms & LM87_ALARM_TEMP_EXT1) {
reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
if (reg < 0)
return reg;
if (reg > FALCON_JUNC_TEMP_CRIT)
temp_crit = true;
}
elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
is_failure = temp_crit || elec_fault;
if (alarms)
netif_err(efx, hw, efx->net_dev,
"LM87 detected a hardware %s (status %02x:%02x)"
"%s%s%s%s\n",
is_failure ? "failure" : "problem",
alarms & 0xff, alarms >> 8,
(alarms & LM87_ALARM_TEMP_INT) ?
"; board is overheating" : "",
(alarms & LM87_ALARM_TEMP_EXT1) ?
"; controller is overheating" : "",
temp_crit ? "; reached critical temperature" : "",
elec_fault ? "; electrical fault" : "");
return is_failure ? -ERANGE : 0;
}
#else /* !CONFIG_SENSORS_LM87 */
static inline int
efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
const u8 *reg_values)
{
return 0;
}
static inline void efx_fini_lm87(struct efx_nic *efx)
{
}
static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
{
return 0;
}
#endif /* CONFIG_SENSORS_LM87 */
/*****************************************************************************
* Support for the SFE4001 NIC.
*
* The SFE4001 does not power-up fully at reset due to its high power
* consumption. We control its power via a PCA9539 I/O expander.
* It also has a MAX6647 temperature monitor which we expose to
* the lm90 driver.
*
* This also provides minimal support for reflashing the PHY, which is
* initiated by resetting it with the FLASH_CFG_1 pin pulled down.
* On SFE4001 rev A2 and later this is connected to the 3V3X output of
* the IO-expander.
* We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
* exclusive with the network device being open.
*/
/**************************************************************************
* Support for I2C IO Expander device on SFE4001
*/
#define PCA9539 0x74
#define P0_IN 0x00
#define P0_OUT 0x02
#define P0_INVERT 0x04
#define P0_CONFIG 0x06
#define P0_EN_1V0X_LBN 0
#define P0_EN_1V0X_WIDTH 1
#define P0_EN_1V2_LBN 1
#define P0_EN_1V2_WIDTH 1
#define P0_EN_2V5_LBN 2
#define P0_EN_2V5_WIDTH 1
#define P0_EN_3V3X_LBN 3
#define P0_EN_3V3X_WIDTH 1
#define P0_EN_5V_LBN 4
#define P0_EN_5V_WIDTH 1
#define P0_SHORTEN_JTAG_LBN 5
#define P0_SHORTEN_JTAG_WIDTH 1
#define P0_X_TRST_LBN 6
#define P0_X_TRST_WIDTH 1
#define P0_DSP_RESET_LBN 7
#define P0_DSP_RESET_WIDTH 1
#define P1_IN 0x01
#define P1_OUT 0x03
#define P1_INVERT 0x05
#define P1_CONFIG 0x07
#define P1_AFE_PWD_LBN 0
#define P1_AFE_PWD_WIDTH 1
#define P1_DSP_PWD25_LBN 1
#define P1_DSP_PWD25_WIDTH 1
#define P1_RESERVED_LBN 2
#define P1_RESERVED_WIDTH 2
#define P1_SPARE_LBN 4
#define P1_SPARE_WIDTH 4
/* Temperature Sensor */
#define MAX664X_REG_RSL 0x02
#define MAX664X_REG_WLHO 0x0B
static void sfe4001_poweroff(struct efx_nic *efx)
{
struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
/* Turn off all power rails and disable outputs */
i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
/* Clear any over-temperature alert */
i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
}
static int sfe4001_poweron(struct efx_nic *efx)
{
struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
unsigned int i, j;
int rc;
u8 out;
/* Clear any previous over-temperature alert */
rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
if (rc < 0)
return rc;
/* Enable port 0 and port 1 outputs on IO expander */
rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
if (rc)
return rc;
rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
0xff & ~(1 << P1_SPARE_LBN));
if (rc)
goto fail_on;
/* If PHY power is on, turn it all off and wait 1 second to
* ensure a full reset.
*/
rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
if (rc < 0)
goto fail_on;
out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
(0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
(0 << P0_EN_1V0X_LBN));
if (rc != out) {
netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n");
rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
if (rc)
goto fail_on;
schedule_timeout_uninterruptible(HZ);
}
for (i = 0; i < 20; ++i) {
/* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
(1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
(1 << P0_X_TRST_LBN));
if (efx->phy_mode & PHY_MODE_SPECIAL)
out |= 1 << P0_EN_3V3X_LBN;
rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
if (rc)
goto fail_on;
msleep(10);
/* Turn on 1V power rail */
out &= ~(1 << P0_EN_1V0X_LBN);
rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
if (rc)
goto fail_on;
netif_info(efx, hw, efx->net_dev,
"waiting for DSP boot (attempt %d)...\n", i);
/* In flash config mode, DSP does not turn on AFE, so
* just wait 1 second.
*/
if (efx->phy_mode & PHY_MODE_SPECIAL) {
schedule_timeout_uninterruptible(HZ);
return 0;
}
for (j = 0; j < 10; ++j) {
msleep(100);
/* Check DSP has asserted AFE power line */
rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
if (rc < 0)
goto fail_on;
if (rc & (1 << P1_AFE_PWD_LBN))
return 0;
}
}
netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n");
rc = -ETIMEDOUT;
fail_on:
sfe4001_poweroff(efx);
return rc;
}
static ssize_t show_phy_flash_cfg(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
}
static ssize_t set_phy_flash_cfg(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
enum efx_phy_mode old_mode, new_mode;
int err;
rtnl_lock();
old_mode = efx->phy_mode;
if (count == 0 || *buf == '0')
new_mode = old_mode & ~PHY_MODE_SPECIAL;
else
new_mode = PHY_MODE_SPECIAL;
if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
err = 0;
} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
err = -EBUSY;
} else {
/* Reset the PHY, reconfigure the MAC and enable/disable
* MAC stats accordingly. */
efx->phy_mode = new_mode;
if (new_mode & PHY_MODE_SPECIAL)
falcon_stop_nic_stats(efx);
err = sfe4001_poweron(efx);
if (!err)
err = efx_reconfigure_port(efx);
if (!(new_mode & PHY_MODE_SPECIAL))
falcon_start_nic_stats(efx);
}
rtnl_unlock();
return err ? err : count;
}
static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
static void sfe4001_fini(struct efx_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
sfe4001_poweroff(efx);
i2c_unregister_device(board->ioexp_client);
i2c_unregister_device(board->hwmon_client);
}
static int sfe4001_check_hw(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
s32 status;
/* If XAUI link is up then do not monitor */
if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
return 0;
/* Check the powered status of the PHY. Lack of power implies that
* the MAX6647 has shut down power to it, probably due to a temp.
* alarm. We read the power status rather than the MAX6647 status
* directly because the latter is read-to-clear and would thus
* start to power up the PHY again when polled, causing us to blip
* the power undesirably.
* We know we can read from the IO expander because we did
* it during power-on. Assume failure now is bad news. */
status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN);
if (status >= 0 &&
(status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
return 0;
/* Use board power control, not PHY power control */
sfe4001_poweroff(efx);
efx->phy_mode = PHY_MODE_OFF;
return (status < 0) ? -EIO : -ERANGE;
}
static struct i2c_board_info sfe4001_hwmon_info = {
I2C_BOARD_INFO("max6647", 0x4e),
};
/* This board uses an I2C expander to provide power to the PHY, which needs to
* be turned on before the PHY can be used.
* Context: Process context, rtnl lock held
*/
static int sfe4001_init(struct efx_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
int rc;
#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
board->hwmon_client =
i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
#else
board->hwmon_client =
i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr);
#endif
if (!board->hwmon_client)
return -EIO;
/* Raise board/PHY high limit from 85 to 90 degrees Celsius */
rc = i2c_smbus_write_byte_data(board->hwmon_client,
MAX664X_REG_WLHO, 90);
if (rc)
goto fail_hwmon;
board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539);
if (!board->ioexp_client) {
rc = -EIO;
goto fail_hwmon;
}
if (efx->phy_mode & PHY_MODE_SPECIAL) {
/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
* will fail. */
falcon_stop_nic_stats(efx);
}
rc = sfe4001_poweron(efx);
if (rc)
goto fail_ioexp;
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
if (rc)
goto fail_on;
netif_info(efx, hw, efx->net_dev, "PHY is powered on\n");
return 0;
fail_on:
sfe4001_poweroff(efx);
fail_ioexp:
i2c_unregister_device(board->ioexp_client);
fail_hwmon:
i2c_unregister_device(board->hwmon_client);
return rc;
}
/*****************************************************************************
* Support for the SFE4002
*
*/
static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
static const u8 sfe4002_lm87_regs[] = {
LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */
LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */
LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),
LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
0
};
static struct i2c_board_info sfe4002_hwmon_info = {
I2C_BOARD_INFO("lm87", 0x2e),
.platform_data = &sfe4002_lm87_channel,
};
/****************************************************************************/
/* LED allocations. Note that on rev A0 boards the schematic and the reality
* differ: red and green are swapped. Below is the fixed (A1) layout (there
* are only 3 A0 boards in existence, so no real reason to make this
* conditional).
*/
#define SFE4002_FAULT_LED (2) /* Red */
#define SFE4002_RX_LED (0) /* Green */
#define SFE4002_TX_LED (1) /* Amber */
static void sfe4002_init_phy(struct efx_nic *efx)
{
/* Set the TX and RX LEDs to reflect status and activity, and the
* fault LED off */
falcon_qt202x_set_led(efx, SFE4002_TX_LED,
QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
falcon_qt202x_set_led(efx, SFE4002_RX_LED,
QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
}
static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
falcon_qt202x_set_led(
efx, SFE4002_FAULT_LED,
(mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF);
}
static int sfe4002_check_hw(struct efx_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
/* A0 board rev. 4002s report a temperature fault the whole time
* (bad sensor) so we mask it out. */
unsigned alarm_mask =
(board->major == 0 && board->minor == 0) ?
~LM87_ALARM_TEMP_EXT1 : ~0;
return efx_check_lm87(efx, alarm_mask);
}
static int sfe4002_init(struct efx_nic *efx)
{
return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
}
/*****************************************************************************
* Support for the SFN4112F
*
*/
static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
static const u8 sfn4112f_lm87_regs[] = {
LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS),
LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
0
};
static struct i2c_board_info sfn4112f_hwmon_info = {
I2C_BOARD_INFO("lm87", 0x2e),
.platform_data = &sfn4112f_lm87_channel,
};
#define SFN4112F_ACT_LED 0
#define SFN4112F_LINK_LED 1
static void sfn4112f_init_phy(struct efx_nic *efx)
{
falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
}
static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
int reg;
switch (mode) {
case EFX_LED_OFF:
reg = QUAKE_LED_OFF;
break;
case EFX_LED_ON:
reg = QUAKE_LED_ON;
break;
default:
reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT;
break;
}
falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg);
}
static int sfn4112f_check_hw(struct efx_nic *efx)
{
/* Mask out unused sensors */
return efx_check_lm87(efx, ~0x48);
}
static int sfn4112f_init(struct efx_nic *efx)
{
return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
}
/*****************************************************************************
* Support for the SFE4003
*
*/
static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */
static const u8 sfe4003_lm87_regs[] = {
LM87_IN_LIMITS(0, 0x67, 0x7f), /* 2.5V: 1.5V +/- 10% */
LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS),
0
};
static struct i2c_board_info sfe4003_hwmon_info = {
I2C_BOARD_INFO("lm87", 0x2e),
.platform_data = &sfe4003_lm87_channel,
};
/* Board-specific LED info. */
#define SFE4003_RED_LED_GPIO 11
#define SFE4003_LED_ON 1
#define SFE4003_LED_OFF 0
static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
struct falcon_board *board = falcon_board(efx);
/* The LEDs were not wired to GPIOs before A3 */
if (board->minor < 3 && board->major == 0)
return;
falcon_txc_set_gpio_val(
efx, SFE4003_RED_LED_GPIO,
(mode == EFX_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
}
static void sfe4003_init_phy(struct efx_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
/* The LEDs were not wired to GPIOs before A3 */
if (board->minor < 3 && board->major == 0)
return;
falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT);
falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
}
static int sfe4003_check_hw(struct efx_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
/* A0/A1/A2 board rev. 4003s report a temperature fault the whole time
* (bad sensor) so we mask it out. */
unsigned alarm_mask =
(board->major == 0 && board->minor <= 2) ?
~LM87_ALARM_TEMP_EXT1 : ~0;
return efx_check_lm87(efx, alarm_mask);
}
static int sfe4003_init(struct efx_nic *efx)
{
return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
}
static const struct falcon_board_type board_types[] = {
{
.id = FALCON_BOARD_SFE4001,
.ref_model = "SFE4001",
.gen_type = "10GBASE-T adapter",
.init = sfe4001_init,
.init_phy = efx_port_dummy_op_void,
.fini = sfe4001_fini,
.set_id_led = tenxpress_set_id_led,
.monitor = sfe4001_check_hw,
},
{
.id = FALCON_BOARD_SFE4002,
.ref_model = "SFE4002",
.gen_type = "XFP adapter",
.init = sfe4002_init,
.init_phy = sfe4002_init_phy,
.fini = efx_fini_lm87,
.set_id_led = sfe4002_set_id_led,
.monitor = sfe4002_check_hw,
},
{
.id = FALCON_BOARD_SFE4003,
.ref_model = "SFE4003",
.gen_type = "10GBASE-CX4 adapter",
.init = sfe4003_init,
.init_phy = sfe4003_init_phy,
.fini = efx_fini_lm87,
.set_id_led = sfe4003_set_id_led,
.monitor = sfe4003_check_hw,
},
{
.id = FALCON_BOARD_SFN4112F,
.ref_model = "SFN4112F",
.gen_type = "SFP+ adapter",
.init = sfn4112f_init,
.init_phy = sfn4112f_init_phy,
.fini = efx_fini_lm87,
.set_id_led = sfn4112f_set_id_led,
.monitor = sfn4112f_check_hw,
},
};
int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
{
struct falcon_board *board = falcon_board(efx);
u8 type_id = FALCON_BOARD_TYPE(revision_info);
int i;
board->major = FALCON_BOARD_MAJOR(revision_info);
board->minor = FALCON_BOARD_MINOR(revision_info);
for (i = 0; i < ARRAY_SIZE(board_types); i++)
if (board_types[i].id == type_id)
board->type = &board_types[i];
if (board->type) {
netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n",
(efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
? board->type->ref_model : board->type->gen_type,
'A' + board->major, board->minor);
return 0;
} else {
netif_err(efx, probe, efx->net_dev, "unknown board type %d\n",
type_id);
return -ENODEV;
}
}

drivers/net/ethernet/sfc/falcon_xmac.c

@@ -0,0 +1,369 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/delay.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "mac.h"
#include "mdio_10g.h"
#include "workarounds.h"
/**************************************************************************
*
* MAC operations
*
*************************************************************************/
/* Configure the XAUI driver that is an output from Falcon */
void falcon_setup_xaui(struct efx_nic *efx)
{
efx_oword_t sdctl, txdrv;
/* Move the XAUI into low power, unless there is no PHY, in
* which case the XAUI will have to drive a cable. */
if (efx->phy_type == PHY_TYPE_NONE)
return;
efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
EFX_POPULATE_OWORD_8(txdrv,
FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}
int falcon_reset_xaui(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t reg;
int count;
/* Don't fetch MAC statistics over an XMAC reset */
WARN_ON(nic_data->stats_disable_count == 0);
/* Start reset sequence */
EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
/* Wait up to 10 ms for completion, then reinitialise */
for (count = 0; count < 1000; count++) {
efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
falcon_setup_xaui(efx);
return 0;
}
udelay(10);
}
netif_err(efx, hw, efx->net_dev,
"timed out waiting for XAUI/XGXS reset\n");
return -ETIMEDOUT;
}
static void falcon_ack_status_intr(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t reg;
if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
return;
/* We expect xgmii faults if the wireside link is down */
if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
return;
/* We can only use this interrupt to signal the negative edge of
* xaui_align [we have to poll the positive edge]. */
if (nic_data->xmac_poll_required)
return;
efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
static bool falcon_xgxs_link_ok(struct efx_nic *efx)
{
efx_oword_t reg;
bool align_done, link_ok = false;
int sync_status;
/* Read link status */
efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
link_ok = true;
/* Clear link status ready for next read */
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
return link_ok;
}
static bool falcon_xmac_link_ok(struct efx_nic *efx)
{
/*
* Check MAC's XGXS link status except when using XGMII loopback
* which bypasses the XGXS block.
* If possible, check PHY's XGXS link status except when using
* MAC loopback.
*/
return (efx->loopback_mode == LOOPBACK_XGMII ||
falcon_xgxs_link_ok(efx)) &&
(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
LOOPBACK_INTERNAL(efx) ||
efx_mdio_phyxgxs_lane_sync(efx));
}
static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
unsigned int max_frame_len;
efx_oword_t reg;
bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
/* Configure MAC - cut-thru mode is hard wired on */
EFX_POPULATE_OWORD_3(reg,
FRF_AB_XM_RX_JUMBO_MODE, 1,
FRF_AB_XM_TX_STAT_EN, 1,
FRF_AB_XM_RX_STAT_EN, 1);
efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
/* Configure TX */
EFX_POPULATE_OWORD_6(reg,
FRF_AB_XM_TXEN, 1,
FRF_AB_XM_TX_PRMBL, 1,
FRF_AB_XM_AUTO_PAD, 1,
FRF_AB_XM_TXCRC, 1,
FRF_AB_XM_FCNTL, tx_fc,
FRF_AB_XM_IPG, 0x3);
efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
/* Configure RX */
EFX_POPULATE_OWORD_5(reg,
FRF_AB_XM_RXEN, 1,
FRF_AB_XM_AUTO_DEPAD, 0,
FRF_AB_XM_ACPT_ALL_MCAST, 1,
FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
FRF_AB_XM_PASS_CRC_ERR, 1);
efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
/* Set frame length */
max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
EFX_POPULATE_OWORD_2(reg,
FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
FRF_AB_XM_TX_JUMBO_MODE, 1);
efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
EFX_POPULATE_OWORD_2(reg,
FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
FRF_AB_XM_DIS_FCNTL, !rx_fc);
efx_writeo(efx, &reg, FR_AB_XM_FC);
/* Set MAC address */
memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}
static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
{
efx_oword_t reg;
bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
/* XGXS block is flaky and will need to be reset if moving
 * into or out of XGMII, XGXS or XAUI loopbacks. */
if (EFX_WORKAROUND_5147(efx)) {
bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
bool reset_xgxs;
efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
old_xgmii_loopback =
EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
/* The PHY driver may have turned XAUI off */
reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
(xaui_loopback != old_xaui_loopback) ||
(xgmii_loopback != old_xgmii_loopback));
if (reset_xgxs)
falcon_reset_xaui(efx);
}
efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
(xgxs_loopback || xaui_loopback) ?
FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}
/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
{
bool mac_up = falcon_xmac_link_ok(efx);
if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
efx_phy_mode_disabled(efx->phy_mode))
/* XAUI link is expected to be down */
return mac_up;
falcon_stop_nic_stats(efx);
while (!mac_up && tries) {
netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
falcon_reset_xaui(efx);
udelay(200);
mac_up = falcon_xmac_link_ok(efx);
--tries;
}
falcon_start_nic_stats(efx);
return mac_up;
}
static bool falcon_xmac_check_fault(struct efx_nic *efx)
{
return !falcon_xmac_link_ok_retry(efx, 5);
}
static int falcon_reconfigure_xmac(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
falcon_reconfigure_xgxs_core(efx);
falcon_reconfigure_xmac_core(efx);
falcon_reconfigure_mac_wrapper(efx);
nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
falcon_ack_status_intr(efx);
return 0;
}
static void falcon_update_stats_xmac(struct efx_nic *efx)
{
struct efx_mac_stats *mac_stats = &efx->mac_stats;
/* Update MAC stats from DMAed values */
FALCON_STAT(efx, XgRxOctets, rx_bytes);
FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
FALCON_STAT(efx, XgRxPkts, rx_packets);
FALCON_STAT(efx, XgRxPktsOK, rx_good);
FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
FALCON_STAT(efx, XgRxAlignError, rx_align_error);
FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
FALCON_STAT(efx, XgRxControlPkts, rx_control);
FALCON_STAT(efx, XgRxPausePkts, rx_pause);
FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
FALCON_STAT(efx, XgRxLengthError, rx_length_error);
FALCON_STAT(efx, XgTxPkts, tx_packets);
FALCON_STAT(efx, XgTxOctets, tx_bytes);
FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
FALCON_STAT(efx, XgTxControlPkts, tx_control);
FALCON_STAT(efx, XgTxPausePkts, tx_pause);
FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
/* Update derived statistics */
mac_stats->tx_good_bytes =
(mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
mac_stats->tx_control * 64);
mac_stats->rx_bad_bytes =
(mac_stats->rx_bytes - mac_stats->rx_good_bytes -
mac_stats->rx_control * 64);
}
void falcon_poll_xmac(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
!nic_data->xmac_poll_required)
return;
nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
falcon_ack_status_intr(efx);
}
const struct efx_mac_operations falcon_xmac_operations = {
.reconfigure = falcon_reconfigure_xmac,
.update_stats = falcon_update_stats_xmac,
.check_fault = falcon_xmac_check_fault,
};
@@ -0,0 +1,727 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/in.h>
#include <net/ip.h>
#include "efx.h"
#include "filter.h"
#include "io.h"
#include "nic.h"
#include "regs.h"
/* "Fudge factors" - difference between programmed value and actual depth.
* Due to pipelined implementation we need to program H/W with a value that
* is larger than the hop limit we want.
*/
#define FILTER_CTL_SRCH_FUDGE_WILD 3
#define FILTER_CTL_SRCH_FUDGE_FULL 1
/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
* We also need to avoid infinite loops in efx_filter_search() when the
* table is full.
*/
#define FILTER_CTL_SRCH_MAX 200
/* Don't try very hard to find space for performance hints, as this is
* counter-productive. */
#define FILTER_CTL_SRCH_HINT_MAX 5
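/* Worked example: if the deepest wildcard TCP filter inserted so far
 * required 5 hops, efx_filter_push_rx_limits() programs
 * 5 + FILTER_CTL_SRCH_FUDGE_WILD = 8 into the corresponding search-limit
 * field, so that the pipelined hardware lookup still reaches the entry.
 */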
enum efx_filter_table_id {
EFX_FILTER_TABLE_RX_IP = 0,
EFX_FILTER_TABLE_RX_MAC,
EFX_FILTER_TABLE_COUNT,
};
struct efx_filter_table {
enum efx_filter_table_id id;
u32 offset; /* address of table relative to BAR */
unsigned size; /* number of entries */
unsigned step; /* step between entries */
unsigned used; /* number currently used */
unsigned long *used_bitmap;
struct efx_filter_spec *spec;
unsigned search_depth[EFX_FILTER_TYPE_COUNT];
};
struct efx_filter_state {
spinlock_t lock;
struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
#ifdef CONFIG_RFS_ACCEL
u32 *rps_flow_id;
unsigned rps_expire_index;
#endif
};
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
* key derived from the n-tuple. The initial LFSR state is 0xffff. */
static u16 efx_filter_hash(u32 key)
{
u16 tmp;
/* First 16 rounds */
tmp = 0x1fff ^ key >> 16;
tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
tmp = tmp ^ tmp >> 9;
/* Last 16 rounds */
tmp = tmp ^ tmp << 13 ^ key;
tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
return tmp ^ tmp >> 9;
}
/* To allow for hash collisions, filter search continues at these
* increments from the first possible entry selected by the hash. */
static u16 efx_filter_increment(u32 key)
{
return key * 2 - 1;
}
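/* Illustrative note: efx_filter_search() probes the table at
 * (hash + k * incr) & (size - 1) for k = 0, 1, 2, ...  Because the
 * increment (key * 2 - 1) is always odd, it is coprime with the
 * power-of-two table size, so the probe sequence can visit every entry
 * before repeating.
 */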
static enum efx_filter_table_id
efx_filter_spec_table_id(const struct efx_filter_spec *spec)
{
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
return spec->type >> 2;
}
static struct efx_filter_table *
efx_filter_spec_table(struct efx_filter_state *state,
const struct efx_filter_spec *spec)
{
if (spec->type == EFX_FILTER_UNSPEC)
return NULL;
else
return &state->table[efx_filter_spec_table_id(spec)];
}
static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
{
memset(table->search_depth, 0, sizeof(table->search_depth));
}
static void efx_filter_push_rx_limits(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table;
efx_oword_t filter_ctl;
efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
table = &state->table[EFX_FILTER_TABLE_RX_IP];
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
table->search_depth[EFX_FILTER_TCP_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
table->search_depth[EFX_FILTER_TCP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
table->search_depth[EFX_FILTER_UDP_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
table->search_depth[EFX_FILTER_UDP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
table = &state->table[EFX_FILTER_TABLE_RX_MAC];
if (table->size) {
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
table->search_depth[EFX_FILTER_MAC_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
table->search_depth[EFX_FILTER_MAC_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
}
efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
__be32 host1, __be16 port1,
__be32 host2, __be16 port2)
{
spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
spec->data[2] = ntohl(host2);
}
/**
* efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @host: Local host address (network byte order)
* @port: Local port (network byte order)
*/
int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port)
{
__be32 host1;
__be16 port1;
EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
/* This cannot currently be combined with other filtering */
if (spec->type != EFX_FILTER_UNSPEC)
return -EPROTONOSUPPORT;
if (port == 0)
return -EINVAL;
switch (proto) {
case IPPROTO_TCP:
spec->type = EFX_FILTER_TCP_WILD;
break;
case IPPROTO_UDP:
spec->type = EFX_FILTER_UDP_WILD;
break;
default:
return -EPROTONOSUPPORT;
}
/* Filter is constructed in terms of source and destination,
* with the odd wrinkle that the ports are swapped in a UDP
* wildcard filter. We need to convert from local and remote
* (= zero for wildcard) addresses.
*/
host1 = 0;
if (proto != IPPROTO_UDP) {
port1 = 0;
} else {
port1 = port;
port = 0;
}
__efx_filter_set_ipv4(spec, host1, port1, host, port);
return 0;
}
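/* Illustrative example (hypothetical values): for a UDP wildcard filter
 * on local port 53, the swap above means __efx_filter_set_ipv4() is
 * called as (spec, 0, htons(53), host, 0), giving data[0] = 53,
 * data[1] = 0 and data[2] = ntohl(host); the TCP wildcard equivalent
 * instead carries the port in bits 31:16 of data[1].
 */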
/**
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @host: Local host address (network byte order)
* @port: Local port (network byte order)
* @rhost: Remote host address (network byte order)
* @rport: Remote port (network byte order)
*/
int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port,
__be32 rhost, __be16 rport)
{
EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
/* This cannot currently be combined with other filtering */
if (spec->type != EFX_FILTER_UNSPEC)
return -EPROTONOSUPPORT;
if (port == 0 || rport == 0)
return -EINVAL;
switch (proto) {
case IPPROTO_TCP:
spec->type = EFX_FILTER_TCP_FULL;
break;
case IPPROTO_UDP:
spec->type = EFX_FILTER_UDP_FULL;
break;
default:
return -EPROTONOSUPPORT;
}
__efx_filter_set_ipv4(spec, rhost, rport, host, port);
return 0;
}
/**
* efx_filter_set_eth_local - specify local Ethernet address and optional VID
* @spec: Specification to initialise
* @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
* @addr: Local Ethernet MAC address
*/
int efx_filter_set_eth_local(struct efx_filter_spec *spec,
u16 vid, const u8 *addr)
{
EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
/* This cannot currently be combined with other filtering */
if (spec->type != EFX_FILTER_UNSPEC)
return -EPROTONOSUPPORT;
if (vid == EFX_FILTER_VID_UNSPEC) {
spec->type = EFX_FILTER_MAC_WILD;
spec->data[0] = 0;
} else {
spec->type = EFX_FILTER_MAC_FULL;
spec->data[0] = vid;
}
spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
spec->data[2] = addr[0] << 8 | addr[1];
return 0;
}
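/* Illustrative example (hypothetical address): for addr =
 * 00:0f:53:01:02:03 and vid = EFX_FILTER_VID_UNSPEC, the code above
 * yields type = EFX_FILTER_MAC_WILD, data[0] = 0, data[1] = 0x53010203
 * and data[2] = 0x000f.
 */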
/* Build a filter entry and return its n-tuple key. */
static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
{
u32 data3;
switch (efx_filter_spec_table_id(spec)) {
case EFX_FILTER_TABLE_RX_IP: {
bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
spec->type == EFX_FILTER_UDP_WILD);
EFX_POPULATE_OWORD_7(
*filter,
FRF_BZ_RSS_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
FRF_BZ_SCATTER_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
FRF_BZ_TCP_UDP, is_udp,
FRF_BZ_RXQ_ID, spec->dmaq_id,
EFX_DWORD_2, spec->data[2],
EFX_DWORD_1, spec->data[1],
EFX_DWORD_0, spec->data[0]);
data3 = is_udp;
break;
}
case EFX_FILTER_TABLE_RX_MAC: {
bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_8(
*filter,
FRF_CZ_RMFT_RSS_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
FRF_CZ_RMFT_SCATTER_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
FRF_CZ_RMFT_IP_OVERRIDE,
!!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
data3 = is_wild;
break;
}
default:
BUG();
}
return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}
static bool efx_filter_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right)
{
if (left->type != right->type ||
memcmp(left->data, right->data, sizeof(left->data)))
return false;
return true;
}
static int efx_filter_search(struct efx_filter_table *table,
struct efx_filter_spec *spec, u32 key,
bool for_insert, int *depth_required)
{
unsigned hash, incr, filter_idx, depth, depth_max;
hash = efx_filter_hash(key);
incr = efx_filter_increment(key);
filter_idx = hash & (table->size - 1);
depth = 1;
depth_max = (for_insert ?
(spec->priority <= EFX_FILTER_PRI_HINT ?
FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
table->search_depth[spec->type]);
for (;;) {
/* Return success if entry is used and matches this spec
* or entry is unused and we are trying to insert.
*/
if (test_bit(filter_idx, table->used_bitmap) ?
efx_filter_equal(spec, &table->spec[filter_idx]) :
for_insert) {
*depth_required = depth;
return filter_idx;
}
/* Return failure if we reached the maximum search depth */
if (depth == depth_max)
return for_insert ? -EBUSY : -ENOENT;
filter_idx = (filter_idx + incr) & (table->size - 1);
++depth;
}
}
/* Construct/deconstruct external filter IDs */
static inline int
efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
{
return table_id << 16 | index;
}
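/* For example, index 5 in the RX MAC table (table_id = 1) becomes the
 * external ID 0x10005; the table and index are recovered with (id >> 16)
 * and (id & 0xffff) respectively.
 */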
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
* @spec: Specification for the filter
* @replace: Flag for whether the specified filter may replace a filter
* with an identical match expression and equal or lower priority
*
* On success, return the filter ID.
* On failure, return a negative error code.
*/
int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
bool replace)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter;
int filter_idx, depth;
u32 key;
int rc;
if (!table || table->size == 0)
return -EINVAL;
key = efx_filter_build(&filter, spec);
netif_vdbg(efx, hw, efx->net_dev,
"%s: type %d search_depth=%d", __func__, spec->type,
table->search_depth[spec->type]);
spin_lock_bh(&state->lock);
rc = efx_filter_search(table, spec, key, true, &depth);
if (rc < 0)
goto out;
filter_idx = rc;
BUG_ON(filter_idx >= table->size);
saved_spec = &table->spec[filter_idx];
if (test_bit(filter_idx, table->used_bitmap)) {
/* Should we replace the existing filter? */
if (!replace) {
rc = -EEXIST;
goto out;
}
if (spec->priority < saved_spec->priority) {
rc = -EPERM;
goto out;
}
} else {
__set_bit(filter_idx, table->used_bitmap);
++table->used;
}
*saved_spec = *spec;
if (table->search_depth[spec->type] < depth) {
table->search_depth[spec->type] = depth;
efx_filter_push_rx_limits(efx);
}
efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
__func__, spec->type, filter_idx, spec->dmaq_id);
rc = efx_filter_make_id(table->id, filter_idx);
out:
spin_unlock_bh(&state->lock);
return rc;
}
static void efx_filter_table_clear_entry(struct efx_nic *efx,
struct efx_filter_table *table,
int filter_idx)
{
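/* Static, so zero-initialised: writing this all-zeroes filter to the
 * table disables the hardware entry. */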
static efx_oword_t filter;
if (test_bit(filter_idx, table->used_bitmap)) {
__clear_bit(filter_idx, table->used_bitmap);
--table->used;
memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
efx_writeo(efx, &filter,
table->offset + table->step * filter_idx);
}
}
/**
* efx_filter_remove_filter - remove a filter by specification
* @efx: NIC from which to remove the filter
* @spec: Specification for the filter
*
* On success, return zero.
* On failure, return a negative error code.
*/
int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter;
int filter_idx, depth;
u32 key;
int rc;
if (!table)
return -EINVAL;
key = efx_filter_build(&filter, spec);
spin_lock_bh(&state->lock);
rc = efx_filter_search(table, spec, key, false, &depth);
if (rc < 0)
goto out;
filter_idx = rc;
saved_spec = &table->spec[filter_idx];
if (spec->priority < saved_spec->priority) {
rc = -EPERM;
goto out;
}
efx_filter_table_clear_entry(efx, table, filter_idx);
if (table->used == 0)
efx_filter_table_reset_search_depth(table);
rc = 0;
out:
spin_unlock_bh(&state->lock);
return rc;
}
static void efx_filter_table_clear(struct efx_nic *efx,
enum efx_filter_table_id table_id,
enum efx_filter_priority priority)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[table_id];
int filter_idx;
spin_lock_bh(&state->lock);
for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
if (table->spec[filter_idx].priority <= priority)
efx_filter_table_clear_entry(efx, table, filter_idx);
if (table->used == 0)
efx_filter_table_reset_search_depth(table);
spin_unlock_bh(&state->lock);
}
/**
* efx_filter_clear_rx - remove RX filters by priority
* @efx: NIC from which to remove the filters
* @priority: Maximum priority to remove
*/
void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
{
efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
}
/* Restore filter state after reset */
void efx_restore_filters(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
enum efx_filter_table_id table_id;
struct efx_filter_table *table;
efx_oword_t filter;
int filter_idx;
spin_lock_bh(&state->lock);
for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
table = &state->table[table_id];
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
if (!test_bit(filter_idx, table->used_bitmap))
continue;
efx_filter_build(&filter, &table->spec[filter_idx]);
efx_writeo(efx, &filter,
table->offset + table->step * filter_idx);
}
}
efx_filter_push_rx_limits(efx);
spin_unlock_bh(&state->lock);
}
int efx_probe_filters(struct efx_nic *efx)
{
struct efx_filter_state *state;
struct efx_filter_table *table;
unsigned table_id;
state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
if (!state)
return -ENOMEM;
efx->filter_state = state;
spin_lock_init(&state->lock);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
#ifdef CONFIG_RFS_ACCEL
state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
sizeof(*state->rps_flow_id),
GFP_KERNEL);
if (!state->rps_flow_id)
goto fail;
#endif
table = &state->table[EFX_FILTER_TABLE_RX_IP];
table->id = EFX_FILTER_TABLE_RX_IP;
table->offset = FR_BZ_RX_FILTER_TBL0;
table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
table->step = FR_BZ_RX_FILTER_TBL0_STEP;
}
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
table = &state->table[EFX_FILTER_TABLE_RX_MAC];
table->id = EFX_FILTER_TABLE_RX_MAC;
table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
}
for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
table = &state->table[table_id];
if (table->size == 0)
continue;
table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
sizeof(unsigned long),
GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
table->spec = vzalloc(table->size * sizeof(*table->spec));
if (!table->spec)
goto fail;
}
return 0;
fail:
efx_remove_filters(efx);
return -ENOMEM;
}
void efx_remove_filters(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
enum efx_filter_table_id table_id;
for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
kfree(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
#ifdef CONFIG_RFS_ACCEL
kfree(state->rps_flow_id);
#endif
kfree(state);
}
#ifdef CONFIG_RFS_ACCEL
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_spec spec;
const struct iphdr *ip;
const __be16 *ports;
int nhoff;
int rc;
nhoff = skb_network_offset(skb);
if (skb->protocol != htons(ETH_P_IP))
return -EPROTONOSUPPORT;
/* RFS must validate the IP header length before calling us */
EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
ip = (const struct iphdr *)(skb->data + nhoff);
if (ip_is_fragment(ip))
return -EPROTONOSUPPORT;
EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
ip->daddr, ports[1], ip->saddr, ports[0]);
if (rc)
return rc;
rc = efx_filter_insert_filter(efx, &spec, true);
if (rc < 0)
return rc;
/* Remember this so we can check whether to expire the filter later */
state->rps_flow_id[rc] = flow_id;
channel = efx_get_channel(efx, skb_get_rx_queue(skb));
++channel->rfs_filters_added;
netif_info(efx, rx_status, efx->net_dev,
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
(ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
&ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
rxq_index, flow_id, rc);
return rc;
}
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
unsigned mask = table->size - 1;
unsigned index;
unsigned stop;
if (!spin_trylock_bh(&state->lock))
return false;
index = state->rps_expire_index;
stop = (index + quota) & mask;
while (index != stop) {
if (test_bit(index, table->used_bitmap) &&
table->spec[index].priority == EFX_FILTER_PRI_HINT &&
rps_may_expire_flow(efx->net_dev,
table->spec[index].dmaq_id,
state->rps_flow_id[index], index)) {
netif_info(efx, rx_status, efx->net_dev,
"expiring filter %d [flow %u]\n",
index, state->rps_flow_id[index]);
efx_filter_table_clear_entry(efx, table, index);
}
index = (index + 1) & mask;
}
state->rps_expire_index = stop;
if (table->used == 0)
efx_filter_table_reset_search_depth(table);
spin_unlock_bh(&state->lock);
return true;
}
#endif /* CONFIG_RFS_ACCEL */
@@ -0,0 +1,112 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_FILTER_H
#define EFX_FILTER_H
#include <linux/types.h>
/**
* enum efx_filter_type - type of hardware filter
* @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
* @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
* @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
* @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
* @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
* @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
* @EFX_FILTER_UNSPEC: Match type is unspecified
*
* Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
*/
enum efx_filter_type {
EFX_FILTER_TCP_FULL = 0,
EFX_FILTER_TCP_WILD,
EFX_FILTER_UDP_FULL,
EFX_FILTER_UDP_WILD,
EFX_FILTER_MAC_FULL = 4,
EFX_FILTER_MAC_WILD,
EFX_FILTER_TYPE_COUNT, /* number of specific types */
EFX_FILTER_UNSPEC = 0xf,
};
/**
* enum efx_filter_priority - priority of a hardware filter specification
* @EFX_FILTER_PRI_HINT: Performance hint
* @EFX_FILTER_PRI_MANUAL: Manually configured filter
* @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour
*/
enum efx_filter_priority {
EFX_FILTER_PRI_HINT = 0,
EFX_FILTER_PRI_MANUAL,
EFX_FILTER_PRI_REQUIRED,
};
/**
* enum efx_filter_flags - flags for hardware filter specifications
* @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
* By default, matching packets will be delivered only to the
* specified queue. If this flag is set, they will be delivered
* to a range of queues offset from the specified queue number
* according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
* @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
* any IP filter that matches the same packet. By default, IP
* filters take precedence.
* @EFX_FILTER_FLAG_RX: Filter is for RX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
};
/**
* struct efx_filter_spec - specification for a hardware filter
* @type: Type of match to be performed, from &enum efx_filter_type
* @priority: Priority of the filter, from &enum efx_filter_priority
* @flags: Miscellaneous flags, from &enum efx_filter_flags
* @dmaq_id: Source/target queue index
* @data: Match data (type-dependent)
*
* Use the efx_filter_set_*() functions to initialise the @type and
* @data fields.
*/
struct efx_filter_spec {
u8 type:4;
u8 priority:4;
u8 flags;
u16 dmaq_id;
u32 data[3];
};
static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
enum efx_filter_priority priority,
enum efx_filter_flags flags,
unsigned rxq_id)
{
spec->type = EFX_FILTER_UNSPEC;
spec->priority = priority;
spec->flags = EFX_FILTER_FLAG_RX | flags;
spec->dmaq_id = rxq_id;
}
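/* Typical usage is expected to look like the following minimal sketch
 * (error handling omitted; the surrounding variables are hypothetical
 * and efx_filter_insert_filter() is declared elsewhere in the driver):
 *
 *	struct efx_filter_spec spec;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_id);
 *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, daddr, dport);
 *	efx_filter_insert_filter(efx, &spec, false);
 */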
extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port);
extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port,
__be32 rhost, __be16 rport);
extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
u16 vid, const u8 *addr);
enum {
EFX_FILTER_VID_UNSPEC = 0xffff,
};
#endif /* EFX_FILTER_H */
@@ -0,0 +1,299 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_IO_H
#define EFX_IO_H
#include <linux/io.h>
#include <linux/spinlock.h>
/**************************************************************************
*
* NIC register I/O
*
**************************************************************************
*
* Notes on locking strategy:
*
* Most CSRs are 128-bit (oword) and therefore cannot be read or
* written atomically. Access from the host is buffered by the Bus
* Interface Unit (BIU). Whenever the host reads from the lowest
* address of such a register, or from the address of a different such
* register, the BIU latches the register's value. Subsequent reads
* from higher addresses of the same register will read the latched
* value. Whenever the host writes part of such a register, the BIU
* collects the written value and does not write to the underlying
* register until all 4 dwords have been written. A similar buffering
* scheme applies to host access to the NIC's 64-bit SRAM.
*
* Access to different CSRs and 64-bit SRAM words must be serialised,
* since interleaved access can result in lost writes or lost
* information from read-to-clear fields. We use efx_nic::biu_lock
* for this. (We could use separate locks for read and write, but
* this is not normally a performance bottleneck.)
*
* The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
* 128-bit but are special-cased in the BIU to avoid the need for
* locking in the host:
*
* - They are write-only.
* - The semantics of writing to these registers are such that
* replacing the low 96 bits with zero does not affect functionality.
* - If the host writes to the last dword address of such a register
* (i.e. the high 32 bits) the underlying register will always be
* written. If the collector and the current write together do not
* provide values for all 128 bits of the register, the low 96 bits
* will be written as zero.
* - If the host writes to the address of any other part of such a
* register while the collector already holds values for some other
* register, the write is discarded and the collector maintains its
* current state.
*/
#if BITS_PER_LONG == 64
#define EFX_USE_QWORD_IO 1
#endif
#ifdef EFX_USE_QWORD_IO
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
unsigned int reg)
{
__raw_writeq((__force u64)value, efx->membase + reg);
}
static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
{
return (__force __le64)__raw_readq(efx->membase + reg);
}
#endif
static inline void _efx_writed(struct efx_nic *efx, __le32 value,
unsigned int reg)
{
__raw_writel((__force u32)value, efx->membase + reg);
}
static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
{
return (__force __le32)__raw_readl(efx->membase + reg);
}
/* Write a normal 128-bit CSR, locking as appropriate. */
static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg)
{
unsigned long flags __attribute__ ((unused));
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with " EFX_OWORD_FMT "\n", reg,
EFX_OWORD_VAL(*value));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
_efx_writeq(efx, value->u64[0], reg + 0);
_efx_writeq(efx, value->u64[1], reg + 8);
#else
_efx_writed(efx, value->u32[0], reg + 0);
_efx_writed(efx, value->u32[1], reg + 4);
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
wmb();
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
efx_qword_t *value, unsigned int index)
{
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
netif_vdbg(efx, hw, efx->net_dev,
"writing SRAM address %x with " EFX_QWORD_FMT "\n",
addr, EFX_QWORD_VAL(*value));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
__raw_writeq((__force u64)value->u64[0], membase + addr);
#else
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
wmb();
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg)
{
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with "EFX_DWORD_FMT"\n",
reg, EFX_DWORD_VAL(*value));
/* No lock required */
_efx_writed(efx, value->u32[0], reg);
wmb();
}
/* Read a 128-bit CSR, locking as appropriate. */
static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg)
{
unsigned long flags __attribute__ ((unused));
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0);
rmb();
value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12);
spin_unlock_irqrestore(&efx->biu_lock, flags);
netif_vdbg(efx, hw, efx->net_dev,
"read from register %x, got " EFX_OWORD_FMT "\n", reg,
EFX_OWORD_VAL(*value));
}
/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
efx_qword_t *value, unsigned int index)
{
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
rmb();
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
netif_vdbg(efx, hw, efx->net_dev,
"read from SRAM address %x, got "EFX_QWORD_FMT"\n",
addr, EFX_QWORD_VAL(*value));
}
/* Read a 32-bit CSR or SRAM */
static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg)
{
value->u32[0] = _efx_readd(efx, reg);
netif_vdbg(efx, hw, efx->net_dev,
"read from register %x, got "EFX_DWORD_FMT"\n",
reg, EFX_DWORD_VAL(*value));
}
/* Write a 128-bit CSR forming part of a table */
static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg, unsigned int index)
{
efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
}
/* Read a 128-bit CSR forming part of a table */
static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg, unsigned int index)
{
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg, unsigned int index)
{
efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
}
/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg, unsigned int index)
{
efx_readd(efx, value, reg + index * sizeof(efx_dword_t));
}
/* Page-mapped register block size */
#define EFX_PAGE_BLOCK_SIZE 0x2000
/* Calculate offset to page-mapped register block */
#define EFX_PAGED_REG(page, reg) \
((page) * EFX_PAGE_BLOCK_SIZE + (reg))
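/* For example, EFX_PAGED_REG(2, 0x830) = 2 * 0x2000 + 0x830 = 0x4830 */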
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg, unsigned int page)
{
reg = EFX_PAGED_REG(page, reg);
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with " EFX_OWORD_FMT "\n", reg,
EFX_OWORD_VAL(*value));
#ifdef EFX_USE_QWORD_IO
_efx_writeq(efx, value->u64[0], reg + 0);
_efx_writeq(efx, value->u64[1], reg + 8);
#else
_efx_writed(efx, value->u32[0], reg + 0);
_efx_writed(efx, value->u32[1], reg + 4);
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
wmb();
}
#define efx_writeo_page(efx, value, reg, page) \
_efx_writeo_page(efx, value, \
reg + \
BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
page)
/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
* RX_DESC_UPD or TX_DESC_UPD)
*/
static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg, unsigned int page)
{
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
}
#define efx_writed_page(efx, value, reg, page) \
_efx_writed_page(efx, value, \
reg + \
BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
&& (reg) != 0xa1c), \
page)
/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
* in the BIU means that writes to TIMER_COMMAND[0] invalidate the
* collector register.
*/
static inline void _efx_writed_page_locked(struct efx_nic *efx,
efx_dword_t *value,
unsigned int reg,
unsigned int page)
{
unsigned long flags __attribute__ ((unused));
if (page == 0) {
spin_lock_irqsave(&efx->biu_lock, flags);
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
spin_unlock_irqrestore(&efx->biu_lock, flags);
} else {
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
}
}
#define efx_writed_page_locked(efx, value, reg, page) \
_efx_writed_page_locked(efx, value, \
reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
page)
#endif /* EFX_IO_H */
@@ -0,0 +1,21 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2009 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_MAC_H
#define EFX_MAC_H
#include "net_driver.h"
extern const struct efx_mac_operations falcon_xmac_operations;
extern const struct efx_mac_operations efx_mcdi_mac_operations;
extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
u32 dma_len, int enable, int clear);
#endif

File diff suppressed because it is too large.

@@ -0,0 +1,130 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2008-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_MCDI_H
#define EFX_MCDI_H
/**
* enum efx_mcdi_state
* @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
* mcdi_lock then they are able to move to MCDI_STATE_RUNNING
* @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that
* moved into this state is allowed to move out of it.
* @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
* has not yet consumed the result. For all other threads, equivalent to
* MCDI_STATE_RUNNING.
*/
enum efx_mcdi_state {
MCDI_STATE_QUIESCENT,
MCDI_STATE_RUNNING,
MCDI_STATE_COMPLETED,
};
enum efx_mcdi_mode {
MCDI_MODE_POLL,
MCDI_MODE_EVENTS,
};
/**
* struct efx_mcdi_iface
* @state: Interface state. Waited for by @wq.
* @wq: Wait queue for threads waiting for state != STATE_RUNNING
* @iface_lock: Protects @credits, @seqno, @resprc, @resplen
* @mode: Poll for mcdi completion, or wait for an mcdi_event.
* Serialised by @iface_lock
* @seqno: The next sequence number to use for mcdi requests.
* Serialised by @iface_lock
* @credits: Number of spurious MCDI completion events allowed before we
* trigger a fatal error. Protected by @iface_lock
* @resprc: Returned MCDI completion
* @resplen: Returned payload length
*/
struct efx_mcdi_iface {
atomic_t state;
wait_queue_head_t wq;
spinlock_t iface_lock;
enum efx_mcdi_mode mode;
unsigned int credits;
unsigned int seqno;
unsigned int resprc;
size_t resplen;
};
extern void efx_mcdi_init(struct efx_nic *efx);
extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
size_t inlen, u8 *outbuf, size_t outlen,
size_t *outlen_actual);
extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
extern void efx_mcdi_mode_poll(struct efx_nic *efx);
extern void efx_mcdi_mode_event(struct efx_nic *efx);
extern void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event);
#define MCDI_PTR2(_buf, _ofst) \
(((u8 *)_buf) + _ofst)
#define MCDI_SET_DWORD2(_buf, _ofst, _value) \
EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
EFX_DWORD_0, _value)
#define MCDI_DWORD2(_buf, _ofst) \
EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
EFX_DWORD_0)
#define MCDI_QWORD2(_buf, _ofst) \
EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \
EFX_QWORD_0)
#define MCDI_PTR(_buf, _ofst) \
MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST)
#define MCDI_SET_DWORD(_buf, _ofst, _value) \
MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)
#define MCDI_DWORD(_buf, _ofst) \
MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
#define MCDI_QWORD(_buf, _ofst) \
MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
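/* For example, MCDI_SET_DWORD(inbuf, SET_MAC_IN_MTU, mtu) expands to
 * MCDI_SET_DWORD2(inbuf, MC_CMD_SET_MAC_IN_MTU_OFST, mtu), populating the
 * little-endian dword at that byte offset of the request buffer.
 */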
extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached_out);
extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list);
extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
u32 dest_evq);
extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out);
extern int efx_mcdi_nvram_update_start(struct efx_nic *efx,
unsigned int type);
extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length);
extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
loff_t offset, const u8 *buffer,
size_t length);
#define EFX_MCDI_NVRAM_LEN_MAX 128
extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
loff_t offset, size_t length);
extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
unsigned int type);
extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
extern int efx_mcdi_reset_port(struct efx_nic *efx);
extern int efx_mcdi_reset_mc(struct efx_nic *efx);
extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
const u8 *mac, int *id_out);
extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
#endif /* EFX_MCDI_H */
@@ -0,0 +1,145 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2009-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "net_driver.h"
#include "efx.h"
#include "mac.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
static int efx_mcdi_set_mac(struct efx_nic *efx)
{
u32 reject, fcntl;
u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST,
efx->net_dev->dev_addr, ETH_ALEN);
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
/* The MCDI command provides for controlling accept/reject
* of broadcast packets too, but the driver doesn't currently
* expose this. */
reject = (efx->promiscuous) ? 0 :
(1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
switch (efx->wanted_fc) {
case EFX_FC_RX | EFX_FC_TX:
fcntl = MC_CMD_FCNTL_BIDIR;
break;
case EFX_FC_RX:
fcntl = MC_CMD_FCNTL_RESPOND;
break;
default:
fcntl = MC_CMD_FCNTL_OFF;
break;
}
if (efx->wanted_fc & EFX_FC_AUTO)
fcntl = MC_CMD_FCNTL_AUTO;
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
NULL, 0, NULL);
}
static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults)
{
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
size_t outlength;
int rc;
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
if (rc)
goto fail;
*faults = MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT);
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
__func__, rc);
return rc;
}
int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
u32 dma_len, int enable, int clear)
{
u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
int rc;
efx_dword_t *cmd_ptr;
int period = enable ? 1000 : 0;
u32 addr_hi;
u32 addr_lo;
BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_LEN != 0);
addr_lo = ((u64)dma_addr) >> 0;
addr_hi = ((u64)dma_addr) >> 32;
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
EFX_POPULATE_DWORD_7(*cmd_ptr,
MC_CMD_MAC_STATS_CMD_DMA, !!enable,
MC_CMD_MAC_STATS_CMD_CLEAR, clear,
MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable,
MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1,
MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
if (rc)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
__func__, enable ? "enable" : "disable", rc);
return rc;
}
static int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
{
int rc;
rc = efx_mcdi_set_mac(efx);
if (rc != 0)
return rc;
/* Restore the multicast hash registers. */
efx->type->push_multicast_hash(efx);
return 0;
}
static bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
{
u32 faults;
int rc = efx_mcdi_get_mac_faults(efx, &faults);
return (rc != 0) || (faults != 0);
}
const struct efx_mac_operations efx_mcdi_mac_operations = {
.reconfigure = efx_mcdi_mac_reconfigure,
.update_stats = efx_port_dummy_op_void,
.check_fault = efx_mcdi_mac_check_fault,
};

File diff suppressed because it is too large.

@@ -0,0 +1,754 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2009-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
/*
* Driver for PHY related operations via MCDI.
*/
#include <linux/slab.h>
#include "efx.h"
#include "phy.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "selftest.h"
struct efx_mcdi_phy_data {
u32 flags;
u32 type;
u32 supported_cap;
u32 channel;
u32 port;
u32 stats_mask;
u8 name[20];
u32 media;
u32 mmd_mask;
u8 revision[20];
u32 forced_cap;
};
static int
efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
size_t outlen;
int rc;
BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
rc = -EIO;
goto fail;
}
cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
cfg->supported_cap =
MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
sizeof(cfg->name));
cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
sizeof(cfg->revision));
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
u32 flags, u32 loopback_mode,
u32 loopback_speed)
{
u8 inbuf[MC_CMD_SET_LINK_IN_LEN];
int rc;
BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
NULL, 0, NULL);
if (rc)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
{
u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN];
size_t outlen;
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
rc = -EIO;
goto fail;
}
*loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_SUGGESTED);
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
unsigned int prtad, unsigned int devad, u16 addr,
u16 *value_out, u32 *status_out)
{
u8 inbuf[MC_CMD_MDIO_READ_IN_LEN];
u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN];
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
*value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
*status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS);
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
unsigned int prtad, unsigned int devad, u16 addr,
u16 value, u32 *status_out)
{
u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN];
u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN];
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
*status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS);
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
{
u32 result = 0;
switch (media) {
case MC_CMD_MEDIA_KX4:
result |= SUPPORTED_Backplane;
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
result |= SUPPORTED_1000baseKX_Full;
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
result |= SUPPORTED_10000baseKX4_Full;
break;
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
result |= SUPPORTED_FIBRE;
break;
case MC_CMD_MEDIA_BASE_T:
result |= SUPPORTED_TP;
if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
result |= SUPPORTED_10baseT_Half;
if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
result |= SUPPORTED_10baseT_Full;
if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
result |= SUPPORTED_100baseT_Half;
if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
result |= SUPPORTED_100baseT_Full;
if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
result |= SUPPORTED_1000baseT_Half;
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
result |= SUPPORTED_1000baseT_Full;
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
result |= SUPPORTED_10000baseT_Full;
break;
}
if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
result |= SUPPORTED_Pause;
if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
result |= SUPPORTED_Asym_Pause;
if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
result |= SUPPORTED_Autoneg;
return result;
}
static u32 ethtool_to_mcdi_cap(u32 cap)
{
u32 result = 0;
if (cap & SUPPORTED_10baseT_Half)
result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
if (cap & SUPPORTED_10baseT_Full)
result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
if (cap & SUPPORTED_100baseT_Half)
result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
if (cap & SUPPORTED_100baseT_Full)
result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
if (cap & SUPPORTED_1000baseT_Half)
result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full))
result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
if (cap & SUPPORTED_Pause)
result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
if (cap & SUPPORTED_Asym_Pause)
result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
if (cap & SUPPORTED_Autoneg)
result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
return result;
}
static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
enum efx_phy_mode mode, supported;
u32 flags;
/* TODO: Advertise the capabilities supported by this PHY */
supported = 0;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_TXDIS_LBN))
supported |= PHY_MODE_TX_DISABLED;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_LOWPOWER_LBN))
supported |= PHY_MODE_LOW_POWER;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_POWEROFF_LBN))
supported |= PHY_MODE_OFF;
mode = efx->phy_mode & supported;
flags = 0;
if (mode & PHY_MODE_TX_DISABLED)
flags |= (1 << MC_CMD_SET_LINK_TXDIS_LBN);
if (mode & PHY_MODE_LOW_POWER)
flags |= (1 << MC_CMD_SET_LINK_LOWPOWER_LBN);
if (mode & PHY_MODE_OFF)
flags |= (1 << MC_CMD_SET_LINK_POWEROFF_LBN);
return flags;
}
static u32 mcdi_to_ethtool_media(u32 media)
{
switch (media) {
case MC_CMD_MEDIA_XAUI:
case MC_CMD_MEDIA_CX4:
case MC_CMD_MEDIA_KX4:
return PORT_OTHER;
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
return PORT_FIBRE;
case MC_CMD_MEDIA_BASE_T:
return PORT_TP;
default:
return PORT_OTHER;
}
}
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data;
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
u32 caps;
int rc;
/* Initialise and populate phy_data */
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
if (phy_data == NULL)
return -ENOMEM;
rc = efx_mcdi_get_phy_cfg(efx, phy_data);
if (rc != 0)
goto fail;
/* Read initial link advertisement */
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
goto fail;
/* Fill out nic state */
efx->phy_data = phy_data;
efx->phy_type = phy_data->type;
efx->mdio_bus = phy_data->channel;
efx->mdio.prtad = phy_data->port;
efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
efx->mdio.mode_support = 0;
if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
efx->link_advertising =
mcdi_to_ethtool_cap(phy_data->media, caps);
else
phy_data->forced_cap = caps;
/* Assert that we can map efx -> mcdi loopback modes */
BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC);
BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII);
BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS);
BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI);
BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII);
BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII);
BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR);
BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI);
BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR);
BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR);
BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR);
BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR);
BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY);
BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS);
BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS);
BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD);
BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT);
BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS);
BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS);
BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR);
BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR);
BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS);
BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS);
BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR);
BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS);
rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes);
if (rc != 0)
goto fail;
/* The MC indicates that LOOPBACK_NONE is a valid loopback mode,
* but by convention we don't include it in loopback_modes */
efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
/* Set the initial link mode */
efx_mcdi_phy_decode_link(
efx, &efx->link_state,
MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
/* Default to Autonegotiated flow control if the PHY supports it */
efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
efx->wanted_fc |= EFX_FC_AUTO;
efx_link_set_wanted_fc(efx, efx->wanted_fc);
return 0;
fail:
kfree(phy_data);
return rc;
}
int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps = (efx->link_advertising ?
ethtool_to_mcdi_cap(efx->link_advertising) :
phy_cfg->forced_cap);
return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
efx->loopback_mode, 0);
}
void efx_mcdi_phy_decode_link(struct efx_nic *efx,
struct efx_link_state *link_state,
u32 speed, u32 flags, u32 fcntl)
{
switch (fcntl) {
case MC_CMD_FCNTL_AUTO:
WARN_ON(1); /* This is not a link mode */
link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
break;
case MC_CMD_FCNTL_BIDIR:
link_state->fc = EFX_FC_TX | EFX_FC_RX;
break;
case MC_CMD_FCNTL_RESPOND:
link_state->fc = EFX_FC_RX;
break;
default:
WARN_ON(1);
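/* fall through - unknown values are treated as "off" */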
case MC_CMD_FCNTL_OFF:
link_state->fc = 0;
break;
}
link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN));
link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_FULL_DUPLEX_LBN));
link_state->speed = speed;
}
/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
* supported by the link partner. Warn the user if this isn't the case
*/
void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 rmtadv;
/* The link partner capabilities are only relevant if the
* link supports flow control autonegotiation */
if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
return;
/* If flow control autoneg is supported and enabled, then fine */
if (efx->wanted_fc & EFX_FC_AUTO)
return;
rmtadv = 0;
if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
rmtadv |= ADVERTISED_Pause;
if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
rmtadv |= ADVERTISED_Asym_Pause;
if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
netif_err(efx, link, efx->net_dev,
"warning: link partner doesn't support pause frames");
}
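/* Poll the link state via MC_CMD_GET_LINK; returns true if the link
* state has changed since the last poll. The caller must hold mac_lock. */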
static bool efx_mcdi_phy_poll(struct efx_nic *efx)
{
struct efx_link_state old_state = efx->link_state;
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
int rc;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc) {
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
__func__, rc);
efx->link_state.up = false;
} else {
efx_mcdi_phy_decode_link(
efx, &efx->link_state,
MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
}
return !efx_link_state_equal(&efx->link_state, &old_state);
}
static void efx_mcdi_phy_remove(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data = efx->phy_data;
efx->phy_data = NULL;
kfree(phy_data);
}
static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
int rc;
ecmd->supported =
mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
ecmd->advertising = efx->link_advertising;
ethtool_cmd_speed_set(ecmd, efx->link_state.speed);
ecmd->duplex = efx->link_state.fd;
ecmd->port = mcdi_to_ethtool_media(phy_cfg->media);
ecmd->phy_address = phy_cfg->port;
ecmd->transceiver = XCVR_INTERNAL;
ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
ecmd->mdio_support = (efx->mdio.mode_support &
(MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc) {
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
__func__, rc);
return;
}
ecmd->lp_advertising =
mcdi_to_ethtool_cap(phy_cfg->media,
MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
}
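/* Translate the requested ethtool settings into a capability mask for
* MC_CMD_SET_LINK. With autoneg, the advertised mask is used; for a
* forced link, exactly one speed/duplex capability bit is set. */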
static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps;
int rc;
if (ecmd->autoneg) {
caps = (ethtool_to_mcdi_cap(ecmd->advertising) |
1 << MC_CMD_PHY_CAP_AN_LBN);
} else if (ecmd->duplex) {
switch (ethtool_cmd_speed(ecmd)) {
case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
default: return -EINVAL;
}
} else {
switch (ethtool_cmd_speed(ecmd)) {
case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
default: return -EINVAL;
}
}
rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
efx->loopback_mode, 0);
if (rc)
return rc;
if (ecmd->autoneg) {
efx_link_set_advertising(
efx, ecmd->advertising | ADVERTISED_Autoneg);
phy_cfg->forced_cap = 0;
} else {
efx_link_set_advertising(efx, 0);
phy_cfg->forced_cap = caps;
}
return 0;
}
static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
{
u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
size_t outlen;
int rc;
BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
return -EIO;
if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
return -EINVAL;
return 0;
}
static const char *const mcdi_sft9001_cable_diag_names[] = {
"cable.pairA.length",
"cable.pairB.length",
"cable.pairC.length",
"cable.pairD.length",
"cable.pairA.status",
"cable.pairB.status",
"cable.pairC.status",
"cable.pairD.status",
};
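/* Run a single BIST via MCDI: start the test, poll up to 10 seconds
* for completion, then record a pass/fail result (plus, for SFT9001B
* cable tests, the per-pair diagnostics). Returns the number of
* results written, or a negative error code. */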
static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
int *results)
{
unsigned int retry, i, count = 0;
size_t outlen;
u32 status;
u8 *buf, *ptr;
int rc;
buf = kzalloc(0x100, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
NULL, 0, NULL);
if (rc)
goto out;
/* Wait up to 10s for BIST to finish */
for (retry = 0; retry < 100; ++retry) {
BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
buf, 0x100, &outlen);
if (rc)
goto out;
status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
if (status != MC_CMD_POLL_BIST_RUNNING)
goto finished;
msleep(100);
}
rc = -ETIMEDOUT;
goto out;
finished:
results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
/* SFT9001 specific cable diagnostics output */
if (efx->phy_type == PHY_TYPE_SFT9001B &&
(bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
if (status == MC_CMD_POLL_BIST_PASSED &&
outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
for (i = 0; i < 8; i++) {
results[count + i] =
EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
EFX_DWORD_0);
}
}
count += 8;
}
rc = count;
out:
kfree(buf);
return rc;
}
static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
unsigned flags)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 mode;
int rc;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
if (rc < 0)
return rc;
results += rc;
}
/* If we support both LONG and SHORT cable BIST, run the LONG test only
* for offline (link-disrupting) self-tests and the SHORT test otherwise.
* If only one is supported, run whichever we have */
mode = 0;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) {
if ((flags & ETH_TEST_FL_OFFLINE) &&
(phy_cfg->flags &
(1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)))
mode = MC_CMD_PHY_BIST_CABLE_LONG;
else
mode = MC_CMD_PHY_BIST_CABLE_SHORT;
} else if (phy_cfg->flags &
(1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))
mode = MC_CMD_PHY_BIST_CABLE_LONG;
if (mode != 0) {
rc = efx_mcdi_bist(efx, mode, results);
if (rc < 0)
return rc;
results += rc;
}
return 0;
}
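/* Map a self-test index onto its name, in the same order that
* efx_mcdi_phy_run_tests() fills in results. */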
static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
unsigned int index)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
if (index == 0)
return "bist";
--index;
}
if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) |
(1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) {
if (index == 0)
return "cable";
--index;
if (efx->phy_type == PHY_TYPE_SFT9001B) {
if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
return mcdi_sft9001_cable_diag_names[index];
index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
}
}
return NULL;
}
const struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_port_dummy_op_int,
.reconfigure = efx_mcdi_phy_reconfigure,
.poll = efx_mcdi_phy_poll,
.fini = efx_port_dummy_op_void,
.remove = efx_mcdi_phy_remove,
.get_settings = efx_mcdi_phy_get_settings,
.set_settings = efx_mcdi_phy_set_settings,
.test_alive = efx_mcdi_phy_test_alive,
.run_tests = efx_mcdi_phy_run_tests,
.test_name = efx_mcdi_phy_test_name,
};

View File

@@ -0,0 +1,323 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
/*
* Useful functions for working with MDIO clause 45 PHYs
*/
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include "net_driver.h"
#include "mdio_10g.h"
#include "workarounds.h"
unsigned efx_mdio_id_oui(u32 id)
{
unsigned oui = 0;
int i;
/* The bits of the OUI are designated a..x, with a=0 and b variable.
* In the id register c is the MSB but the OUI is conventionally
* written as bytes h..a, p..i, x..q. Reorder the bits accordingly. */
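/* XORing the index with 7 reverses the bit order within each byte:
* e.g. id bit 10 (i = 0) maps to OUI bit 7, id bit 11 to OUI bit 6. */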
for (i = 0; i < 22; ++i)
if (id & (1 << (i + 10)))
oui |= 1 << (i ^ 7);
return oui;
}
int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
int spins, int spintime)
{
u32 ctrl;
/* Catch callers passing values in the wrong units (or just silly) */
EFX_BUG_ON_PARANOID(spins * spintime >= 5000);
efx_mdio_write(port, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET);
/* Wait for the reset bit to clear. */
do {
msleep(spintime);
ctrl = efx_mdio_read(port, mmd, MDIO_CTRL1);
spins--;
} while (spins && (ctrl & MDIO_CTRL1_RESET));
return spins ? spins : -ETIMEDOUT;
}
static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
{
int status;
if (mmd != MDIO_MMD_AN) {
/* Read MMD STATUS2 to check it is responding. */
status = efx_mdio_read(efx, mmd, MDIO_STAT2);
if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) {
netif_err(efx, hw, efx->net_dev,
"PHY MMD %d not responding.\n", mmd);
return -EIO;
}
}
return 0;
}
/* This ought to be ridiculous overkill. We expect it to fail rarely */
#define MDIO45_RESET_TIME 1000 /* ms */
#define MDIO45_RESET_ITERS 100
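/* Poll every MMD in the mask until all of their reset bits have
* cleared, or until the overall timeout expires. */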
int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
{
const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
int tries = MDIO45_RESET_ITERS;
int rc = 0;
int in_reset;
while (tries) {
int mask = mmd_mask;
int mmd = 0;
int stat;
in_reset = 0;
while (mask) {
if (mask & 1) {
stat = efx_mdio_read(efx, mmd, MDIO_CTRL1);
if (stat < 0) {
netif_err(efx, hw, efx->net_dev,
"failed to read status of"
" MMD %d\n", mmd);
return -EIO;
}
if (stat & MDIO_CTRL1_RESET)
in_reset |= (1 << mmd);
}
mask = mask >> 1;
mmd++;
}
if (!in_reset)
break;
tries--;
msleep(spintime);
}
if (in_reset != 0) {
netif_err(efx, hw, efx->net_dev,
"not all MMDs came out of reset in time."
" MMDs still in reset: %x\n", in_reset);
rc = -ETIMEDOUT;
}
return rc;
}
int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
{
int mmd = 0, probe_mmd, devs1, devs2;
u32 devices;
/* Historically we have probed the PHYXS to find out what devices are
* present, but that doesn't work so well if the PHYXS isn't expected
* to exist. In that case, just probe the first MMD in the mask supplied. */
probe_mmd = (mmd_mask & MDIO_DEVS_PHYXS) ? MDIO_MMD_PHYXS :
__ffs(mmd_mask);
/* Check all the expected MMDs are present */
devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1);
devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2);
if (devs1 < 0 || devs2 < 0) {
netif_err(efx, hw, efx->net_dev,
"failed to read devices present\n");
return -EIO;
}
devices = devs1 | (devs2 << 16);
if ((devices & mmd_mask) != mmd_mask) {
netif_err(efx, hw, efx->net_dev,
"required MMDs not present: got %x, wanted %x\n",
devices, mmd_mask);
return -ENODEV;
}
netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices);
/* Check all required MMDs are responding and happy. */
while (mmd_mask) {
if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
return -EIO;
mmd_mask = mmd_mask >> 1;
mmd++;
}
return 0;
}
bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
{
/* If the port is in loopback, then we should only consider a subset
* of MMDs */
if (LOOPBACK_INTERNAL(efx))
return true;
else if (LOOPBACK_MASK(efx) & LOOPBACKS_WS)
return false;
else if (efx_phy_mode_disabled(efx->phy_mode))
return false;
else if (efx->loopback_mode == LOOPBACK_PHYXS)
mmd_mask &= ~(MDIO_DEVS_PHYXS |
MDIO_DEVS_PCS |
MDIO_DEVS_PMAPMD |
MDIO_DEVS_AN);
else if (efx->loopback_mode == LOOPBACK_PCS)
mmd_mask &= ~(MDIO_DEVS_PCS |
MDIO_DEVS_PMAPMD |
MDIO_DEVS_AN);
else if (efx->loopback_mode == LOOPBACK_PMAPMD)
mmd_mask &= ~(MDIO_DEVS_PMAPMD |
MDIO_DEVS_AN);
return mdio45_links_ok(&efx->mdio, mmd_mask);
}
void efx_mdio_transmit_disable(struct efx_nic *efx)
{
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
MDIO_PMA_TXDIS, MDIO_PMD_TXDIS_GLOBAL,
efx->phy_mode & PHY_MODE_TX_DISABLED);
}
void efx_mdio_phy_reconfigure(struct efx_nic *efx)
{
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
MDIO_CTRL1, MDIO_PMA_CTRL1_LOOPBACK,
efx->loopback_mode == LOOPBACK_PMAPMD);
efx_mdio_set_flag(efx, MDIO_MMD_PCS,
MDIO_CTRL1, MDIO_PCS_CTRL1_LOOPBACK,
efx->loopback_mode == LOOPBACK_PCS);
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS,
MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK,
efx->loopback_mode == LOOPBACK_PHYXS_WS);
}
static void efx_mdio_set_mmd_lpower(struct efx_nic *efx,
int lpower, int mmd)
{
int stat = efx_mdio_read(efx, mmd, MDIO_STAT1);
netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n",
mmd, lpower);
if (stat & MDIO_STAT1_LPOWERABLE) {
efx_mdio_set_flag(efx, mmd, MDIO_CTRL1,
MDIO_CTRL1_LPOWER, lpower);
}
}
void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
int low_power, unsigned int mmd_mask)
{
int mmd = 0;
mmd_mask &= ~MDIO_DEVS_AN;
while (mmd_mask) {
if (mmd_mask & 1)
efx_mdio_set_mmd_lpower(efx, low_power, mmd);
mmd_mask = (mmd_mask >> 1);
mmd++;
}
}
/**
* efx_mdio_set_settings - Set (some of) the PHY settings over MDIO.
* @efx: Efx NIC
* @ecmd: New settings
*/
int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };
efx->phy_op->get_settings(efx, &prev);
if (ecmd->advertising == prev.advertising &&
ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) &&
ecmd->duplex == prev.duplex &&
ecmd->port == prev.port &&
ecmd->autoneg == prev.autoneg)
return 0;
/* We can only change these settings for -T PHYs */
if (prev.port != PORT_TP || ecmd->port != PORT_TP)
return -EINVAL;
/* Check that PHY supports these settings */
if (!ecmd->autoneg ||
(ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
return -EINVAL;
efx_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
efx_mdio_an_reconfigure(efx);
return 0;
}
/**
* efx_mdio_an_reconfigure - Push advertising flags and restart autonegotiation
* @efx: Efx NIC
*/
void efx_mdio_an_reconfigure(struct efx_nic *efx)
{
int reg;
WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
/* Set up the base page */
reg = ADVERTISE_CSMA | ADVERTISE_RESV;
if (efx->link_advertising & ADVERTISED_Pause)
reg |= ADVERTISE_PAUSE_CAP;
if (efx->link_advertising & ADVERTISED_Asym_Pause)
reg |= ADVERTISE_PAUSE_ASYM;
efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
/* Set up the (extended) next page */
efx->phy_op->set_npage_adv(efx, efx->link_advertising);
/* Enable and restart AN */
reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP;
efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
}
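/* Resolve the flow control in use: if flow control autonegotiation is
* enabled, combine our advertised pause capabilities with the link
* partner's; otherwise return the forced settings unchanged. */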
u8 efx_mdio_get_pause(struct efx_nic *efx)
{
BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
if (!(efx->wanted_fc & EFX_FC_AUTO))
return efx->wanted_fc;
WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
return mii_resolve_flowctrl_fdx(
mii_advertise_flowctrl(efx->wanted_fc),
efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
}
int efx_mdio_test_alive(struct efx_nic *efx)
{
int rc;
int devad = __ffs(efx->mdio.mmds);
u16 physid1, physid2;
mutex_lock(&efx->mac_lock);
physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
(physid2 == 0x0000) || (physid2 == 0xffff)) {
netif_err(efx, hw, efx->net_dev,
"no MDIO PHY present with ID %d\n", efx->mdio.prtad);
rc = -EINVAL;
} else {
rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
}
mutex_unlock(&efx->mac_lock);
return rc;
}

View File

@@ -0,0 +1,112 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_MDIO_10G_H
#define EFX_MDIO_10G_H
#include <linux/mdio.h>
/*
* Helper functions for doing 10G MDIO as specified in IEEE 802.3 clause 45.
*/
#include "efx.h"
static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
extern unsigned efx_mdio_id_oui(u32 id);
static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
{
return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad, devad, addr);
}
static inline void
efx_mdio_write(struct efx_nic *efx, int devad, int addr, int value)
{
efx->mdio.mdio_write(efx->net_dev, efx->mdio.prtad, devad, addr, value);
}
static inline u32 efx_mdio_read_id(struct efx_nic *efx, int mmd)
{
u16 id_low = efx_mdio_read(efx, mmd, MDIO_DEVID2);
u16 id_hi = efx_mdio_read(efx, mmd, MDIO_DEVID1);
return (id_hi << 16) | (id_low);
}
static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
{
int i, lane_status;
bool sync;
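/* Read the lane status twice, as the first read may return a
* latched value */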
for (i = 0; i < 2; ++i)
lane_status = efx_mdio_read(efx, MDIO_MMD_PHYXS,
MDIO_PHYXS_LNSTAT);
sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
if (!sync)
netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n",
lane_status);
return sync;
}
extern const char *efx_mdio_mmd_name(int mmd);
/*
* Reset a specific MMD and wait for reset to clear.
* Return number of spins left (>0) on success, -%ETIMEDOUT on failure.
*
* This function will sleep
*/
extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
int spins, int spintime);
/* As efx_mdio_check_mmd but for multiple MMDs */
extern int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
/* Check the link status of specified mmds in bit mask */
extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
/* Generic transmit disable support through PMAPMD */
extern void efx_mdio_transmit_disable(struct efx_nic *efx);
/* Generic part of reconfigure: set/clear loopback bits */
extern void efx_mdio_phy_reconfigure(struct efx_nic *efx);
/* Set the power state of the specified MMDs */
extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
int low_power, unsigned int mmd_mask);
/* Set (some of) the PHY settings over MDIO */
extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
/* Push advertising flags and restart autonegotiation */
extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
/* Get pause parameters from AN if available (otherwise return
* requested pause parameters)
*/
extern u8 efx_mdio_get_pause(struct efx_nic *efx);
/* Wait for specified MMDs to exit reset within a timeout */
extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
unsigned int mmd_mask);
/* Set or clear flag, debouncing */
static inline void
efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
int mask, bool state)
{
mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
}
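/* For example (a minimal sketch), PMA/PMD loopback can be enabled with:
*
* efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
* MDIO_PMA_CTRL1_LOOPBACK, true);
*/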
/* Liveness self-test for MDIO PHYs */
extern int efx_mdio_test_alive(struct efx_nic *efx);
#endif /* EFX_MDIO_10G_H */

View File

@@ -0,0 +1,693 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
#include "spi.h"
#include "efx.h"
#include "nic.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#define EFX_SPI_VERIFY_BUF_LEN 16
struct efx_mtd_partition {
struct mtd_info mtd;
union {
struct {
bool updating;
u8 nvram_type;
u16 fw_subtype;
} mcdi;
size_t offset;
};
const char *type_name;
char name[IFNAMSIZ + 20];
};
struct efx_mtd_ops {
int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, u8 *buffer);
int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, const u8 *buffer);
int (*sync)(struct mtd_info *mtd);
};
struct efx_mtd {
struct list_head node;
struct efx_nic *efx;
const struct efx_spi_device *spi;
const char *name;
const struct efx_mtd_ops *ops;
size_t n_parts;
struct efx_mtd_partition part[0];
};
#define efx_for_each_partition(part, efx_mtd) \
for ((part) = &(efx_mtd)->part[0]; \
(part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \
(part)++)
#define to_efx_mtd_partition(mtd) \
container_of(mtd, struct efx_mtd_partition, mtd)
static int falcon_mtd_probe(struct efx_nic *efx);
static int siena_mtd_probe(struct efx_nic *efx);
/* SPI utilities */
static int
efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
{
struct efx_mtd *efx_mtd = part->mtd.priv;
const struct efx_spi_device *spi = efx_mtd->spi;
struct efx_nic *efx = efx_mtd->efx;
u8 status;
int rc, i;
/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
for (i = 0; i < 40; i++) {
__set_current_state(uninterruptible ?
TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
schedule_timeout(HZ / 10);
rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
&status, sizeof(status));
if (rc)
return rc;
if (!(status & SPI_STATUS_NRDY))
return 0;
if (signal_pending(current))
return -EINTR;
}
pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
return -ETIMEDOUT;
}
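/* Clear the block-protection bits in the SPI status register so that
* the flash/EEPROM can be modified; a no-op if already unlocked. */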
static int
efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
{
const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
SPI_STATUS_BP0);
u8 status;
int rc;
rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
&status, sizeof(status));
if (rc)
return rc;
if (!(status & unlock_mask))
return 0; /* already unlocked */
rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
if (rc)
return rc;
rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
if (rc)
return rc;
status &= ~unlock_mask;
rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
NULL, sizeof(status));
if (rc)
return rc;
rc = falcon_spi_wait_write(efx, spi);
if (rc)
return rc;
return 0;
}
static int
efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
{
struct efx_mtd *efx_mtd = part->mtd.priv;
const struct efx_spi_device *spi = efx_mtd->spi;
struct efx_nic *efx = efx_mtd->efx;
unsigned pos, block_len;
u8 empty[EFX_SPI_VERIFY_BUF_LEN];
u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
int rc;
if (len != spi->erase_size)
return -EINVAL;
if (spi->erase_command == 0)
return -EOPNOTSUPP;
rc = efx_spi_unlock(efx, spi);
if (rc)
return rc;
rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
if (rc)
return rc;
rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
NULL, 0);
if (rc)
return rc;
rc = efx_spi_slow_wait(part, false);
/* Verify the entire region has been wiped */
memset(empty, 0xff, sizeof(empty));
for (pos = 0; pos < len; pos += block_len) {
block_len = min(len - pos, sizeof(buffer));
rc = falcon_spi_read(efx, spi, start + pos, block_len,
NULL, buffer);
if (rc)
return rc;
if (memcmp(empty, buffer, block_len))
return -EIO;
/* Avoid locking up the system */
cond_resched();
if (signal_pending(current))
return -EINTR;
}
return rc;
}
/* MTD interface */
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
struct efx_mtd *efx_mtd = mtd->priv;
int rc;
rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
if (rc == 0) {
erase->state = MTD_ERASE_DONE;
} else {
erase->state = MTD_ERASE_FAILED;
erase->fail_addr = 0xffffffff;
}
mtd_erase_callback(erase);
return rc;
}
static void efx_mtd_sync(struct mtd_info *mtd)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
int rc;
rc = efx_mtd->ops->sync(mtd);
if (rc)
pr_err("%s: %s sync failed (%d)\n",
part->name, efx_mtd->name, rc);
}
static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
{
int rc;
for (;;) {
rc = mtd_device_unregister(&part->mtd);
if (rc != -EBUSY)
break;
ssleep(1);
}
WARN_ON(rc);
}
static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
{
struct efx_mtd_partition *part;
efx_for_each_partition(part, efx_mtd)
efx_mtd_remove_partition(part);
list_del(&efx_mtd->node);
kfree(efx_mtd);
}
static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
{
struct efx_mtd_partition *part;
efx_for_each_partition(part, efx_mtd)
if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
snprintf(part->name, sizeof(part->name),
"%s %s:%02x", efx_mtd->efx->name,
part->type_name, part->mcdi.fw_subtype);
else
snprintf(part->name, sizeof(part->name),
"%s %s", efx_mtd->efx->name,
part->type_name);
}
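/* Register every partition of this device with the MTD core, unwinding
* the already-registered partitions if any registration fails. */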
static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
{
struct efx_mtd_partition *part;
efx_mtd->efx = efx;
efx_mtd_rename_device(efx_mtd);
efx_for_each_partition(part, efx_mtd) {
part->mtd.writesize = 1;
part->mtd.owner = THIS_MODULE;
part->mtd.priv = efx_mtd;
part->mtd.name = part->name;
part->mtd.erase = efx_mtd_erase;
part->mtd.read = efx_mtd->ops->read;
part->mtd.write = efx_mtd->ops->write;
part->mtd.sync = efx_mtd_sync;
if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
}
list_add(&efx_mtd->node, &efx->mtd_list);
return 0;
fail:
while (part != &efx_mtd->part[0]) {
--part;
efx_mtd_remove_partition(part);
}
/* mtd_device_register() returns 1 if the MTD table is full */
return -ENOMEM;
}
void efx_mtd_remove(struct efx_nic *efx)
{
struct efx_mtd *efx_mtd, *next;
WARN_ON(efx_dev_registered(efx));
list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
efx_mtd_remove_device(efx_mtd);
}
void efx_mtd_rename(struct efx_nic *efx)
{
struct efx_mtd *efx_mtd;
ASSERT_RTNL();
list_for_each_entry(efx_mtd, &efx->mtd_list, node)
efx_mtd_rename_device(efx_mtd);
}
int efx_mtd_probe(struct efx_nic *efx)
{
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
return siena_mtd_probe(efx);
else
return falcon_mtd_probe(efx);
}
/* Implementation of MTD operations for Falcon */
static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
const struct efx_spi_device *spi = efx_mtd->spi;
struct efx_nic *efx = efx_mtd->efx;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = falcon_spi_read(efx, spi, part->offset + start, len,
retlen, buffer);
mutex_unlock(&nic_data->spi_lock);
return rc;
}
static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
struct efx_nic *efx = efx_mtd->efx;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = efx_spi_erase(part, part->offset + start, len);
mutex_unlock(&nic_data->spi_lock);
return rc;
}
static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, const u8 *buffer)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
const struct efx_spi_device *spi = efx_mtd->spi;
struct efx_nic *efx = efx_mtd->efx;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = falcon_spi_write(efx, spi, part->offset + start, len,
retlen, buffer);
mutex_unlock(&nic_data->spi_lock);
return rc;
}
static int falcon_mtd_sync(struct mtd_info *mtd)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
struct efx_nic *efx = efx_mtd->efx;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
mutex_lock(&nic_data->spi_lock);
rc = efx_spi_slow_wait(part, true);
mutex_unlock(&nic_data->spi_lock);
return rc;
}
static struct efx_mtd_ops falcon_mtd_ops = {
.read = falcon_mtd_read,
.erase = falcon_mtd_erase,
.write = falcon_mtd_write,
.sync = falcon_mtd_sync,
};
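/* On Falcon, expose up to two partitions: the boot ROM region of the
* SPI flash and the boot configuration region of the SPI EEPROM, for
* whichever of the devices are present and large enough. */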
static int falcon_mtd_probe(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct efx_spi_device *spi;
struct efx_mtd *efx_mtd;
int rc = -ENODEV;
ASSERT_RTNL();
spi = &nic_data->spi_flash;
if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
GFP_KERNEL);
if (!efx_mtd)
return -ENOMEM;
efx_mtd->spi = spi;
efx_mtd->name = "flash";
efx_mtd->ops = &falcon_mtd_ops;
efx_mtd->n_parts = 1;
efx_mtd->part[0].mtd.type = MTD_NORFLASH;
efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
efx_mtd->part[0].mtd.erasesize = spi->erase_size;
efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
efx_mtd->part[0].type_name = "sfc_flash_bootrom";
rc = efx_mtd_probe_device(efx, efx_mtd);
if (rc) {
kfree(efx_mtd);
return rc;
}
}
spi = &nic_data->spi_eeprom;
if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
GFP_KERNEL);
if (!efx_mtd)
return -ENOMEM;
efx_mtd->spi = spi;
efx_mtd->name = "EEPROM";
efx_mtd->ops = &falcon_mtd_ops;
efx_mtd->n_parts = 1;
efx_mtd->part[0].mtd.type = MTD_RAM;
efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
efx_mtd->part[0].mtd.size =
min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
EFX_EEPROM_BOOTCONFIG_START;
efx_mtd->part[0].mtd.erasesize = spi->erase_size;
efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
efx_mtd->part[0].type_name = "sfc_bootconfig";
rc = efx_mtd_probe_device(efx, efx_mtd);
if (rc) {
kfree(efx_mtd);
return rc;
}
}
return rc;
}
/* Implementation of MTD operations for Siena */
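/* Reads and writes are split into chunks of at most
* EFX_MCDI_NVRAM_LEN_MAX bytes, the per-request limit of the MCDI
* NVRAM commands. */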
static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
struct efx_nic *efx = efx_mtd->efx;
loff_t offset = start;
loff_t end = min_t(loff_t, start + len, mtd->size);
size_t chunk;
int rc = 0;
while (offset < end) {
chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
buffer, chunk);
if (rc)
goto out;
offset += chunk;
buffer += chunk;
}
out:
*retlen = offset - start;
return rc;
}
static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
struct efx_nic *efx = efx_mtd->efx;
loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
loff_t end = min_t(loff_t, start + len, mtd->size);
size_t chunk = part->mtd.erasesize;
int rc = 0;
if (!part->mcdi.updating) {
rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
if (rc)
goto out;
part->mcdi.updating = 1;
}
/* The MCDI interface can in fact do multiple erase blocks at once;
* but erasing may be slow, so we make multiple calls here to avoid
* tripping the MCDI RPC timeout. */
while (offset < end) {
rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
chunk);
if (rc)
goto out;
offset += chunk;
}
out:
return rc;
}
static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, const u8 *buffer)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
struct efx_nic *efx = efx_mtd->efx;
loff_t offset = start;
loff_t end = min_t(loff_t, start + len, mtd->size);
size_t chunk;
int rc = 0;
if (!part->mcdi.updating) {
rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
if (rc)
goto out;
part->mcdi.updating = 1;
}
while (offset < end) {
chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
buffer, chunk);
if (rc)
goto out;
offset += chunk;
buffer += chunk;
}
out:
*retlen = offset - start;
return rc;
}
static int siena_mtd_sync(struct mtd_info *mtd)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
struct efx_nic *efx = efx_mtd->efx;
int rc = 0;
if (part->mcdi.updating) {
part->mcdi.updating = 0;
rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
}
return rc;
}
static struct efx_mtd_ops siena_mtd_ops = {
.read = siena_mtd_read,
.erase = siena_mtd_erase,
.write = siena_mtd_write,
.sync = siena_mtd_sync,
};
struct siena_nvram_type_info {
int port;
const char *name;
};
static struct siena_nvram_type_info siena_nvram_types[] = {
[MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
[MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
[MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
[MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
[MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
[MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
};
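/* Fill in one MTD partition from the MCDI NVRAM information; partitions
* belonging to the other port, or marked protected, are hidden by
* returning -ENODEV. */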
static int siena_mtd_probe_partition(struct efx_nic *efx,
struct efx_mtd *efx_mtd,
unsigned int part_id,
unsigned int type)
{
struct efx_mtd_partition *part = &efx_mtd->part[part_id];
struct siena_nvram_type_info *info;
size_t size, erase_size;
bool protected;
int rc;
if (type >= ARRAY_SIZE(siena_nvram_types))
return -ENODEV;
info = &siena_nvram_types[type];
if (info->port != efx_port_num(efx))
return -ENODEV;
rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
if (rc)
return rc;
if (protected)
return -ENODEV; /* hide it */
part->mcdi.nvram_type = type;
part->type_name = info->name;
part->mtd.type = MTD_NORFLASH;
part->mtd.flags = MTD_CAP_NORFLASH;
part->mtd.size = size;
part->mtd.erasesize = erase_size;
return 0;
}
static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
struct efx_mtd *efx_mtd)
{
struct efx_mtd_partition *part;
uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN /
sizeof(uint16_t)];
int rc;
rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list);
if (rc)
return rc;
efx_for_each_partition(part, efx_mtd)
part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];
return 0;
}
static int siena_mtd_probe(struct efx_nic *efx)
{
struct efx_mtd *efx_mtd;
int rc = -ENODEV;
u32 nvram_types;
unsigned int type;
ASSERT_RTNL();
rc = efx_mcdi_nvram_types(efx, &nvram_types);
if (rc)
return rc;
efx_mtd = kzalloc(sizeof(*efx_mtd) +
hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
GFP_KERNEL);
if (!efx_mtd)
return -ENOMEM;
efx_mtd->name = "Siena NVRAM manager";
efx_mtd->ops = &siena_mtd_ops;
type = 0;
efx_mtd->n_parts = 0;
while (nvram_types != 0) {
if (nvram_types & 1) {
rc = siena_mtd_probe_partition(efx, efx_mtd,
efx_mtd->n_parts, type);
if (rc == 0)
efx_mtd->n_parts++;
else if (rc != -ENODEV)
goto fail;
}
type++;
nvram_types >>= 1;
}
rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
if (rc)
goto fail;
rc = efx_mtd_probe_device(efx, efx_mtd);
fail:
if (rc)
kfree(efx_mtd);
return rc;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,273 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_NIC_H
#define EFX_NIC_H
#include <linux/i2c-algo-bit.h>
#include "net_driver.h"
#include "efx.h"
#include "mcdi.h"
#include "spi.h"
/*
* Falcon hardware control
*/
enum {
EFX_REV_FALCON_A0 = 0,
EFX_REV_FALCON_A1 = 1,
EFX_REV_FALCON_B0 = 2,
EFX_REV_SIENA_A0 = 3,
};
static inline int efx_nic_rev(struct efx_nic *efx)
{
return efx->type->revision;
}
extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
static inline bool efx_nic_has_mc(struct efx_nic *efx)
{
return efx_nic_rev(efx) >= EFX_REV_SIENA_A0;
}
/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
{
return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
}
enum {
PHY_TYPE_NONE = 0,
PHY_TYPE_TXC43128 = 1,
PHY_TYPE_88E1111 = 2,
PHY_TYPE_SFX7101 = 3,
PHY_TYPE_QT2022C2 = 4,
PHY_TYPE_PM8358 = 6,
PHY_TYPE_SFT9001A = 8,
PHY_TYPE_QT2025C = 9,
PHY_TYPE_SFT9001B = 10,
};
#define FALCON_XMAC_LOOPBACKS \
((1 << LOOPBACK_XGMII) | \
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI))
#define FALCON_GMAC_LOOPBACKS \
(1 << LOOPBACK_GMAC)
/**
* struct falcon_board_type - board operations and type information
* @id: Board type id, as found in NVRAM
* @ref_model: Model number of Solarflare reference design
* @gen_type: Generic board type description
* @init: Allocate resources and initialise peripheral hardware
* @init_phy: Do board-specific PHY initialisation
* @fini: Shut down hardware and free resources
* @set_id_led: Set state of identifying LED or revert to automatic function
* @monitor: Board-specific health check function
*/
struct falcon_board_type {
u8 id;
const char *ref_model;
const char *gen_type;
int (*init) (struct efx_nic *nic);
void (*init_phy) (struct efx_nic *efx);
void (*fini) (struct efx_nic *nic);
void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode);
int (*monitor) (struct efx_nic *nic);
};
/**
* struct falcon_board - board information
* @type: Type of board
* @major: Major rev. ('A', 'B' ...)
* @minor: Minor rev. (0, 1, ...)
* @i2c_adap: I2C adapter for on-board peripherals
* @i2c_data: Data for bit-banging algorithm
* @hwmon_client: I2C client for hardware monitor
* @ioexp_client: I2C client for power/port control
*/
struct falcon_board {
const struct falcon_board_type *type;
int major;
int minor;
struct i2c_adapter i2c_adap;
struct i2c_algo_bit_data i2c_data;
struct i2c_client *hwmon_client, *ioexp_client;
};
/**
* struct falcon_nic_data - Falcon NIC state
* @pci_dev2: Secondary function of Falcon A
* @board: Board state and functions
* @stats_disable_count: Nest count for disabling statistics fetches
* @stats_pending: Is there a pending DMA of MAC statistics.
* @stats_timer: A timer for regularly fetching MAC statistics.
* @stats_dma_done: Pointer to the flag which indicates DMA completion.
* @spi_flash: SPI flash device
* @spi_eeprom: SPI EEPROM device
* @spi_lock: SPI bus lock
* @mdio_lock: MDIO bus lock
* @xmac_poll_required: XMAC link state needs polling
*/
struct falcon_nic_data {
struct pci_dev *pci_dev2;
struct falcon_board board;
unsigned int stats_disable_count;
bool stats_pending;
struct timer_list stats_timer;
u32 *stats_dma_done;
struct efx_spi_device spi_flash;
struct efx_spi_device spi_eeprom;
struct mutex spi_lock;
struct mutex mdio_lock;
bool xmac_poll_required;
};
static inline struct falcon_board *falcon_board(struct efx_nic *efx)
{
struct falcon_nic_data *data = efx->nic_data;
return &data->board;
}
/**
* struct siena_nic_data - Siena NIC state
* @mcdi: Management-Controller-to-Driver Interface
* @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
* @wol_filter_id: Wake-on-LAN packet filter id
*/
struct siena_nic_data {
struct efx_mcdi_iface mcdi;
void __iomem *mcdi_smem;
int wol_filter_id;
};
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
/**************************************************************************
*
* Externs
*
**************************************************************************
*/
extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue);
extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue);
extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue);
extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue);
/* RX data path */
extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue);
extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
/* Event data path */
extern int efx_nic_probe_eventq(struct efx_channel *channel);
extern void efx_nic_init_eventq(struct efx_channel *channel);
extern void efx_nic_fini_eventq(struct efx_channel *channel);
extern void efx_nic_remove_eventq(struct efx_channel *channel);
extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
extern bool efx_nic_event_present(struct efx_channel *channel);
/* MAC/PHY */
extern void falcon_drain_tx_fifo(struct efx_nic *efx);
extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
/* Interrupts and test events */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
extern void efx_nic_enable_interrupts(struct efx_nic *efx);
extern void efx_nic_generate_test_event(struct efx_channel *channel);
extern void efx_nic_generate_fill_event(struct efx_channel *channel);
extern void efx_nic_generate_interrupt(struct efx_nic *efx);
extern void efx_nic_disable_interrupts(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx);
extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
extern void falcon_irq_ack_a1(struct efx_nic *efx);
#define EFX_IRQ_MOD_RESOLUTION 5
/* Global Resources */
extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
extern void efx_nic_init_common(struct efx_nic *efx);
extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len);
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
/* Tests */
struct efx_nic_register_test {
unsigned address;
efx_oword_t mask;
};
extern int efx_nic_test_registers(struct efx_nic *efx,
const struct efx_nic_register_test *regs,
size_t n_regs);
extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
/**************************************************************************
*
* Falcon MAC stats
*
**************************************************************************
*/
#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
/* Retrieve statistic from statistics block */
#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
(efx)->mac_stats.efx_stat += le16_to_cpu( \
*((__force __le16 *) \
(efx->stats_buffer.addr + \
FALCON_STAT_OFFSET(falcon_stat)))); \
else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
(efx)->mac_stats.efx_stat += le32_to_cpu( \
*((__force __le32 *) \
(efx->stats_buffer.addr + \
FALCON_STAT_OFFSET(falcon_stat)))); \
else \
(efx)->mac_stats.efx_stat += le64_to_cpu( \
*((__force __le64 *) \
(efx->stats_buffer.addr + \
FALCON_STAT_OFFSET(falcon_stat)))); \
} while (0)
#define FALCON_MAC_STATS_SIZE 0x100
#define MAC_DATA_LBN 0
#define MAC_DATA_WIDTH 32
extern void efx_nic_generate_event(struct efx_channel *channel,
efx_qword_t *event);
extern void falcon_poll_xmac(struct efx_nic *efx);
#endif /* EFX_NIC_H */

View File

@@ -0,0 +1,67 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2007-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_PHY_H
#define EFX_PHY_H
/****************************************************************************
* 10Xpress (SFX7101) PHY
*/
extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
/****************************************************************************
* AMCC/Quake QT202x PHYs
*/
extern const struct efx_phy_operations falcon_qt202x_phy_ops;
/* These PHYs provide various H/W control states for LEDs */
#define QUAKE_LED_LINK_INVAL (0)
#define QUAKE_LED_LINK_STAT (1)
#define QUAKE_LED_LINK_ACT (2)
#define QUAKE_LED_LINK_ACTSTAT (3)
#define QUAKE_LED_OFF (4)
#define QUAKE_LED_ON (5)
#define QUAKE_LED_LINK_INPUT (6) /* Pin is an input. */
/* What link the LED tracks */
#define QUAKE_LED_TXLINK (0)
#define QUAKE_LED_RXLINK (8)
extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
/****************************************************************************
* Transwitch CX4 retimer
*/
extern const struct efx_phy_operations falcon_txc_phy_ops;
#define TXC_GPIO_DIR_INPUT 0
#define TXC_GPIO_DIR_OUTPUT 1
extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
/****************************************************************************
* Siena managed PHYs
*/
extern const struct efx_phy_operations efx_mcdi_phy_ops;
extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
unsigned int prtad, unsigned int devad,
u16 addr, u16 *value_out, u32 *status_out);
extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
unsigned int prtad, unsigned int devad,
u16 addr, u16 value, u32 *status_out);
extern void efx_mcdi_phy_decode_link(struct efx_nic *efx,
struct efx_link_state *link_state,
u32 speed, u32 flags, u32 fcntl);
extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx);
extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
#endif

View File

@@ -0,0 +1,462 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
/*
* Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details
*/
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include "efx.h"
#include "mdio_10g.h"
#include "phy.h"
#include "nic.h"
#define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS | \
MDIO_DEVS_PMAPMD | \
MDIO_DEVS_PHYXS)
#define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) | \
(1 << LOOPBACK_PMAPMD) | \
(1 << LOOPBACK_PHYXS_WS))
/****************************************************************************/
/* Quake-specific MDIO registers */
#define MDIO_QUAKE_LED0_REG (0xD006)
/* QT2025C only */
#define PCS_FW_HEARTBEAT_REG 0xd7ee
#define PCS_FW_HEARTB_LBN 0
#define PCS_FW_HEARTB_WIDTH 8
#define PCS_FW_PRODUCT_CODE_1 0xd7f0
#define PCS_FW_VERSION_1 0xd7f3
#define PCS_FW_BUILD_1 0xd7f6
#define PCS_UC8051_STATUS_REG 0xd7fd
#define PCS_UC_STATUS_LBN 0
#define PCS_UC_STATUS_WIDTH 8
#define PCS_UC_STATUS_FW_SAVE 0x20
#define PMA_PMD_MODE_REG 0xc301
#define PMA_PMD_RXIN_SEL_LBN 6
#define PMA_PMD_FTX_CTRL2_REG 0xc309
#define PMA_PMD_FTX_STATIC_LBN 13
#define PMA_PMD_VEND1_REG 0xc001
#define PMA_PMD_VEND1_LBTXD_LBN 15
#define PCS_VEND1_REG 0xc000
#define PCS_VEND1_LBTXD_LBN 5
void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
{
int addr = MDIO_QUAKE_LED0_REG + led;
efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
}
struct qt202x_phy_data {
enum efx_phy_mode phy_mode;
bool bug17190_in_bad_state;
unsigned long bug17190_timer;
u32 firmware_ver;
};
#define QT2022C2_MAX_RESET_TIME 500
#define QT2022C2_RESET_WAIT 10
#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
#define QT2025C_HEARTB_WAIT 100
#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
#define QT2025C_FWSTART_WAIT 100
#define BUG17190_INTERVAL (2 * HZ)
static int qt2025c_wait_heartbeat(struct efx_nic *efx)
{
unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
int reg, old_counter = 0;
/* Wait for firmware heartbeat to start */
for (;;) {
int counter;
reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG);
if (reg < 0)
return reg;
counter = ((reg >> PCS_FW_HEARTB_LBN) &
((1 << PCS_FW_HEARTB_WIDTH) - 1));
if (old_counter == 0)
old_counter = counter;
else if (counter != old_counter)
break;
if (time_after(jiffies, timeout)) {
/* Some cables have EEPROMs that conflict with the
* PHY's on-board EEPROM so it cannot load firmware */
netif_err(efx, hw, efx->net_dev,
"If an SFP+ direct attach cable is"
" connected, please check that it complies"
" with the SFP+ specification\n");
return -ETIMEDOUT;
}
msleep(QT2025C_HEARTB_WAIT);
}
return 0;
}
static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
{
unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
int reg;
/* Wait for firmware status to look good */
for (;;) {
reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
if (reg < 0)
return reg;
if ((reg &
((1 << PCS_UC_STATUS_WIDTH) - 1) << PCS_UC_STATUS_LBN) >=
PCS_UC_STATUS_FW_SAVE)
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
msleep(QT2025C_FWSTART_WAIT);
}
return 0;
}
static void qt2025c_restart_firmware(struct efx_nic *efx)
{
/* Restart microcontroller execution of firmware from RAM */
efx_mdio_write(efx, 3, 0xe854, 0x00c0);
efx_mdio_write(efx, 3, 0xe854, 0x0040);
msleep(50);
}
static int qt2025c_wait_reset(struct efx_nic *efx)
{
int rc;
rc = qt2025c_wait_heartbeat(efx);
if (rc != 0)
return rc;
rc = qt2025c_wait_fw_status_good(efx);
if (rc == -ETIMEDOUT) {
/* Bug 17689: occasionally heartbeat starts but firmware status
* code never progresses beyond 0x00. Try again, once, after
* restarting execution of the firmware image. */
netif_dbg(efx, hw, efx->net_dev,
"bashing QT2025C microcontroller\n");
qt2025c_restart_firmware(efx);
rc = qt2025c_wait_heartbeat(efx);
if (rc != 0)
return rc;
rc = qt2025c_wait_fw_status_good(efx);
}
return rc;
}
static void qt2025c_firmware_id(struct efx_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
u8 firmware_id[9];
size_t i;
for (i = 0; i < sizeof(firmware_id); i++)
firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
PCS_FW_PRODUCT_CODE_1 + i);
netif_info(efx, probe, efx->net_dev,
"QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
(firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
firmware_id[3] >> 4, firmware_id[3] & 0xf,
firmware_id[4], firmware_id[5],
firmware_id[6], firmware_id[7], firmware_id[8]);
phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
((firmware_id[3] & 0x0f) << 16) |
(firmware_id[4] << 8) | firmware_id[5];
}
static void qt2025c_bug17190_workaround(struct efx_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
/* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
* layers up, but PCS down (no block_lock). If we notice this state
* persisting for a couple of seconds, we switch PMA/PMD loopback
* briefly on and then off again, which is normally sufficient to
* recover it.
*/
if (efx->link_state.up ||
!efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
phy_data->bug17190_in_bad_state = false;
return;
}
if (!phy_data->bug17190_in_bad_state) {
phy_data->bug17190_in_bad_state = true;
phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
return;
}
if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n");
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_PMA_CTRL1_LOOPBACK, true);
msleep(100);
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_PMA_CTRL1_LOOPBACK, false);
phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
}
}
static int qt2025c_select_phy_mode(struct efx_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
struct falcon_board *board = falcon_board(efx);
int reg, rc, i;
uint16_t phy_op_mode;
/* Only 2.0.1.0+ PHY firmware supports the preferred SFP+
* Self-Configure mode. Don't attempt any switching if we encounter
* older firmware. */
if (phy_data->firmware_ver < 0x02000100)
return 0;
/* In general we will get optimal behaviour in "SFP+ Self-Configure"
* mode; however, that powers down most of the PHY when no module is
* present, so we must use a different mode (any fixed mode will do)
* to be sure that loopbacks will work. */
phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
/* Only change mode if really necessary */
reg = efx_mdio_read(efx, 1, 0xc319);
if ((reg & 0x0038) == phy_op_mode)
return 0;
netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n",
phy_op_mode);
/* This sequence replicates the register writes configured in the boot
* EEPROM (including the differences between board revisions), except
* that the operating mode is changed, and the PHY is prevented from
* unnecessarily reloading the main firmware image again. */
efx_mdio_write(efx, 1, 0xc300, 0x0000);
/* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
* STOPs onto the firmware/module I2C bus to reset it, varies across
* board revisions, as the bus is connected to different GPIO/LED
* outputs on the PHY.) */
if (board->major == 0 && board->minor < 2) {
efx_mdio_write(efx, 1, 0xc303, 0x4498);
for (i = 0; i < 9; i++) {
efx_mdio_write(efx, 1, 0xc303, 0x4488);
efx_mdio_write(efx, 1, 0xc303, 0x4480);
efx_mdio_write(efx, 1, 0xc303, 0x4490);
efx_mdio_write(efx, 1, 0xc303, 0x4498);
}
} else {
efx_mdio_write(efx, 1, 0xc303, 0x0920);
efx_mdio_write(efx, 1, 0xd008, 0x0004);
for (i = 0; i < 9; i++) {
efx_mdio_write(efx, 1, 0xc303, 0x0900);
efx_mdio_write(efx, 1, 0xd008, 0x0005);
efx_mdio_write(efx, 1, 0xc303, 0x0920);
efx_mdio_write(efx, 1, 0xd008, 0x0004);
}
efx_mdio_write(efx, 1, 0xc303, 0x4900);
}
efx_mdio_write(efx, 1, 0xc303, 0x4900);
efx_mdio_write(efx, 1, 0xc302, 0x0004);
efx_mdio_write(efx, 1, 0xc316, 0x0013);
efx_mdio_write(efx, 1, 0xc318, 0x0054);
efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
efx_mdio_write(efx, 1, 0xc31a, 0x0098);
efx_mdio_write(efx, 3, 0x0026, 0x0e00);
efx_mdio_write(efx, 3, 0x0027, 0x0013);
efx_mdio_write(efx, 3, 0x0028, 0xa528);
efx_mdio_write(efx, 1, 0xd006, 0x000a);
efx_mdio_write(efx, 1, 0xd007, 0x0009);
efx_mdio_write(efx, 1, 0xd008, 0x0004);
/* This additional write is not present in the boot EEPROM. It
* prevents the PHY's internal boot ROM doing another pointless (and
* slow) reload of the firmware image (the microcontroller's code
* memory is not affected by the microcontroller reset). */
efx_mdio_write(efx, 1, 0xc317, 0x00ff);
/* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
* restart doesn't reset it. We need to do that ourselves. */
efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
1 << PMA_PMD_RXIN_SEL_LBN, false);
efx_mdio_write(efx, 1, 0xc300, 0x0002);
msleep(20);
/* Restart microcontroller execution of firmware from RAM */
qt2025c_restart_firmware(efx);
/* Wait for the microcontroller to be ready again */
rc = qt2025c_wait_reset(efx);
if (rc < 0) {
netif_err(efx, hw, efx->net_dev,
"PHY microcontroller reset during mode switch "
"timed out\n");
return rc;
}
return 0;
}
static int qt202x_reset_phy(struct efx_nic *efx)
{
int rc;
if (efx->phy_type == PHY_TYPE_QT2025C) {
/* Wait for the reset triggered by falcon_reset_hw()
* to complete */
rc = qt2025c_wait_reset(efx);
if (rc < 0)
goto fail;
} else {
/* Reset the PHYXS MMD. This is documented as doing
* a complete soft reset. */
rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
QT2022C2_MAX_RESET_TIME /
QT2022C2_RESET_WAIT,
QT2022C2_RESET_WAIT);
if (rc < 0)
goto fail;
}
/* Wait 250ms for the PHY to complete bootup */
msleep(250);
falcon_board(efx)->type->init_phy(efx);
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n");
return rc;
}
static int qt202x_phy_probe(struct efx_nic *efx)
{
struct qt202x_phy_data *phy_data;
phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
phy_data->phy_mode = efx->phy_mode;
phy_data->bug17190_in_bad_state = false;
phy_data->bug17190_timer = 0;
efx->mdio.mmds = QT202X_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
return 0;
}
static int qt202x_phy_init(struct efx_nic *efx)
{
u32 devid;
int rc;
rc = qt202x_reset_phy(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev, "PHY init failed\n");
return rc;
}
devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
netif_info(efx, probe, efx->net_dev,
"PHY ID reg %x (OUI %06x model %02x revision %x)\n",
devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
efx_mdio_id_rev(devid));
if (efx->phy_type == PHY_TYPE_QT2025C)
qt2025c_firmware_id(efx);
return 0;
}
static int qt202x_link_ok(struct efx_nic *efx)
{
return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
}
static bool qt202x_phy_poll(struct efx_nic *efx)
{
bool was_up = efx->link_state.up;
efx->link_state.up = qt202x_link_ok(efx);
efx->link_state.speed = 10000;
efx->link_state.fd = true;
efx->link_state.fc = efx->wanted_fc;
if (efx->phy_type == PHY_TYPE_QT2025C)
qt2025c_bug17190_workaround(efx);
return efx->link_state.up != was_up;
}
static int qt202x_phy_reconfigure(struct efx_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
if (efx->phy_type == PHY_TYPE_QT2025C) {
int rc = qt2025c_select_phy_mode(efx);
if (rc)
return rc;
/* There are several different register bits which can
* disable TX (and save power) on direct-attach cables
* or optical transceivers, varying somewhat between
* firmware versions. Only 'static mode' appears to
* cover everything. */
mdio_set_flag(
&efx->mdio, efx->mdio.prtad, MDIO_MMD_PMAPMD,
PMA_PMD_FTX_CTRL2_REG, 1 << PMA_PMD_FTX_STATIC_LBN,
efx->phy_mode & PHY_MODE_TX_DISABLED ||
efx->phy_mode & PHY_MODE_LOW_POWER ||
efx->loopback_mode == LOOPBACK_PCS ||
efx->loopback_mode == LOOPBACK_PMAPMD);
} else {
/* Reset the PHY when moving from tx off to tx on */
if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
(phy_data->phy_mode & PHY_MODE_TX_DISABLED))
qt202x_reset_phy(efx);
efx_mdio_transmit_disable(efx);
}
efx_mdio_phy_reconfigure(efx);
phy_data->phy_mode = efx->phy_mode;
return 0;
}
static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
static void qt202x_phy_remove(struct efx_nic *efx)
{
/* Free the context block */
kfree(efx->phy_data);
efx->phy_data = NULL;
}
const struct efx_phy_operations falcon_qt202x_phy_ops = {
.probe = qt202x_phy_probe,
.init = qt202x_phy_init,
.reconfigure = qt202x_phy_reconfigure,
.poll = qt202x_phy_poll,
.fini = efx_port_dummy_op_void,
.remove = qt202x_phy_remove,
.get_settings = qt202x_phy_get_settings,
.set_settings = efx_mdio_set_settings,
.test_alive = efx_mdio_test_alive,
};

File diff suppressed because it is too large

View File

@@ -0,0 +1,749 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2005-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH 8
/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u
/*
* rx_alloc_method - RX buffer allocation method
*
* This driver supports two methods for allocating and using RX buffers:
* each RX buffer may be backed by an skb or by an order-n page.
*
* When GRO is in use then the second method has a lower overhead,
* since we don't have to allocate then free skbs on reassembled frames.
*
* Values:
* - RX_ALLOC_METHOD_AUTO = 0
* - RX_ALLOC_METHOD_SKB = 1
* - RX_ALLOC_METHOD_PAGE = 2
*
* The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
* controlled by the parameters below.
*
* - Since pushing and popping descriptors are separated by the rx_queue
*   size, the watermarks should be ~rxd_size.
* - The performance win from using page-based allocation for GRO is less
*   than the performance hit of using page-based allocation for non-GRO,
*   so the watermarks should reflect this.
*
* Per channel we maintain a single variable, updated by each channel:
*
* rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
* RX_ALLOC_FACTOR_SKB)
* Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
* limits the hysteresis), and update the allocation strategy:
*
* rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
* RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
*/
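/* Worked example (illustrative only, using the factors defined below):
 * each frame that GRO merges adds RX_ALLOC_FACTOR_GRO (+1) to
 * rx_alloc_level, while each frame delivered as an ordinary skb adds
 * RX_ALLOC_FACTOR_SKB (-2). Starting at the RX_ALLOC_LEVEL_GRO threshold
 * (0x2000), it therefore takes a sustained run of skb deliveries (each
 * outweighing two merged frames) to drag the level below the threshold
 * and flip efx_rx_strategy() from page-based back to skb-based
 * allocation; the 0..RX_ALLOC_LEVEL_MAX clamp bounds the drift and hence
 * the hysteresis.
 */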
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
/* This is the percentage fill level below which new RX descriptors
* will be added to the RX descriptor ring.
*/
static unsigned int rx_refill_threshold = 90;
/* This is the percentage fill level to which an RX queue will be refilled
* when the "RX refill threshold" is reached.
*/
static unsigned int rx_refill_limit = 95;
/*
* RX maximum head room required.
*
* This must be at least 1 to prevent overflow and at least 2 to allow
* pipelined receives.
*/
#define EFX_RXD_HEAD_ROOM 2
/* Offset of ethernet header within page */
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
struct efx_rx_buffer *buf)
{
/* Offset is always within one page, so we don't need to consider
* the page order.
*/
return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
efx->type->rx_buffer_hash_size);
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
return PAGE_SIZE << efx->rx_buffer_order;
}
static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
if (buf->is_page)
return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
else
return ((u8 *)buf->u.skb->data +
efx->type->rx_buffer_hash_size);
}
static inline u32 efx_rx_buf_hash(const u8 *eh)
{
/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
return __le32_to_cpup((const __le32 *)(eh - 4));
#else
const u8 *data = eh - 4;
return ((u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
(u32)data[3] << 24);
#endif
}
/**
* efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
*
* @rx_queue: Efx RX queue
*
* This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
* struct efx_rx_buffer for each one. Return a negative error code or 0
* on success. It may fail after inserting fewer than EFX_RX_BATCH
* buffers.
*/
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
struct net_device *net_dev = efx->net_dev;
struct efx_rx_buffer *rx_buf;
struct sk_buff *skb;
int skb_len = efx->rx_buffer_len;
unsigned index, count;
for (count = 0; count < EFX_RX_BATCH; ++count) {
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
if (unlikely(!skb))
return -ENOMEM;
/* Adjust the SKB for padding and checksum */
skb_reserve(skb, NET_IP_ALIGN);
rx_buf->len = skb_len - NET_IP_ALIGN;
rx_buf->is_page = false;
skb->ip_summed = CHECKSUM_UNNECESSARY;
rx_buf->dma_addr = pci_map_single(efx->pci_dev,
skb->data, rx_buf->len,
PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(efx->pci_dev,
rx_buf->dma_addr))) {
dev_kfree_skb_any(skb);
rx_buf->u.skb = NULL;
return -EIO;
}
++rx_queue->added_count;
++rx_queue->alloc_skb_count;
}
return 0;
}
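/* Ring indexing note (illustrative): because the ring size is a power of
 * two (see efx_probe_rx_queue() below), ptr_mask == entries - 1 and the
 * added/removed counters can increment without bound, e.g. for a
 * 512-entry ring:
 *
 *	index = added_count & 0x1ff;	-- added_count 515 -> slot 3
 *
 * so no explicit wrap-around handling is needed when filling the ring.
 */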
/**
* efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
*
* @rx_queue: Efx RX queue
*
* This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
* and populates struct efx_rx_buffers for each one. Return a negative error
* code or 0 on success. If a single page can be split between two buffers,
* then the page will either be inserted fully, or not at all.
*/
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
struct page *page;
void *page_addr;
struct efx_rx_page_state *state;
dma_addr_t dma_addr;
unsigned index, count;
/* We can split a page between two buffers */
BUILD_BUG_ON(EFX_RX_BATCH & 1);
for (count = 0; count < EFX_RX_BATCH; ++count) {
page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
efx->rx_buffer_order);
if (unlikely(page == NULL))
return -ENOMEM;
dma_addr = pci_map_page(efx->pci_dev, page, 0,
efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
__free_pages(page, efx->rx_buffer_order);
return -EIO;
}
page_addr = page_address(page);
state = page_addr;
state->refcnt = 0;
state->dma_addr = dma_addr;
page_addr += sizeof(struct efx_rx_page_state);
dma_addr += sizeof(struct efx_rx_page_state);
split:
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
rx_buf->u.page = page;
rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
rx_buf->is_page = true;
++rx_queue->added_count;
++rx_queue->alloc_page_count;
++state->refcnt;
if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
/* Use the second half of the page */
get_page(page);
dma_addr += (PAGE_SIZE >> 1);
page_addr += (PAGE_SIZE >> 1);
++count;
goto split;
}
}
return 0;
}
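/* Page layout sketch (illustrative, for rx_buffer_len <= EFX_RX_HALF_PAGE
 * with order-0 pages): buffer 0 starts at
 * sizeof(struct efx_rx_page_state) + EFX_PAGE_IP_ALIGN into the page and
 * buffer 1 at the same offset plus PAGE_SIZE/2, with state->refcnt
 * counting how many of the two buffers still reference the page's DMA
 * mapping.
 */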
static void efx_unmap_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
if (rx_buf->is_page && rx_buf->u.page) {
struct efx_rx_page_state *state;
state = page_address(rx_buf->u.page);
if (--state->refcnt == 0) {
pci_unmap_page(efx->pci_dev,
state->dma_addr,
efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
}
} else if (!rx_buf->is_page && rx_buf->u.skb) {
pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
rx_buf->len, PCI_DMA_FROMDEVICE);
}
}
static void efx_free_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
if (rx_buf->is_page && rx_buf->u.page) {
__free_pages(rx_buf->u.page, efx->rx_buffer_order);
rx_buf->u.page = NULL;
} else if (!rx_buf->is_page && rx_buf->u.skb) {
dev_kfree_skb_any(rx_buf->u.skb);
rx_buf->u.skb = NULL;
}
}
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf)
{
efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
efx_free_rx_buffer(rx_queue->efx, rx_buf);
}
/* Attempt to resurrect the other receive buffer that used to share this page,
* which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf)
{
struct efx_rx_page_state *state = page_address(rx_buf->u.page);
struct efx_rx_buffer *new_buf;
unsigned fill_level, index;
/* +1 because efx_rx_packet() incremented removed_count. +1 because
* we'd like to insert an additional descriptor whilst leaving
* EFX_RXD_HEAD_ROOM for the non-recycle path */
fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
if (unlikely(fill_level > rx_queue->max_fill)) {
/* We could place "state" on a list, and drain the list in
* efx_fast_push_rx_descriptors(). For now, this will do. */
return;
}
++state->refcnt;
get_page(rx_buf->u.page);
index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
new_buf->u.page = rx_buf->u.page;
new_buf->len = rx_buf->len;
new_buf->is_page = true;
++rx_queue->added_count;
}
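/* Note on the dma_addr XOR above (illustrative): the two buffers sharing
 * a page sit exactly PAGE_SIZE/2 apart, so toggling that single address
 * bit maps each buffer's DMA address onto its sibling's, e.g. with 4KiB
 * pages:
 *
 *	0x1030 ^ 0x800 == 0x1830	-- first half -> second half
 *	0x1830 ^ 0x800 == 0x1030	-- and back again
 *
 * which locates the recycled half without any extra bookkeeping.
 */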
/* Recycle the given rx buffer directly back into the rx_queue. There is
* always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf)
{
struct efx_nic *efx = channel->efx;
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
struct efx_rx_buffer *new_buf;
unsigned index;
if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
page_count(rx_buf->u.page) == 1)
efx_resurrect_rx_buffer(rx_queue, rx_buf);
index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
memcpy(new_buf, rx_buf, sizeof(*new_buf));
rx_buf->u.page = NULL;
++rx_queue->added_count;
}
/**
* efx_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
*
* This will aim to fill the RX descriptor queue up to
* @rx_queue->fast_fill_limit. If there is insufficient atomic
* memory to do so, a slow fill will be scheduled.
*
* The caller must provide serialisation (none is used here). In practice,
* this means this function must run from the NAPI handler, or be called
* when NAPI is disabled.
*/
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
unsigned fill_level;
int space, rc = 0;
/* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
if (fill_level >= rx_queue->fast_fill_trigger)
goto out;
/* Record minimum fill level */
if (unlikely(fill_level < rx_queue->min_fill)) {
if (fill_level)
rx_queue->min_fill = fill_level;
}
space = rx_queue->fast_fill_limit - fill_level;
if (space < EFX_RX_BATCH)
goto out;
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filling descriptor ring from"
" level %d to level %d using %s allocation\n",
efx_rx_queue_index(rx_queue), fill_level,
rx_queue->fast_fill_limit,
channel->rx_alloc_push_pages ? "page" : "skb");
do {
if (channel->rx_alloc_push_pages)
rc = efx_init_rx_buffers_page(rx_queue);
else
rc = efx_init_rx_buffers_skb(rx_queue);
if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */
if (rx_queue->added_count == rx_queue->removed_count)
efx_schedule_slow_fill(rx_queue);
goto out;
}
} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filled descriptor ring "
"to level %d\n", efx_rx_queue_index(rx_queue),
rx_queue->added_count - rx_queue->removed_count);
out:
if (rx_queue->notified_count != rx_queue->added_count)
efx_nic_notify_rx_desc(rx_queue);
}
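/* Worked example (illustrative, with the module defaults above): a
 * 512-entry ring has max_fill = 512 - EFX_RXD_HEAD_ROOM = 510, so
 * fast_fill_trigger = 510 * 90 / 100 = 459 and fast_fill_limit =
 * 510 * 95 / 100 = 484. A fill level of 400 is below the trigger, so the
 * loop above pushes EFX_RX_BATCH-sized batches into the 84-descriptor
 * shortfall until fewer than one batch of it remains.
 */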
void efx_rx_slow_fill(unsigned long context)
{
struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
/* Post an event to cause NAPI to run and refill the queue */
efx_nic_generate_fill_event(channel);
++rx_queue->slow_fill_count;
}
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
int len, bool *discard,
bool *leak_packet)
{
struct efx_nic *efx = rx_queue->efx;
unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
if (likely(len <= max_len))
return;
/* The packet must be discarded, but this is only a fatal error
* if the caller indicated it was
*/
*discard = true;
if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
" RX queue %d seriously overlength "
"RX event (0x%x > 0x%x+0x%x). Leaking\n",
efx_rx_queue_index(rx_queue), len, max_len,
efx->type->rx_buffer_padding);
/* If this buffer was skb-allocated, then the meta
* data at the end of the skb will be trashed. So
* we have no choice but to leak the fragment.
*/
*leak_packet = !rx_buf->is_page;
efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
" RX queue %d overlength RX event "
"(0x%x > 0x%x)\n",
efx_rx_queue_index(rx_queue), len, max_len);
}
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Pass a received packet up through the generic GRO stack
*
* Handles driverlink veto, and passes the fragment up via
* the appropriate GRO method
*/
static void efx_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
const u8 *eh, bool checksummed)
{
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
/* Pass the skb/page into the GRO engine */
if (rx_buf->is_page) {
struct efx_nic *efx = channel->efx;
struct page *page = rx_buf->u.page;
struct sk_buff *skb;
rx_buf->u.page = NULL;
skb = napi_get_frags(napi);
if (!skb) {
put_page(page);
return;
}
if (efx->net_dev->features & NETIF_F_RXHASH)
skb->rxhash = efx_rx_buf_hash(eh);
skb_shinfo(skb)->frags[0].page = page;
skb_shinfo(skb)->frags[0].page_offset =
efx_rx_buf_offset(efx, rx_buf);
skb_shinfo(skb)->frags[0].size = rx_buf->len;
skb_shinfo(skb)->nr_frags = 1;
skb->len = rx_buf->len;
skb->data_len = rx_buf->len;
skb->truesize += rx_buf->len;
skb->ip_summed =
checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
skb_record_rx_queue(skb, channel->channel);
gro_result = napi_gro_frags(napi);
} else {
struct sk_buff *skb = rx_buf->u.skb;
EFX_BUG_ON_PARANOID(!checksummed);
rx_buf->u.skb = NULL;
gro_result = napi_gro_receive(napi, skb);
}
if (gro_result == GRO_NORMAL) {
channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
} else if (gro_result != GRO_DROP) {
channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
channel->irq_mod_score += 2;
}
}
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_rx_buffer *rx_buf;
bool leak_packet = false;
rx_buf = efx_rx_buffer(rx_queue, index);
/* This allows the refill path to post another buffer.
* EFX_RXD_HEAD_ROOM ensures that the slot we are using
* isn't overwritten yet.
*/
rx_queue->removed_count++;
/* Validate the length encoded in the event vs the descriptor pushed */
efx_rx_packet__check_len(rx_queue, rx_buf, len,
&discard, &leak_packet);
netif_vdbg(efx, rx_status, efx->net_dev,
"RX queue %d received id %x at %llx+%x %s%s\n",
efx_rx_queue_index(rx_queue), index,
(unsigned long long)rx_buf->dma_addr, len,
(checksummed ? " [SUMMED]" : ""),
(discard ? " [DISCARD]" : ""));
/* Discard packet, if instructed to do so */
if (unlikely(discard)) {
if (unlikely(leak_packet))
channel->n_skbuff_leaks++;
else
efx_recycle_rx_buffer(channel, rx_buf);
/* Don't hold off the previous receive */
rx_buf = NULL;
goto out;
}
/* Release card resources - assumes all RX buffers consumed in-order
* per RX queue
*/
efx_unmap_rx_buffer(efx, rx_buf);
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
*/
prefetch(efx_rx_buf_eh(efx, rx_buf));
/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
*/
rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
if (channel->rx_pkt)
__efx_rx_packet(channel,
channel->rx_pkt, channel->rx_pkt_csummed);
channel->rx_pkt = rx_buf;
channel->rx_pkt_csummed = checksummed;
}
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf, bool checksummed)
{
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
u8 *eh = efx_rx_buf_eh(efx, rx_buf);
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
efx_loopback_rx_packet(efx, eh, rx_buf->len);
efx_free_rx_buffer(efx, rx_buf);
return;
}
if (!rx_buf->is_page) {
skb = rx_buf->u.skb;
prefetch(skb_shinfo(skb));
skb_reserve(skb, efx->type->rx_buffer_hash_size);
skb_put(skb, rx_buf->len);
if (efx->net_dev->features & NETIF_F_RXHASH)
skb->rxhash = efx_rx_buf_hash(eh);
/* Move past the ethernet header; at this point skb->data
* still points at the ethernet header */
skb->protocol = eth_type_trans(skb, efx->net_dev);
skb_record_rx_queue(skb, channel->channel);
}
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
checksummed = false;
if (likely(checksummed || rx_buf->is_page)) {
efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
return;
}
/* We now own the SKB */
skb = rx_buf->u.skb;
rx_buf->u.skb = NULL;
/* Set the SKB flags */
skb_checksum_none_assert(skb);
/* Pass the packet up */
netif_receive_skb(skb);
/* Update allocation strategy method */
channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}
void efx_rx_strategy(struct efx_channel *channel)
{
enum efx_rx_alloc_method method = rx_alloc_method;
/* Only makes sense to use page based allocation if GRO is enabled */
if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
method = RX_ALLOC_METHOD_SKB;
} else if (method == RX_ALLOC_METHOD_AUTO) {
/* Constrain the rx_alloc_level */
if (channel->rx_alloc_level < 0)
channel->rx_alloc_level = 0;
else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
/* Decide on the allocation method */
method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
}
/* Push the option */
channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int entries;
int rc;
/* Create the smallest power-of-two aligned ring */
entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
rx_queue->ptr_mask = entries - 1;
netif_dbg(efx, probe, efx->net_dev,
"creating RX queue %d size %#x mask %#x\n",
efx_rx_queue_index(rx_queue), efx->rxq_entries,
rx_queue->ptr_mask);
/* Allocate RX buffers */
rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
GFP_KERNEL);
if (!rx_queue->buffer)
return -ENOMEM;
rc = efx_nic_probe_rx(rx_queue);
if (rc) {
kfree(rx_queue->buffer);
rx_queue->buffer = NULL;
}
return rc;
}
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int max_fill, trigger, limit;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
/* Initialise ptr fields */
rx_queue->added_count = 0;
rx_queue->notified_count = 0;
rx_queue->removed_count = 0;
rx_queue->min_fill = -1U;
/* Initialise limit fields */
max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
limit = max_fill * min(rx_refill_limit, 100U) / 100U;
rx_queue->max_fill = max_fill;
rx_queue->fast_fill_trigger = trigger;
rx_queue->fast_fill_limit = limit;
/* Set up RX descriptor ring */
efx_nic_init_rx(rx_queue);
}
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
int i;
struct efx_rx_buffer *rx_buf;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
del_timer_sync(&rx_queue->slow_fill);
efx_nic_fini_rx(rx_queue);
/* Release RX buffers. NB: start at index 0, not the current HW ptr */
if (rx_queue->buffer) {
for (i = 0; i <= rx_queue->ptr_mask; i++) {
rx_buf = efx_rx_buffer(rx_queue, i);
efx_fini_rx_buffer(rx_queue, rx_buf);
}
}
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
efx_nic_remove_rx(rx_queue);
kfree(rx_queue->buffer);
rx_queue->buffer = NULL;
}
module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring fast/slow fill threshold (%)");

View File

@@ -0,0 +1,761 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <asm/io.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
/*
* Loopback test packet structure
*
* The self-test should stress every RSS vector, and unfortunately
* Falcon only performs RSS on TCP/UDP packets.
*/
struct efx_loopback_payload {
struct ethhdr header;
struct iphdr ip;
struct udphdr udp;
__be16 iteration;
const char msg[64];
} __packed;
/* Loopback test source MAC address */
static const unsigned char payload_source[ETH_ALEN] = {
0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};
static const char payload_msg[] =
"Hello world! This is an Efx loopback test in progress!";
/* Interrupt mode names */
static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *efx_interrupt_mode_names[] = {
[EFX_INT_MODE_MSIX] = "MSI-X",
[EFX_INT_MODE_MSI] = "MSI",
[EFX_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
/**
* efx_loopback_state - persistent state during a loopback selftest
* @flush: Drop all packets in efx_loopback_rx_packet
* @packet_count: Number of packets being used in this test
* @skbs: An array of skbs transmitted
* @offload_csum: Checksums are being offloaded
* @rx_good: RX good packet count
* @rx_bad: RX bad packet count
* @payload: Payload used in tests
*/
struct efx_loopback_state {
bool flush;
int packet_count;
struct sk_buff **skbs;
bool offload_csum;
atomic_t rx_good;
atomic_t rx_bad;
struct efx_loopback_payload payload;
};
/**************************************************************************
*
* MII, NVRAM and register tests
*
**************************************************************************/
static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
if (efx->phy_op->test_alive) {
rc = efx->phy_op->test_alive(efx);
tests->phy_alive = rc ? -1 : 1;
}
return rc;
}
static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
if (efx->type->test_nvram) {
rc = efx->type->test_nvram(efx);
tests->nvram = rc ? -1 : 1;
}
return rc;
}
static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
/* Test register access */
if (efx->type->test_registers) {
rc = efx->type->test_registers(efx);
tests->registers = rc ? -1 : 1;
}
return rc;
}
/**************************************************************************
*
* Interrupt and event queue testing
*
**************************************************************************/
/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
struct efx_self_tests *tests)
{
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
/* Reset interrupt flag */
efx->last_irq_cpu = -1;
smp_wmb();
efx_nic_generate_interrupt(efx);
/* Wait for arrival of test interrupt. */
netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
schedule_timeout_uninterruptible(HZ / 10);
if (efx->last_irq_cpu >= 0)
goto success;
netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
return -ETIMEDOUT;
success:
netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
INT_MODE(efx),
efx->last_irq_cpu);
tests->interrupt = 1;
return 0;
}
/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_channel *channel,
struct efx_self_tests *tests)
{
struct efx_nic *efx = channel->efx;
unsigned int read_ptr, count;
tests->eventq_dma[channel->channel] = -1;
tests->eventq_int[channel->channel] = -1;
tests->eventq_poll[channel->channel] = -1;
read_ptr = channel->eventq_read_ptr;
channel->efx->last_irq_cpu = -1;
smp_wmb();
efx_nic_generate_test_event(channel);
/* Wait for arrival of interrupt */
count = 0;
do {
schedule_timeout_uninterruptible(HZ / 100);
if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
goto eventq_ok;
} while (++count < 2);
netif_err(efx, drv, efx->net_dev,
"channel %d timed out waiting for event queue\n",
channel->channel);
/* See if interrupt arrived */
if (channel->efx->last_irq_cpu >= 0) {
netif_err(efx, drv, efx->net_dev,
"channel %d saw interrupt on CPU%d "
"during event queue test\n", channel->channel,
raw_smp_processor_id());
tests->eventq_int[channel->channel] = 1;
}
/* Check to see if event was received even if interrupt wasn't */
if (efx_nic_event_present(channel)) {
netif_err(efx, drv, efx->net_dev,
"channel %d event was generated, but "
"failed to trigger an interrupt\n", channel->channel);
tests->eventq_dma[channel->channel] = 1;
}
return -ETIMEDOUT;
eventq_ok:
netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n",
channel->channel);
tests->eventq_dma[channel->channel] = 1;
tests->eventq_int[channel->channel] = 1;
tests->eventq_poll[channel->channel] = 1;
return 0;
}
static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
unsigned flags)
{
int rc;
if (!efx->phy_op->run_tests)
return 0;
mutex_lock(&efx->mac_lock);
rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
return rc;
}
/**************************************************************************
*
* Loopback testing
* NB: Only one loopback test may execute at a time.
*
**************************************************************************/
/* Loopback test RX callback
* This is called for each received packet during loopback testing.
*/
void efx_loopback_rx_packet(struct efx_nic *efx,
const char *buf_ptr, int pkt_len)
{
struct efx_loopback_state *state = efx->loopback_selftest;
struct efx_loopback_payload *received;
struct efx_loopback_payload *payload;
BUG_ON(!buf_ptr);
/* If we are just flushing, then drop the packet */
if ((state == NULL) || state->flush)
return;
payload = &state->payload;
received = (struct efx_loopback_payload *) buf_ptr;
received->ip.saddr = payload->ip.saddr;
if (state->offload_csum)
received->ip.check = payload->ip.check;
/* Check that header exists */
if (pkt_len < sizeof(received->header)) {
netif_err(efx, drv, efx->net_dev,
"saw runt RX packet (length %d) in %s loopback "
"test\n", pkt_len, LOOPBACK_MODE(efx));
goto err;
}
/* Check that the ethernet header matches */
if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
netif_err(efx, drv, efx->net_dev,
"saw non-loopback RX packet in %s loopback test\n",
LOOPBACK_MODE(efx));
goto err;
}
/* Check packet length */
if (pkt_len != sizeof(*payload)) {
netif_err(efx, drv, efx->net_dev,
"saw incorrect RX packet length %d (wanted %d) in "
"%s loopback test\n", pkt_len, (int)sizeof(*payload),
LOOPBACK_MODE(efx));
goto err;
}
/* Check that IP header matches */
if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
netif_err(efx, drv, efx->net_dev,
"saw corrupted IP header in %s loopback test\n",
LOOPBACK_MODE(efx));
goto err;
}
/* Check that msg and padding match */
if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
netif_err(efx, drv, efx->net_dev,
"saw corrupted RX packet in %s loopback test\n",
LOOPBACK_MODE(efx));
goto err;
}
/* Check that iteration matches */
if (received->iteration != payload->iteration) {
netif_err(efx, drv, efx->net_dev,
"saw RX packet from iteration %d (wanted %d) in "
"%s loopback test\n", ntohs(received->iteration),
ntohs(payload->iteration), LOOPBACK_MODE(efx));
goto err;
}
/* Increase correct RX count */
netif_vdbg(efx, drv, efx->net_dev,
"got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
atomic_inc(&state->rx_good);
return;
err:
#ifdef EFX_ENABLE_DEBUG
if (atomic_read(&state->rx_bad) == 0) {
netif_err(efx, drv, efx->net_dev, "received packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
buf_ptr, pkt_len, 0);
netif_err(efx, drv, efx->net_dev, "expected packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
&state->payload, sizeof(state->payload), 0);
}
#endif
atomic_inc(&state->rx_bad);
}
/* Initialise an efx_selftest_state for a new iteration */
static void efx_iterate_state(struct efx_nic *efx)
{
struct efx_loopback_state *state = efx->loopback_selftest;
struct net_device *net_dev = efx->net_dev;
struct efx_loopback_payload *payload = &state->payload;
/* Initialise the layerII header */
memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
payload->header.h_proto = htons(ETH_P_IP);
/* saddr set later and used as incrementing count */
payload->ip.daddr = htonl(INADDR_LOOPBACK);
payload->ip.ihl = 5;
payload->ip.check = htons(0xdead);
payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
payload->ip.version = IPVERSION;
payload->ip.protocol = IPPROTO_UDP;
/* Initialise udp header */
payload->udp.source = 0;
payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
sizeof(struct iphdr));
payload->udp.check = 0; /* checksum ignored */
/* Fill out payload */
payload->iteration = htons(ntohs(payload->iteration) + 1);
memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
/* Fill out remaining state members */
atomic_set(&state->rx_good, 0);
atomic_set(&state->rx_bad, 0);
smp_wmb();
}
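/* Size check (illustrative): with the __packed layout above, the payload
 * is 14 (ethhdr) + 20 (iphdr) + 8 (udphdr) + 2 (iteration) + 64 (msg) =
 * 108 bytes, so the assignments above give ip.tot_len == htons(94) and
 * udp.len == htons(74).
 */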
static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
struct efx_loopback_state *state = efx->loopback_selftest;
struct efx_loopback_payload *payload;
struct sk_buff *skb;
int i;
netdev_tx_t rc;
/* Transmit N copies of buffer */
for (i = 0; i < state->packet_count; i++) {
/* Allocate an skb, holding an extra reference for
* transmit completion counting */
skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
if (!skb)
return -ENOMEM;
state->skbs[i] = skb;
skb_get(skb);
/* Copy the payload in, incrementing the source address to
* exercise the rss vectors */
payload = ((struct efx_loopback_payload *)
skb_put(skb, sizeof(state->payload)));
memcpy(payload, &state->payload, sizeof(state->payload));
payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
/* Ensure everything we've written is visible to the
* interrupt handler. */
smp_wmb();
if (efx_dev_registered(efx))
netif_tx_lock_bh(efx->net_dev);
rc = efx_enqueue_skb(tx_queue, skb);
if (efx_dev_registered(efx))
netif_tx_unlock_bh(efx->net_dev);
if (rc != NETDEV_TX_OK) {
netif_err(efx, drv, efx->net_dev,
"TX queue %d could not transmit packet %d of "
"%d in %s loopback test\n", tx_queue->queue,
i + 1, state->packet_count,
LOOPBACK_MODE(efx));
/* Defer cleaning up the other skbs for the caller */
kfree_skb(skb);
return -EPIPE;
}
}
return 0;
}
static int efx_poll_loopback(struct efx_nic *efx)
{
struct efx_loopback_state *state = efx->loopback_selftest;
struct efx_channel *channel;
/* NAPI polling is not enabled, so process channels
* synchronously */
efx_for_each_channel(channel, efx) {
if (channel->work_pending)
efx_process_channel_now(channel);
}
return atomic_read(&state->rx_good) == state->packet_count;
}
static int efx_end_loopback(struct efx_tx_queue *tx_queue,
struct efx_loopback_self_tests *lb_tests)
{
struct efx_nic *efx = tx_queue->efx;
struct efx_loopback_state *state = efx->loopback_selftest;
struct sk_buff *skb;
int tx_done = 0, rx_good, rx_bad;
int i, rc = 0;
if (efx_dev_registered(efx))
netif_tx_lock_bh(efx->net_dev);
/* Count the number of tx completions, and decrement the refcnt. Any
* skbs not already completed will be freed when the queue is flushed */
for (i = 0; i < state->packet_count; i++) {
skb = state->skbs[i];
if (skb && !skb_shared(skb))
++tx_done;
dev_kfree_skb_any(skb);
}
if (efx_dev_registered(efx))
netif_tx_unlock_bh(efx->net_dev);
/* Check TX completion and received packet counts */
rx_good = atomic_read(&state->rx_good);
rx_bad = atomic_read(&state->rx_bad);
if (tx_done != state->packet_count) {
/* Don't free the skbs; they will be picked up on TX
* overflow or channel teardown.
*/
netif_err(efx, drv, efx->net_dev,
"TX queue %d saw only %d out of an expected %d "
"TX completion events in %s loopback test\n",
tx_queue->queue, tx_done, state->packet_count,
LOOPBACK_MODE(efx));
rc = -ETIMEDOUT;
/* Fall through so we see the RX errors as well */
}
/* We may always be up to a flush away from our desired packet total */
if (rx_good != state->packet_count) {
netif_dbg(efx, drv, efx->net_dev,
"TX queue %d saw only %d out of an expected %d "
"received packets in %s loopback test\n",
tx_queue->queue, rx_good, state->packet_count,
LOOPBACK_MODE(efx));
rc = -ETIMEDOUT;
/* Fall through */
}
/* Update loopback test structure */
lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
lb_tests->tx_done[tx_queue->queue] += tx_done;
lb_tests->rx_good += rx_good;
lb_tests->rx_bad += rx_bad;
return rc;
}
static int
efx_test_loopback(struct efx_tx_queue *tx_queue,
struct efx_loopback_self_tests *lb_tests)
{
struct efx_nic *efx = tx_queue->efx;
struct efx_loopback_state *state = efx->loopback_selftest;
int i, begin_rc, end_rc;
for (i = 0; i < 3; i++) {
/* Determine how many packets to send */
state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
state->skbs = kzalloc(sizeof(state->skbs[0]) *
state->packet_count, GFP_KERNEL);
if (!state->skbs)
return -ENOMEM;
state->flush = false;
netif_dbg(efx, drv, efx->net_dev,
"TX queue %d testing %s loopback with %d packets\n",
tx_queue->queue, LOOPBACK_MODE(efx),
state->packet_count);
efx_iterate_state(efx);
begin_rc = efx_begin_loopback(tx_queue);
/* This will normally complete very quickly, but be
* prepared to wait up to 100 ms. */
msleep(1);
if (!efx_poll_loopback(efx)) {
msleep(100);
efx_poll_loopback(efx);
}
end_rc = efx_end_loopback(tx_queue, lb_tests);
kfree(state->skbs);
if (begin_rc || end_rc) {
/* Wait a while to ensure there are no packets
* floating around after a failure. */
schedule_timeout_uninterruptible(HZ / 10);
return begin_rc ? begin_rc : end_rc;
}
}
netif_dbg(efx, drv, efx->net_dev,
"TX queue %d passed %s loopback test with a burst length "
"of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
state->packet_count);
return 0;
}
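/* Burst sizing (illustrative): min(1 << (i << 2), txq_entries / 3) gives
 * 1, 16 and 256 packets over the three iterations (assuming e.g. a
 * 1024-entry TX ring), so the test covers a single packet, a short burst
 * and a near-capacity burst without overfilling the queue.
 */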
/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
* any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
* to delay and retry. Therefore, it's safer to just poll directly. Wait
* for link up and any faults to dissipate. */
static int efx_wait_for_link(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
int count, link_up_count = 0;
bool link_up;
for (count = 0; count < 40; count++) {
schedule_timeout_uninterruptible(HZ / 10);
if (efx->type->monitor != NULL) {
mutex_lock(&efx->mac_lock);
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
} else {
struct efx_channel *channel = efx_get_channel(efx, 0);
if (channel->work_pending)
efx_process_channel_now(channel);
}
mutex_lock(&efx->mac_lock);
link_up = link_state->up;
if (link_up)
link_up = !efx->mac_op->check_fault(efx);
mutex_unlock(&efx->mac_lock);
if (link_up) {
if (++link_up_count == 2)
return 0;
} else {
link_up_count = 0;
}
}
return -ETIMEDOUT;
}
static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
unsigned int loopback_modes)
{
enum efx_loopback_mode mode;
struct efx_loopback_state *state;
struct efx_channel *channel = efx_get_channel(efx, 0);
struct efx_tx_queue *tx_queue;
int rc = 0;
/* Set the port loopback_selftest member. From this point on
* all received packets will be dropped. Mark the state as
* "flushing" so all inflight packets are dropped */
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
BUG_ON(efx->loopback_selftest);
state->flush = true;
efx->loopback_selftest = state;
/* Test all supported loopback modes */
for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
if (!(loopback_modes & (1 << mode)))
continue;
/* Move the port into the specified loopback mode. */
state->flush = true;
mutex_lock(&efx->mac_lock);
efx->loopback_mode = mode;
rc = __efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"unable to move into %s loopback\n",
LOOPBACK_MODE(efx));
goto out;
}
rc = efx_wait_for_link(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"loopback %s never came up\n",
LOOPBACK_MODE(efx));
goto out;
}
/* Test all enabled types of TX queue */
efx_for_each_channel_tx_queue(tx_queue, channel) {
state->offload_csum = (tx_queue->queue &
EFX_TXQ_TYPE_OFFLOAD);
rc = efx_test_loopback(tx_queue,
&tests->loopback[mode]);
if (rc)
goto out;
}
}
out:
/* Remove the flush. The caller will remove the loopback setting */
state->flush = true;
efx->loopback_selftest = NULL;
wmb();
kfree(state);
return rc;
}
/**************************************************************************
*
* Entry point
*
*************************************************************************/
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
unsigned flags)
{
enum efx_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode;
enum reset_type reset_method = RESET_TYPE_INVISIBLE;
struct efx_channel *channel;
int rc_test = 0, rc_reset = 0, rc;
/* Online (i.e. non-disruptive) testing
* This checks interrupt generation, event delivery and PHY presence. */
rc = efx_test_phy_alive(efx, tests);
if (rc && !rc_test)
rc_test = rc;
rc = efx_test_nvram(efx, tests);
if (rc && !rc_test)
rc_test = rc;
rc = efx_test_interrupts(efx, tests);
if (rc && !rc_test)
rc_test = rc;
efx_for_each_channel(channel, efx) {
rc = efx_test_eventq_irq(channel, tests);
if (rc && !rc_test)
rc_test = rc;
}
if (rc_test)
return rc_test;
if (!(flags & ETH_TEST_FL_OFFLINE))
return efx_test_phy(efx, tests, flags);
/* Offline (i.e. disruptive) testing
* This checks MAC and PHY loopback on the specified port. */
/* Detach the device so the kernel doesn't transmit during the
* loopback test and the watchdog timeout doesn't fire.
*/
netif_device_detach(efx->net_dev);
mutex_lock(&efx->mac_lock);
if (efx->loopback_modes) {
/* We need the 312.5 MHz clock from the PHY to test the XMAC
* registers, so move into XGMII loopback if available */
if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
efx->loopback_mode = LOOPBACK_XGMII;
else
efx->loopback_mode = __ffs(efx->loopback_modes);
}
__efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
/* free up all consumers of SRAM (including all the queues) */
efx_reset_down(efx, reset_method);
rc = efx_test_chip(efx, tests);
if (rc && !rc_test)
rc_test = rc;
/* reset the chip to recover from the register test */
rc_reset = efx->type->reset(efx, reset_method);
/* Ensure that the phy is powered and out of loopback
* for the bist and loopback tests */
efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE;
rc = efx_reset_up(efx, reset_method, rc_reset == 0);
if (rc && !rc_reset)
rc_reset = rc;
if (rc_reset) {
netif_err(efx, drv, efx->net_dev,
"Unable to recover from chip test\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
return rc_reset;
}
rc = efx_test_phy(efx, tests, flags);
if (rc && !rc_test)
rc_test = rc;
rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
if (rc && !rc_test)
rc_test = rc;
/* restore the PHY to the previous state */
mutex_lock(&efx->mac_lock);
efx->phy_mode = phy_mode;
efx->loopback_mode = loopback_mode;
__efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
netif_device_attach(efx->net_dev);
return rc_test;
}

View File

@@ -0,0 +1,53 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_SELFTEST_H
#define EFX_SELFTEST_H
#include "net_driver.h"
/*
* Self tests
*/
struct efx_loopback_self_tests {
int tx_sent[EFX_TXQ_TYPES];
int tx_done[EFX_TXQ_TYPES];
int rx_good;
int rx_bad;
};
#define EFX_MAX_PHY_TESTS 20
/* Efx self test results
* For fields which are not counters, 1 indicates success and -1
* indicates failure.
*/
struct efx_self_tests {
/* online tests */
int phy_alive;
int nvram;
int interrupt;
int eventq_dma[EFX_MAX_CHANNELS];
int eventq_int[EFX_MAX_CHANNELS];
int eventq_poll[EFX_MAX_CHANNELS];
/* offline tests */
int registers;
int phy_ext[EFX_MAX_PHY_TESTS];
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
extern void efx_loopback_rx_packet(struct efx_nic *efx,
const char *buf_ptr, int pkt_len);
extern int efx_selftest(struct efx_nic *efx,
struct efx_self_tests *tests,
unsigned flags);
#endif /* EFX_SELFTEST_H */

View File

@@ -0,0 +1,676 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "mac.h"
#include "spi.h"
#include "regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
static void siena_init_wol(struct efx_nic *efx);
static void siena_push_irq_moderation(struct efx_channel *channel)
{
efx_dword_t timer_cmd;
if (channel->irq_moderation)
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE,
FFE_CZ_TIMER_MODE_INT_HLDOFF,
FRF_CZ_TC_TIMER_VAL,
channel->irq_moderation - 1);
else
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE,
FFE_CZ_TIMER_MODE_DIS,
FRF_CZ_TC_TIMER_VAL, 0);
efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
channel->channel);
}
static void siena_push_multicast_hash(struct efx_nic *efx)
{
WARN_ON(!mutex_is_locked(&efx->mac_lock));
efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
efx->multicast_hash.byte, sizeof(efx->multicast_hash),
NULL, 0, NULL);
}
static int siena_mdio_write(struct net_device *net_dev,
int prtad, int devad, u16 addr, u16 value)
{
struct efx_nic *efx = netdev_priv(net_dev);
uint32_t status;
int rc;
rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
addr, value, &status);
if (rc)
return rc;
if (status != MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return 0;
}
static int siena_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
{
struct efx_nic *efx = netdev_priv(net_dev);
uint16_t value;
uint32_t status;
int rc;
rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
addr, &value, &status);
if (rc)
return rc;
if (status != MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return (int)value;
}
/* This call is responsible for hooking in the MAC and PHY operations */
static int siena_probe_port(struct efx_nic *efx)
{
int rc;
/* Hook in PHY operations table */
efx->phy_op = &efx_mcdi_phy_ops;
/* Set up MDIO structure for PHY */
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->mdio.mdio_read = siena_mdio_read;
efx->mdio.mdio_write = siena_mdio_write;
/* Fill out MDIO structure, loopback modes, and initial link state */
rc = efx->phy_op->probe(efx);
if (rc != 0)
return rc;
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
MC_CMD_MAC_NSTATS * sizeof(u64));
if (rc)
return rc;
netif_dbg(efx, probe, efx->net_dev,
"stats buffer at %llx (virt %p phys %llx)\n",
(u64)efx->stats_buffer.dma_addr,
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
return 0;
}
static void siena_remove_port(struct efx_nic *efx)
{
efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
static const struct efx_nic_register_test siena_register_tests[] = {
{ FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_CZ_USR_EV_CFG,
EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_RX_CFG,
EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) },
{ FR_AZ_TX_CFG,
EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) },
{ FR_AZ_TX_RESERVED,
EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
{ FR_AZ_SRM_TX_DC_CFG,
EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_RX_DC_CFG,
EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_RX_DC_PF_WM,
EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_BZ_DP_CTRL,
EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_BZ_RX_RSS_TKEY,
EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
{ FR_CZ_RX_RSS_IPV6_REG1,
EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
{ FR_CZ_RX_RSS_IPV6_REG2,
EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
{ FR_CZ_RX_RSS_IPV6_REG3,
EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
};
static int siena_test_registers(struct efx_nic *efx)
{
return efx_nic_test_registers(efx, siena_register_tests,
ARRAY_SIZE(siena_register_tests));
}
/**************************************************************************
*
* Device reset
*
**************************************************************************
*/
static enum reset_type siena_map_reset_reason(enum reset_type reason)
{
return RESET_TYPE_ALL;
}
static int siena_map_reset_flags(u32 *flags)
{
enum {
SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER |
ETH_RESET_OFFLOAD | ETH_RESET_MAC |
ETH_RESET_PHY),
SIENA_RESET_MC = (SIENA_RESET_PORT |
ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT),
};
if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) {
*flags &= ~SIENA_RESET_MC;
return RESET_TYPE_WORLD;
}
if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) {
*flags &= ~SIENA_RESET_PORT;
return RESET_TYPE_ALL;
}
/* no invisible reset implemented */
return -EINVAL;
}
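/* Usage sketch (illustrative): a caller holding the full set of port
 * component flags consumes them like so:
 *
 *	u32 flags = ETH_RESET_DMA | ETH_RESET_FILTER | ETH_RESET_OFFLOAD |
 *		    ETH_RESET_MAC | ETH_RESET_PHY;
 *	int method = siena_map_reset_flags(&flags);
 *	-- method == RESET_TYPE_ALL, flags == 0
 *
 * Adding ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT upgrades the result to
 * RESET_TYPE_WORLD; any bits left in *flags after the call were not
 * satisfiable by this NIC.
 */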
static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
{
int rc;
/* Recover from a failed assertion pre-reset */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
return rc;
if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
else
return efx_mcdi_reset_port(efx);
}
static int siena_probe_nvconfig(struct efx_nic *efx)
{
return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
}
static int siena_probe_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
bool already_attached = false;
efx_oword_t reg;
int rc;
/* Allocate storage for hardware specific data */
nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
if (!nic_data)
return -ENOMEM;
efx->nic_data = nic_data;
if (efx_nic_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev,
"Siena FPGA not supported\n");
rc = -ENODEV;
goto fail1;
}
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
/* Initialise MCDI */
nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
FR_CZ_MC_TREG_SMEM,
FR_CZ_MC_TREG_SMEM_STEP *
FR_CZ_MC_TREG_SMEM_ROWS);
if (!nic_data->mcdi_smem) {
netif_err(efx, probe, efx->net_dev,
"could not map MCDI at %llx+%x\n",
(unsigned long long)efx->membase_phys +
FR_CZ_MC_TREG_SMEM,
FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
rc = -ENOMEM;
goto fail1;
}
efx_mcdi_init(efx);
/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
goto fail2;
/* Let the BMC know that the driver is now in charge of link and
* filter settings. We must do this before we reset the NIC */
rc = efx_mcdi_drv_attach(efx, true, &already_attached);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"Unable to register driver with MCPU\n");
goto fail2;
}
if (already_attached)
/* Not a fatal error */
netif_err(efx, probe, efx->net_dev,
"Host already registered with MCPU\n");
/* Now we can reset the NIC */
rc = siena_reset_hw(efx, RESET_TYPE_ALL);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
}
siena_init_wol(efx);
/* Allocate memory for INT_KER */
rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
if (rc)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
netif_dbg(efx, probe, efx->net_dev,
"INT_KER at %llx (virt %p phys %llx)\n",
(unsigned long long)efx->irq_status.dma_addr,
efx->irq_status.addr,
(unsigned long long)virt_to_phys(efx->irq_status.addr));
/* Read in the non-volatile configuration */
rc = siena_probe_nvconfig(efx);
if (rc == -EINVAL) {
netif_err(efx, probe, efx->net_dev,
"NVRAM is invalid therefore using defaults\n");
efx->phy_type = PHY_TYPE_NONE;
efx->mdio.prtad = MDIO_PRTAD_NONE;
} else if (rc) {
goto fail5;
}
return 0;
fail5:
efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
efx_mcdi_drv_attach(efx, false, NULL);
fail2:
iounmap(nic_data->mcdi_smem);
fail1:
kfree(efx->nic_data);
return rc;
}
/* This call performs hardware-specific global initialisation, such as
* defining the descriptor cache sizes and number of RSS channels.
* It does not set up any buffers, descriptor rings or event queues.
*/
static int siena_init_nic(struct efx_nic *efx)
{
efx_oword_t temp;
int rc;
/* Recover from a failed assertion post-reset */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
return rc;
/* Squash TX of packets of 16 bytes or less */
efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
* descriptors (which is bad).
*/
efx_reado(efx, &temp, FR_AZ_TX_CFG);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1);
efx_writeo(efx, &temp, FR_AZ_TX_CFG);
efx_reado(efx, &temp, FR_AZ_RX_CFG);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
/* Enable hash insertion. This is broken for the 'Falcon' hash
* if IPv6 hashing is also enabled, so also select Toeplitz
* TCP/IPv4 and IPv4 hashes. */
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
/* Set hash key for IPv4 */
memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
/* Enable IPv6 RSS */
BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
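/* The Toeplitz key is written one 16-byte oword at a time: REG1 and
 * REG2 take the first 32 bytes, and REG3 carries the remaining high
 * key bytes alongside the IPv6 hash enable flags.
 */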
memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
if (rc)
return rc;
/* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
efx_nic_init_common(efx);
return 0;
}
static void siena_remove_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data = efx->nic_data;
efx_nic_free_buffer(efx, &efx->irq_status);
siena_reset_hw(efx, RESET_TYPE_ALL);
/* Relinquish the device back to the BMC */
if (efx_nic_has_mc(efx))
efx_mcdi_drv_attach(efx, false, NULL);
/* Tear down the private nic state */
iounmap(nic_data->mcdi_smem);
kfree(nic_data);
efx->nic_data = NULL;
}
#define STATS_GENERATION_INVALID ((__force __le64)(-1))
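/* The MC brackets each statistics DMA with a generation count: it is
 * assumed to write GENERATION_START before the stats and
 * GENERATION_END after them.  The reader below checks END first and
 * START last, with read barriers in between, and retries on mismatch
 * so that a torn update is never consumed.
 */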
static int siena_try_update_nic_stats(struct efx_nic *efx)
{
__le64 *dma_stats;
struct efx_mac_stats *mac_stats;
__le64 generation_start, generation_end;
mac_stats = &efx->mac_stats;
dma_stats = efx->stats_buffer.addr;
generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
if (generation_end == STATS_GENERATION_INVALID)
return 0;
rmb();
#define MAC_STAT(M, D) \
mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
MAC_STAT(tx_bytes, TX_BYTES);
MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
mac_stats->tx_bad_bytes);
MAC_STAT(tx_packets, TX_PKTS);
MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
MAC_STAT(tx_pause, TX_PAUSE_PKTS);
MAC_STAT(tx_control, TX_CONTROL_PKTS);
MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
MAC_STAT(tx_lt64, TX_LT64_PKTS);
MAC_STAT(tx_64, TX_64_PKTS);
MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
mac_stats->tx_collision = 0;
MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
mac_stats->tx_collision = (mac_stats->tx_single_collision +
mac_stats->tx_multiple_collision +
mac_stats->tx_excessive_collision +
mac_stats->tx_late_collision);
MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
MAC_STAT(rx_bytes, RX_BYTES);
MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
mac_stats->rx_bad_bytes);
MAC_STAT(rx_packets, RX_PKTS);
MAC_STAT(rx_good, RX_GOOD_PKTS);
MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
MAC_STAT(rx_pause, RX_PAUSE_PKTS);
MAC_STAT(rx_control, RX_CONTROL_PKTS);
MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
MAC_STAT(rx_64, RX_64_PKTS);
MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
mac_stats->rx_bad_lt64 = 0;
mac_stats->rx_bad_64_to_15xx = 0;
mac_stats->rx_bad_15xx_to_jumbo = 0;
MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
mac_stats->rx_missed = 0;
MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
mac_stats->rx_good_lt64 = 0;
efx->n_rx_nodesc_drop_cnt =
le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
#undef MAC_STAT
rmb();
generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
if (generation_end != generation_start)
return -EAGAIN;
return 0;
}
static void siena_update_nic_stats(struct efx_nic *efx)
{
int retry;
/* If we're unlucky enough to read statistics during the DMA, wait
 * up to 10ms for it to finish (typically takes <500us) */
for (retry = 0; retry < 100; ++retry) {
if (siena_try_update_nic_stats(efx) == 0)
return;
udelay(100);
}
/* Use the old values instead */
}
static void siena_start_nic_stats(struct efx_nic *efx)
{
__le64 *dma_stats = efx->stats_buffer.addr;
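/* Invalidate the generation count so that nothing is treated as a
 * complete set of stats before the MC's first DMA lands.
 */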
dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
}
static void siena_stop_nic_stats(struct efx_nic *efx)
{
efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
}
/**************************************************************************
*
* Wake on LAN
*
**************************************************************************
*/
static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
struct siena_nic_data *nic_data = efx->nic_data;
wol->supported = WAKE_MAGIC;
if (nic_data->wol_filter_id != -1)
wol->wolopts = WAKE_MAGIC;
else
wol->wolopts = 0;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int siena_set_wol(struct efx_nic *efx, u32 type)
{
struct siena_nic_data *nic_data = efx->nic_data;
int rc;
if (type & ~WAKE_MAGIC)
return -EINVAL;
if (type & WAKE_MAGIC) {
if (nic_data->wol_filter_id != -1)
efx_mcdi_wol_filter_remove(efx,
nic_data->wol_filter_id);
rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
&nic_data->wol_filter_id);
if (rc)
goto fail;
pci_wake_from_d3(efx->pci_dev, true);
} else {
rc = efx_mcdi_wol_filter_reset(efx);
nic_data->wol_filter_id = -1;
pci_wake_from_d3(efx->pci_dev, false);
if (rc)
goto fail;
}
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n",
__func__, type, rc);
return rc;
}
static void siena_init_wol(struct efx_nic *efx)
{
struct siena_nic_data *nic_data = efx->nic_data;
int rc;
rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);
if (rc != 0) {
/* If it failed, attempt to get into a synchronised
* state with MC by resetting any set WoL filters */
efx_mcdi_wol_filter_reset(efx);
nic_data->wol_filter_id = -1;
} else if (nic_data->wol_filter_id != -1) {
pci_wake_from_d3(efx->pci_dev, true);
}
}
/**************************************************************************
*
* Revision-dependent attributes used by efx.c and nic.c
*
**************************************************************************
*/
const struct efx_nic_type siena_a0_nic_type = {
.probe = siena_probe_nic,
.remove = siena_remove_nic,
.init = siena_init_nic,
.fini = efx_port_dummy_op_void,
.monitor = NULL,
.map_reset_reason = siena_map_reset_reason,
.map_reset_flags = siena_map_reset_flags,
.reset = siena_reset_hw,
.probe_port = siena_probe_port,
.remove_port = siena_remove_port,
.prepare_flush = efx_port_dummy_op_void,
.update_stats = siena_update_nic_stats,
.start_stats = siena_start_nic_stats,
.stop_stats = siena_stop_nic_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
.push_multicast_hash = siena_push_multicast_hash,
.reconfigure_port = efx_mcdi_phy_reconfigure,
.get_wol = siena_get_wol,
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
.test_registers = siena_test_registers,
.test_nvram = efx_mcdi_nvram_test_all,
.default_mac_ops = &efx_mcdi_mac_operations,
.revision = EFX_REV_SIENA_A0,
.mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
* channels */
.tx_dc_base = 0x88000,
.rx_dc_base = 0x68000,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE),
};


@@ -0,0 +1,99 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005 Fen Systems Ltd.
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_SPI_H
#define EFX_SPI_H
#include "net_driver.h"
/**************************************************************************
*
* Basic SPI command set and bit definitions
*
*************************************************************************/
#define SPI_WRSR 0x01 /* Write status register */
#define SPI_WRITE 0x02 /* Write data to memory array */
#define SPI_READ 0x03 /* Read data from memory array */
#define SPI_WRDI 0x04 /* Reset write enable latch */
#define SPI_RDSR 0x05 /* Read status register */
#define SPI_WREN 0x06 /* Set write enable latch */
#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
/**
* struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
* @device_id: Controller's id for the device
* @size: Size (in bytes)
* @addr_len: Number of address bytes in read/write commands
* @munge_address: Flag whether addresses should be munged.
* Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
* use bit 3 of the command byte as address bit A8, rather
* than having a two-byte address. If this flag is set, then
* commands should be munged in this way.
* @erase_command: Erase command (or 0 if sector erase not needed).
* @erase_size: Erase sector size (in bytes)
* Erase commands affect sectors with this size and alignment.
* This must be a power of two.
* @block_size: Write block size (in bytes).
* Write commands are limited to blocks with this size and alignment.
*/
struct efx_spi_device {
int device_id;
unsigned int size;
unsigned int addr_len;
unsigned int munge_address:1;
u8 erase_command;
unsigned int erase_size;
unsigned int block_size;
};
static inline bool efx_spi_present(const struct efx_spi_device *spi)
{
return spi->size != 0;
}
int falcon_spi_cmd(struct efx_nic *efx,
const struct efx_spi_device *spi, unsigned int command,
int address, const void *in, void *out, size_t len);
int falcon_spi_wait_write(struct efx_nic *efx,
const struct efx_spi_device *spi);
int falcon_spi_read(struct efx_nic *efx,
const struct efx_spi_device *spi, loff_t start,
size_t len, size_t *retlen, u8 *buffer);
int falcon_spi_write(struct efx_nic *efx,
const struct efx_spi_device *spi, loff_t start,
size_t len, size_t *retlen, const u8 *buffer);
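/* Illustrative sketch only: reading the first 16 bytes (chip and board
 * config lives below FALCON_NVCONFIG_END) of a probed SPI device:
 *
 *	u8 buf[16];
 *	size_t retlen;
 *	int rc = falcon_spi_read(efx, spi, 0, sizeof(buf), &retlen, buf);
 *	if (rc == 0 && retlen == sizeof(buf))
 *		(parse the config...)
 */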
/*
* SFC4000 flash is partitioned into:
* 0-0x400 chip and board config (see falcon_hwdefs.h)
* 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
* 0x8000-end boot code (mapped to PCI expansion ROM)
* SFC4000 small EEPROM (size < 0x400) is used for VPD only.
* SFC4000 large EEPROM (size >= 0x400) is partitioned into:
* 0-0x400 chip and board config
* configurable VPD
* 0x800-0x1800 boot config
* Aside from the chip and board config, all of these are optional and may
* be absent or truncated depending on the devices used.
*/
#define FALCON_NVCONFIG_END 0x400U
#define FALCON_FLASH_BOOTCODE_START 0x8000U
#define EFX_EEPROM_BOOTCONFIG_START 0x800U
#define EFX_EEPROM_BOOTCONFIG_END 0x1800U
#endif /* EFX_SPI_H */


@@ -0,0 +1,494 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2007-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/delay.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "efx.h"
#include "mdio_10g.h"
#include "nic.h"
#include "phy.h"
#include "workarounds.h"
/* We expect these MMDs to be in the package. */
#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
MDIO_DEVS_PCS | \
MDIO_DEVS_PHYXS | \
MDIO_DEVS_AN)
#define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
(1 << LOOPBACK_PCS) | \
(1 << LOOPBACK_PMAPMD) | \
(1 << LOOPBACK_PHYXS_WS))
/* We complain if we fail to see the link partner as 10G capable this many
* times in a row (must be > 1 as sampling the autoneg. registers is racy)
*/
#define MAX_BAD_LP_TRIES (5)
/* Extended control register */
#define PMA_PMD_XCONTROL_REG 49152
#define PMA_PMD_EXT_GMII_EN_LBN 1
#define PMA_PMD_EXT_GMII_EN_WIDTH 1
#define PMA_PMD_EXT_CLK_OUT_LBN 2
#define PMA_PMD_EXT_CLK_OUT_WIDTH 1
#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
#define PMA_PMD_EXT_CLK312_WIDTH 1
#define PMA_PMD_EXT_LPOWER_LBN 12
#define PMA_PMD_EXT_LPOWER_WIDTH 1
#define PMA_PMD_EXT_ROBUST_LBN 14
#define PMA_PMD_EXT_ROBUST_WIDTH 1
#define PMA_PMD_EXT_SSR_LBN 15
#define PMA_PMD_EXT_SSR_WIDTH 1
/* extended status register */
#define PMA_PMD_XSTATUS_REG 49153
#define PMA_PMD_XSTAT_MDIX_LBN 14
#define PMA_PMD_XSTAT_FLP_LBN (12)
/* LED control register */
#define PMA_PMD_LED_CTRL_REG 49159
#define PMA_PMA_LED_ACTIVITY_LBN (3)
/* LED function override register */
#define PMA_PMD_LED_OVERR_REG 49161
/* Bit positions for different LEDs (there are more but not wired on SFE4001)*/
#define PMA_PMD_LED_LINK_LBN (0)
#define PMA_PMD_LED_SPEED_LBN (2)
#define PMA_PMD_LED_TX_LBN (4)
#define PMA_PMD_LED_RX_LBN (6)
/* Override settings */
#define PMA_PMD_LED_AUTO (0) /* H/W control */
#define PMA_PMD_LED_ON (1)
#define PMA_PMD_LED_OFF (2)
#define PMA_PMD_LED_FLASH (3)
#define PMA_PMD_LED_MASK 3
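/* Each LED has a 2-bit field in PMA_PMD_LED_OVERR_REG.  For example,
 * writing (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) flashes the RX
 * (red) LED while the zeroed fields leave the other LEDs under
 * hardware control.
 */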
/* Green and Amber under hardware control, Red off */
#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
#define PMA_PMD_SPEED_ENABLE_REG 49192
#define PMA_PMD_100TX_ADV_LBN 1
#define PMA_PMD_100TX_ADV_WIDTH 1
#define PMA_PMD_1000T_ADV_LBN 2
#define PMA_PMD_1000T_ADV_WIDTH 1
#define PMA_PMD_10000T_ADV_LBN 3
#define PMA_PMD_10000T_ADV_WIDTH 1
#define PMA_PMD_SPEED_LBN 4
#define PMA_PMD_SPEED_WIDTH 4
/* Misc register defines */
#define PCS_CLOCK_CTRL_REG 55297
#define PLL312_RST_N_LBN 2
#define PCS_SOFT_RST2_REG 55302
#define SERDES_RST_N_LBN 13
#define XGXS_RST_N_LBN 12
#define PCS_TEST_SELECT_REG 55303 /* PRM 10.5.8 */
#define CLK312_EN_LBN 3
/* PHYXS registers */
#define PHYXS_XCONTROL_REG 49152
#define PHYXS_RESET_LBN 15
#define PHYXS_RESET_WIDTH 1
#define PHYXS_TEST1 (49162)
#define LOOPBACK_NEAR_LBN (8)
#define LOOPBACK_NEAR_WIDTH (1)
/* Boot status register */
#define PCS_BOOT_STATUS_REG 53248
#define PCS_BOOT_FATAL_ERROR_LBN 0
#define PCS_BOOT_PROGRESS_LBN 1
#define PCS_BOOT_PROGRESS_WIDTH 2
#define PCS_BOOT_PROGRESS_INIT 0
#define PCS_BOOT_PROGRESS_WAIT_MDIO 1
#define PCS_BOOT_PROGRESS_CHECKSUM 2
#define PCS_BOOT_PROGRESS_JUMP 3
#define PCS_BOOT_DOWNLOAD_WAIT_LBN 3
#define PCS_BOOT_CODE_STARTED_LBN 4
/* 100M/1G PHY registers */
#define GPHY_XCONTROL_REG 49152
#define GPHY_ISOLATE_LBN 10
#define GPHY_ISOLATE_WIDTH 1
#define GPHY_DUPLEX_LBN 8
#define GPHY_DUPLEX_WIDTH 1
#define GPHY_LOOPBACK_NEAR_LBN 14
#define GPHY_LOOPBACK_NEAR_WIDTH 1
#define C22EXT_STATUS_REG 49153
#define C22EXT_STATUS_LINK_LBN 2
#define C22EXT_STATUS_LINK_WIDTH 1
#define C22EXT_MSTSLV_CTRL 49161
#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8
#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9
#define C22EXT_MSTSLV_STATUS 49162
#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10
#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11
/* Time to wait between powering down the LNPGA and turning off the power
* rails */
#define LNPGA_PDOWN_WAIT (HZ / 5)
struct tenxpress_phy_data {
enum efx_loopback_mode loopback_mode;
enum efx_phy_mode phy_mode;
int bad_lp_tries;
};
static int tenxpress_init(struct efx_nic *efx)
{
/* Enable 312.5 MHz clock */
efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
1 << CLK312_EN_LBN);
/* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
1 << PMA_PMA_LED_ACTIVITY_LBN, true);
efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
SFX7101_PMA_PMD_LED_DEFAULT);
return 0;
}
static int tenxpress_phy_probe(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data;
/* Allocate phy private storage */
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
phy_data->phy_mode = efx->phy_mode;
efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45;
efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
ADVERTISED_10000baseT_Full);
return 0;
}
static int tenxpress_phy_init(struct efx_nic *efx)
{
int rc;
falcon_board(efx)->type->init_phy(efx);
if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
return rc;
rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
return rc;
}
rc = tenxpress_init(efx);
if (rc < 0)
return rc;
/* Reinitialise flow control settings */
efx_link_set_wanted_fc(efx, efx->wanted_fc);
efx_mdio_an_reconfigure(efx);
schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
/* Let XGXS and SerDes out of reset */
falcon_reset_xaui(efx);
return 0;
}
/* Perform a "special software reset" on the PHY. The caller is
* responsible for saving and restoring the PHY hardware registers
* properly, and masking/unmasking LASI */
static int tenxpress_special_reset(struct efx_nic *efx)
{
int rc, reg;
/* The XGMAC clock is driven from the SFX7101 312MHz clock, so
* a special software reset can glitch the XGMAC sufficiently for stats
* requests to fail. */
falcon_stop_nic_stats(efx);
/* Initiate reset */
reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
reg |= (1 << PMA_PMD_EXT_SSR_LBN);
efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
mdelay(200);
/* Wait for the blocks to come out of reset */
rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
goto out;
/* Try and reconfigure the device */
rc = tenxpress_init(efx);
if (rc < 0)
goto out;
/* Wait for the XGXS state machine to churn */
mdelay(10);
out:
falcon_start_nic_stats(efx);
return rc;
}
static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
{
struct tenxpress_phy_data *pd = efx->phy_data;
bool bad_lp;
int reg;
if (link_ok) {
bad_lp = false;
} else {
/* Check that AN has started but not completed. */
reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_STAT1);
if (!(reg & MDIO_AN_STAT1_LPABLE))
return; /* LP status is unknown */
bad_lp = !(reg & MDIO_AN_STAT1_COMPLETE);
if (bad_lp)
pd->bad_lp_tries++;
}
/* Nothing to do if all is well and was previously so. */
if (!pd->bad_lp_tries)
return;
/* Use the RX (red) LED as an error indicator once we've seen AN
* failure several times in a row, and also log a message. */
if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
PMA_PMD_LED_OVERR_REG);
reg &= ~(PMA_PMD_LED_MASK << PMA_PMD_LED_RX_LBN);
if (!bad_lp) {
reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
} else {
reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN;
netif_err(efx, link, efx->net_dev,
"appears to be plugged into a port"
" that is not 10GBASE-T capable. The PHY"
" supports 10GBASE-T ONLY, so no link can"
" be established\n");
}
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
PMA_PMD_LED_OVERR_REG, reg);
pd->bad_lp_tries = bad_lp;
}
}
static bool sfx7101_link_ok(struct efx_nic *efx)
{
return efx_mdio_links_ok(efx,
MDIO_DEVS_PMAPMD |
MDIO_DEVS_PCS |
MDIO_DEVS_PHYXS);
}
static void tenxpress_ext_loopback(struct efx_nic *efx)
{
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
1 << LOOPBACK_NEAR_LBN,
efx->loopback_mode == LOOPBACK_PHYXS);
}
static void tenxpress_low_power(struct efx_nic *efx)
{
efx_mdio_set_mmds_lpower(
efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
TENXPRESS_REQUIRED_DEVS);
}
static int tenxpress_phy_reconfigure(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
bool phy_mode_change, loop_reset;
if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
phy_data->phy_mode = efx->phy_mode;
return 0;
}
phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
phy_data->phy_mode != PHY_MODE_NORMAL);
loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, LOOPBACKS_EXTERNAL(efx)) ||
LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
if (loop_reset || phy_mode_change) {
tenxpress_special_reset(efx);
falcon_reset_xaui(efx);
}
tenxpress_low_power(efx);
efx_mdio_transmit_disable(efx);
efx_mdio_phy_reconfigure(efx);
tenxpress_ext_loopback(efx);
efx_mdio_an_reconfigure(efx);
phy_data->loopback_mode = efx->loopback_mode;
phy_data->phy_mode = efx->phy_mode;
return 0;
}
static void
tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
/* Poll for link state changes */
static bool tenxpress_phy_poll(struct efx_nic *efx)
{
struct efx_link_state old_state = efx->link_state;
efx->link_state.up = sfx7101_link_ok(efx);
efx->link_state.speed = 10000;
efx->link_state.fd = true;
efx->link_state.fc = efx_mdio_get_pause(efx);
sfx7101_check_bad_lp(efx, efx->link_state.up);
return !efx_link_state_equal(&efx->link_state, &old_state);
}
static void sfx7101_phy_fini(struct efx_nic *efx)
{
int reg;
/* Power down the LNPGA */
reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
/* Waiting here ensures that the board fini, which can turn
* off the power to the PHY, won't get run until the LNPGA
* powerdown has been given long enough to complete. */
schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
}
static void tenxpress_phy_remove(struct efx_nic *efx)
{
kfree(efx->phy_data);
efx->phy_data = NULL;
}
/* Override the RX, TX and link LEDs */
void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
int reg;
switch (mode) {
case EFX_LED_OFF:
reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) |
(PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) |
(PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN);
break;
case EFX_LED_ON:
reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) |
(PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) |
(PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
break;
default:
reg = SFX7101_PMA_PMD_LED_DEFAULT;
break;
}
efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
}
static const char *const sfx7101_test_names[] = {
"bist"
};
static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index)
{
if (index < ARRAY_SIZE(sfx7101_test_names))
return sfx7101_test_names[index];
return NULL;
}
static int
sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
{
int rc;
if (!(flags & ETH_TEST_FL_OFFLINE))
return 0;
/* BIST is automatically run after a special software reset */
rc = tenxpress_special_reset(efx);
results[0] = rc ? -1 : 1;
efx_mdio_an_reconfigure(efx);
return rc;
}
static void
tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
u32 adv = 0, lpa = 0;
int reg;
reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
adv |= ADVERTISED_10000baseT_Full;
reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
if (reg & MDIO_AN_10GBT_STAT_LP10G)
lpa |= ADVERTISED_10000baseT_Full;
mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
/* In loopback, the PHY automatically brings up the correct interface,
* but doesn't advertise the correct speed. So override it */
if (LOOPBACK_EXTERNAL(efx))
ethtool_cmd_speed_set(ecmd, SPEED_10000);
}
static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
if (!ecmd->autoneg)
return -EINVAL;
return efx_mdio_set_settings(efx, ecmd);
}
static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
{
efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
MDIO_AN_10GBT_CTRL_ADV10G,
advertising & ADVERTISED_10000baseT_Full);
}
const struct efx_phy_operations falcon_sfx7101_phy_ops = {
.probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
.poll = tenxpress_phy_poll,
.fini = sfx7101_phy_fini,
.remove = tenxpress_phy_remove,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
.test_alive = efx_mdio_test_alive,
.test_name = sfx7101_test_name,
.run_tests = sfx7101_run_tests,
};

File diff suppressed because it is too large

@@ -0,0 +1,560 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
/*
* Driver for Transwitch/Mysticom CX4 retimer
* see www.transwitch.com, part is TXC-43128
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include "efx.h"
#include "mdio_10g.h"
#include "phy.h"
#include "nic.h"
/* We expect these MMDs to be in the package */
#define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \
MDIO_DEVS_PMAPMD | \
MDIO_DEVS_PHYXS)
#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \
(1 << LOOPBACK_PMAPMD) | \
(1 << LOOPBACK_PHYXS_WS))
/**************************************************************************
*
* Compile-time config
*
**************************************************************************
*/
#define TXCNAME "TXC43128"
/* Total length of time we'll wait for the PHY to come out of reset (ms) */
#define TXC_MAX_RESET_TIME 500
/* Interval between checks (ms) */
#define TXC_RESET_WAIT 10
/* How long to run BIST (us) */
#define TXC_BIST_DURATION 50
/**************************************************************************
*
* Register definitions
*
**************************************************************************
*/
/* Command register */
#define TXC_GLRGS_GLCMD 0xc004
/* Useful bits in command register */
/* Lane power-down */
#define TXC_GLCMD_L01PD_LBN 5
#define TXC_GLCMD_L23PD_LBN 6
/* Limited SW reset: preserves configuration but
* initiates a logic reset. Self-clearing */
#define TXC_GLCMD_LMTSWRST_LBN 14
/* Signal Quality Control */
#define TXC_GLRGS_GSGQLCTL 0xc01a
/* Enable bit */
#define TXC_GSGQLCT_SGQLEN_LBN 15
/* Lane selection */
#define TXC_GSGQLCT_LNSL_LBN 13
#define TXC_GSGQLCT_LNSL_WIDTH 2
/* Analog TX control */
#define TXC_ALRGS_ATXCTL 0xc040
/* Lane power-down */
#define TXC_ATXCTL_TXPD3_LBN 15
#define TXC_ATXCTL_TXPD2_LBN 14
#define TXC_ATXCTL_TXPD1_LBN 13
#define TXC_ATXCTL_TXPD0_LBN 12
/* Amplitude on lanes 0, 1 */
#define TXC_ALRGS_ATXAMP0 0xc041
/* Amplitude on lanes 2, 3 */
#define TXC_ALRGS_ATXAMP1 0xc042
/* Bit position of value for lane 0 (or 2) */
#define TXC_ATXAMP_LANE02_LBN 3
/* Bit position of value for lane 1 (or 3) */
#define TXC_ATXAMP_LANE13_LBN 11
#define TXC_ATXAMP_1280_mV 0
#define TXC_ATXAMP_1200_mV 8
#define TXC_ATXAMP_1120_mV 12
#define TXC_ATXAMP_1060_mV 14
#define TXC_ATXAMP_0820_mV 25
#define TXC_ATXAMP_0720_mV 26
#define TXC_ATXAMP_0580_mV 27
#define TXC_ATXAMP_0440_mV 28
#define TXC_ATXAMP_0820_BOTH \
((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \
| (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))
#define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */
/* Preemphasis on lanes 0, 1 */
#define TXC_ALRGS_ATXPRE0 0xc043
/* Preemphasis on lanes 2, 3 */
#define TXC_ALRGS_ATXPRE1 0xc044
#define TXC_ATXPRE_NONE 0
#define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */
#define TXC_ALRGS_ARXCTL 0xc045
/* Lane power-down */
#define TXC_ARXCTL_RXPD3_LBN 15
#define TXC_ARXCTL_RXPD2_LBN 14
#define TXC_ARXCTL_RXPD1_LBN 13
#define TXC_ARXCTL_RXPD0_LBN 12
/* Main control */
#define TXC_MRGS_CTL 0xc340
/* Bits in main control */
#define TXC_MCTL_RESET_LBN 15 /* Self clear */
#define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */
#define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */
/* GPIO output */
#define TXC_GPIO_OUTPUT 0xc346
#define TXC_GPIO_DIR 0xc348
/* Vendor-specific BIST registers */
#define TXC_BIST_CTL 0xc280
#define TXC_BIST_TXFRMCNT 0xc281
#define TXC_BIST_RX0FRMCNT 0xc282
#define TXC_BIST_RX1FRMCNT 0xc283
#define TXC_BIST_RX2FRMCNT 0xc284
#define TXC_BIST_RX3FRMCNT 0xc285
#define TXC_BIST_RX0ERRCNT 0xc286
#define TXC_BIST_RX1ERRCNT 0xc287
#define TXC_BIST_RX2ERRCNT 0xc288
#define TXC_BIST_RX3ERRCNT 0xc289
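/* The per-lane frame and error counters sit at consecutive addresses,
 * so lane N is read as TXC_BIST_RX0FRMCNT + N and TXC_BIST_RX0ERRCNT
 * + N (see txc_bist_one() below).
 */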
/* BIST type (controls bit pattern in test) */
#define TXC_BIST_CTRL_TYPE_LBN 10
#define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */
#define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */
#define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */
#define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */
/* Set this to 1 for 10 bit and 0 for 8 bit */
#define TXC_BIST_CTRL_B10EN_LBN 12
/* Enable BIST (write 0 to disable) */
#define TXC_BIST_CTRL_ENAB_LBN 13
/* Stop BIST (self-clears when stop complete) */
#define TXC_BIST_CTRL_STOP_LBN 14
/* Start BIST (cleared by writing 1 to STOP) */
#define TXC_BIST_CTRL_STRT_LBN 15
/* Mt. Diablo test configuration */
#define TXC_MTDIABLO_CTRL 0xc34f
#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10
struct txc43128_data {
unsigned long bug10934_timer;
enum efx_phy_mode phy_mode;
enum efx_loopback_mode loopback_mode;
};
/* The PHY sometimes needs a reset to bring the link back up. So long as
* it reports link down, we reset it every 5 seconds.
*/
#define BUG10934_RESET_INTERVAL (5 * HZ)
/* Perform a reset that doesn't clear configuration changes */
static void txc_reset_logic(struct efx_nic *efx);
/* Set the output value of a gpio */
void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
{
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
}
/* Set up the GPIO direction register */
void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
{
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
}
/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
 * global reset (it's less clear what reset of other MMDs does). */
static int txc_reset_phy(struct efx_nic *efx)
{
int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
TXC_RESET_WAIT);
if (rc < 0)
goto fail;
/* Check that all the MMDs we expect are present and responding. */
rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
if (rc < 0)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n");
return rc;
}
/* Run a single BIST on one MMD */
static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
{
int ctrl, bctl;
int lane;
int rc = 0;
/* Set PMA to test into loopback using Mt Diablo reg as per app note */
ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
/* The BIST app. note lists these as 3 distinct steps. */
/* Set the BIST type */
bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* Set the BSTEN bit in the BIST Control register to enable */
bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* Set the BSTRT bit in the BIST Control register */
efx_mdio_write(efx, mmd, TXC_BIST_CTL,
bctl | (1 << TXC_BIST_CTRL_STRT_LBN));
/* Wait. */
udelay(TXC_BIST_DURATION);
/* Set the BSTOP bit in the BIST Control register */
bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* The STOP bit should go off when things have stopped */
while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);
/* Check that all the error counts are 0 and all the frame counts are
 * non-zero */
for (lane = 0; lane < 4; lane++) {
int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
if (count != 0) {
netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
"Lane %d had %d errs\n", lane, count);
rc = -EIO;
}
count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
if (count == 0) {
netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
"Lane %d got 0 frames\n", lane);
rc = -EIO;
}
}
if (rc == 0)
netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");
/* Disable BIST */
efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
/* Turn off loopback */
ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
return rc;
}
static int txc_bist(struct efx_nic *efx)
{
return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
}
/* Push the non-configurable defaults into the PHY. This must be
* done after every full reset */
static void txc_apply_defaults(struct efx_nic *efx)
{
int mctrl;
/* Turn amplitude down and preemphasis off on the host side
* (PHY<->MAC) as this is believed less likely to upset Falcon
* and no adverse effects have been noted. It probably also
* saves a picowatt or two */
/* Turn off preemphasis */
efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
/* Turn down the amplitude */
efx_mdio_write(efx, MDIO_MMD_PHYXS,
TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
efx_mdio_write(efx, MDIO_MMD_PHYXS,
TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);
/* Set the line side amplitude and preemphasis to the databook
* defaults as an erratum causes them to be 0 on at least some
* PHY rev.s */
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);
/* Set up the LEDs */
mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
/* Set the Green and Red LEDs to their default modes */
mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
/* Databook recommends doing this after configuration changes */
txc_reset_logic(efx);
falcon_board(efx)->type->init_phy(efx);
}
static int txc43128_phy_probe(struct efx_nic *efx)
{
struct txc43128_data *phy_data;
/* Allocate phy private storage */
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
phy_data->phy_mode = efx->phy_mode;
efx->mdio.mmds = TXC_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
return 0;
}
/* Initialisation entry point for this PHY driver */
static int txc43128_phy_init(struct efx_nic *efx)
{
int rc;
rc = txc_reset_phy(efx);
if (rc < 0)
return rc;
rc = txc_bist(efx);
if (rc < 0)
return rc;
txc_apply_defaults(efx);
return 0;
}
/* Set the lane power down state in the global registers */
static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
{
int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
ctl &= ~pd;
else
ctl |= pd;
efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
}
/* Set the lane power down state in the analog control registers */
static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
{
int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
| (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
| (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
txctl &= ~txpd;
rxctl &= ~rxpd;
} else {
txctl |= txpd;
rxctl |= rxpd;
}
efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
}
static void txc_set_power(struct efx_nic *efx)
{
/* According to the data book, all the MMDs can do low power */
efx_mdio_set_mmds_lpower(efx,
!!(efx->phy_mode & PHY_MODE_LOW_POWER),
TXC_REQUIRED_DEVS);
/* Global register bank is in PCS, PHY XS. These control the host
* side and line side settings respectively. */
txc_glrgs_lane_power(efx, MDIO_MMD_PCS);
txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS);
/* Analog register bank in PMA/PMD, PHY XS */
txc_analog_lane_power(efx, MDIO_MMD_PMAPMD);
txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
}
static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
{
int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
int tries = 50;
val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
while (tries--) {
val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
break;
udelay(1);
}
if (!tries)
netif_info(efx, hw, efx->net_dev,
TXCNAME " Logic reset timed out!\n");
}
/* Perform a logic reset. This preserves the configuration registers
* and is needed for some configuration changes to take effect */
static void txc_reset_logic(struct efx_nic *efx)
{
/* The data sheet claims we can do the logic reset on either the
* PCS or the PHYXS and the result is a reset of both host- and
* line-side logic. */
txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
}
static bool txc43128_phy_read_link(struct efx_nic *efx)
{
return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
}
static int txc43128_phy_reconfigure(struct efx_nic *efx)
{
struct txc43128_data *phy_data = efx->phy_data;
enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);
if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
txc_reset_phy(efx);
txc_apply_defaults(efx);
falcon_reset_xaui(efx);
mode_change &= ~PHY_MODE_TX_DISABLED;
}
efx_mdio_transmit_disable(efx);
efx_mdio_phy_reconfigure(efx);
if (mode_change & PHY_MODE_LOW_POWER)
txc_set_power(efx);
/* The data sheet claims this is required after every reconfiguration
* (note at end of 7.1), but we mustn't do it when nothing changes as
* it glitches the link, and reconfigure gets called on link change,
* so we get an IRQ storm on link up. */
if (loop_change || mode_change)
txc_reset_logic(efx);
phy_data->phy_mode = efx->phy_mode;
phy_data->loopback_mode = efx->loopback_mode;
return 0;
}
static void txc43128_phy_fini(struct efx_nic *efx)
{
/* Disable link events */
efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
static void txc43128_phy_remove(struct efx_nic *efx)
{
kfree(efx->phy_data);
efx->phy_data = NULL;
}
/* Periodic callback: this exists mainly to poll link status as we
* don't use LASI interrupts */
static bool txc43128_phy_poll(struct efx_nic *efx)
{
struct txc43128_data *data = efx->phy_data;
bool was_up = efx->link_state.up;
efx->link_state.up = txc43128_phy_read_link(efx);
efx->link_state.speed = 10000;
efx->link_state.fd = true;
efx->link_state.fc = efx->wanted_fc;
if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) {
data->bug10934_timer = jiffies;
} else {
if (time_after_eq(jiffies, (data->bug10934_timer +
BUG10934_RESET_INTERVAL))) {
data->bug10934_timer = jiffies;
txc_reset_logic(efx);
}
}
return efx->link_state.up != was_up;
}
static const char *const txc43128_test_names[] = {
"bist"
};
static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index)
{
if (index < ARRAY_SIZE(txc43128_test_names))
return txc43128_test_names[index];
return NULL;
}
static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
{
int rc;
if (!(flags & ETH_TEST_FL_OFFLINE))
return 0;
rc = txc_reset_phy(efx);
if (rc < 0)
return rc;
rc = txc_bist(efx);
txc_apply_defaults(efx);
results[0] = rc ? -1 : 1;
return rc;
}
static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
const struct efx_phy_operations falcon_txc_phy_ops = {
.probe = txc43128_phy_probe,
.init = txc43128_phy_init,
.reconfigure = txc43128_phy_reconfigure,
.poll = txc43128_phy_poll,
.fini = txc43128_phy_fini,
.remove = txc43128_phy_remove,
.get_settings = txc43128_get_settings,
.set_settings = efx_mdio_set_settings,
.test_alive = efx_mdio_test_alive,
.run_tests = txc43128_run_tests,
.test_name = txc43128_test_name,
};


@@ -0,0 +1,61 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_WORKAROUNDS_H
#define EFX_WORKAROUNDS_H
/*
* Hardware workarounds.
* Bug numbers are from Solarflare's Bugzilla.
*/
#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_10G(efx) 1
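/* Each workaround is a predicate on the NIC, evaluated at the point of
 * use, e.g.:
 *	if (EFX_WORKAROUND_17213(efx))
 *		(apply the Siena-only fix...)
 */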
/* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
/* RX PCIe double split performance issue */
#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
/* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
* or a PCIe error (bug 11028) */
#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
/* Transmit flow control may get disabled */
#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
/* Legacy ISR read can return zero once */
#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
/* Write combining and sriov=enabled are incompatible */
#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
/* Spurious parity errors in TSORT buffers */
#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
/* Unaligned read request >512 bytes after aligning may break TSORT */
#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
/* iSCSI parsing errors */
#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
/* RX events go missing */
#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
/* RX_RESET on A1 */
#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
/* Increase filter depth to avoid RX_RESET */
#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
/* Flushes may never complete */
#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
/* Leak overlength packets rather than free */
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
#endif /* EFX_WORKAROUNDS_H */