Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
net/sched/cls_api.c has overlapping changes to a call to nlmsg_parse(): one (from 'net') added rtm_tca_policy instead of NULL as the 5th argument, and another (from 'net-next') added cb->extack instead of NULL as the 6th argument.

net/ipv4/ipmr_base.c is a case of a bug fix in 'net' being applied to code which moved (to mr_table_dump) in 'net-next'.

Thanks to David Ahern for the heads up.

Signed-off-by: David S. Miller <davem@davemloft.net>
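As an illustration of how the two sides combine in the resolution, the dump-path call ends up carrying both the policy argument from 'net' and the extack argument from 'net-next'. A minimal sketch, assuming the surrounding variable names (cb, tcm, tca) from tc_dump_tfilter() in net/sched/cls_api.c; not the verbatim merged hunk:

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
			  rtm_tca_policy,	/* 5th argument, from 'net' */
			  cb->extack);		/* 6th argument, from 'net-next' */
	if (err < 0)
		return err;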
@@ -320,9 +320,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
 	phydev->advertising = phydev->supported;

 	/* The internal PHY has its link interrupts routed to the
-	 * Ethernet MAC ISRs
+	 * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+	 * that prevents the signaling of link UP interrupts when
+	 * the link operates at 10Mbps, so fallback to polling for
+	 * those versions of GENET.
 	 */
-	if (priv->internal_phy)
+	if (priv->internal_phy && !GENET_IS_V5(priv))
 		dev->phydev->irq = PHY_IGNORE_INTERRUPT;

 	return 0;
@@ -452,6 +452,10 @@ struct bufdesc_ex {
  * initialisation.
  */
 #define FEC_QUIRK_MIB_CLEAR	(1 << 15)
+/* Only i.MX25/i.MX27/i.MX28 controller supports FRBR,FRSR registers,
+ * those FIFO receive registers are resolved in other platforms.
+ */
+#define FEC_QUIRK_HAS_FRREG	(1 << 16)

 struct bufdesc_prop {
 	int qid;
@@ -91,14 +91,16 @@ static struct platform_device_id fec_devtype[] = {
 		.driver_data = 0,
 	}, {
 		.name = "imx25-fec",
-		.driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
+		.driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+			       FEC_QUIRK_HAS_FRREG,
 	}, {
 		.name = "imx27-fec",
-		.driver_data = FEC_QUIRK_MIB_CLEAR,
+		.driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
 	}, {
 		.name = "imx28-fec",
 		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
-				FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
+				FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+				FEC_QUIRK_HAS_FRREG,
 	}, {
 		.name = "imx6q-fec",
 		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -2162,7 +2164,13 @@ static void fec_enet_get_regs(struct net_device *ndev,
 	memset(buf, 0, regs->len);

 	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
-		off = fec_enet_register_offset[i] / 4;
+		off = fec_enet_register_offset[i];
+
+		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
+		    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
+			continue;
+
+		off >>= 2;
 		buf[off] = readl(&theregs[off]);
 	}
 }
@@ -433,10 +433,9 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)

 static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
 					      struct mlx5_wq_cyc *wq,
-					      u16 pi, u16 frag_pi)
+					      u16 pi, u16 nnops)
 {
 	struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
-	u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;

 	edge_wi = wi + nnops;
@@ -455,15 +454,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	struct mlx5_wq_cyc *wq = &sq->wq;
 	struct mlx5e_umr_wqe *umr_wqe;
 	u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
-	u16 pi, frag_pi;
+	u16 pi, contig_wqebbs_room;
 	int err;
 	int i;

 	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-	frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-
-	if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
-		mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
+	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+	if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
+		mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 	}
@@ -290,10 +290,9 @@ dma_unmap_wqe_err:

 static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
 					   struct mlx5_wq_cyc *wq,
-					   u16 pi, u16 frag_pi)
+					   u16 pi, u16 nnops)
 {
 	struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
-	u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;

 	edge_wi = wi + nnops;
@@ -348,8 +347,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	struct mlx5e_tx_wqe_info *wi;

 	struct mlx5e_sq_stats *stats = sq->stats;
+	u16 headlen, ihs, contig_wqebbs_room;
 	u16 ds_cnt, ds_cnt_inl = 0;
-	u16 headlen, ihs, frag_pi;
 	u8 num_wqebbs, opcode;
 	u32 num_bytes;
 	int num_dma;
@@ -386,9 +385,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	}

 	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-	frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-	if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
-		mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
+	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+	if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 		mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
 	}
@@ -636,7 +635,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	struct mlx5e_tx_wqe_info *wi;

 	struct mlx5e_sq_stats *stats = sq->stats;
-	u16 headlen, ihs, pi, frag_pi;
+	u16 headlen, ihs, pi, contig_wqebbs_room;
 	u16 ds_cnt, ds_cnt_inl = 0;
 	u8 num_wqebbs, opcode;
 	u32 num_bytes;
@@ -672,13 +671,14 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	}

 	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-	frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-	if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
+	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+	if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-		mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
 	}

-	mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
+	mlx5i_sq_fetch_wqe(sq, &wqe, pi);

 	/* fill wqe */
 	wi = &sq->db.wqe_info[pi];
@@ -273,7 +273,7 @@ static void eq_pf_process(struct mlx5_eq *eq)
 		case MLX5_PFAULT_SUBTYPE_WQE:
 			/* WQE based event */
 			pfault->type =
-				be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
+				(be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
 			pfault->token =
 				be32_to_cpu(pf_eqe->wqe.token);
 			pfault->wqe.wq_num =
@@ -245,7 +245,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
 		return ERR_PTR(res);
 	}

-	/* Context will be freed by wait func after completion */
+	/* Context should be freed by the caller after completion. */
 	return context;
 }
@@ -418,10 +418,8 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
 	cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
 	cmd.flags = htonl(flags);
 	context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
-	if (IS_ERR(context)) {
-		err = PTR_ERR(context);
-		goto out;
-	}
+	if (IS_ERR(context))
+		return PTR_ERR(context);

 	err = mlx5_fpga_ipsec_cmd_wait(context);
 	if (err)
@@ -435,6 +433,7 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
 	}

 out:
+	kfree(context);
 	return err;
 }
@@ -110,12 +110,11 @@ struct mlx5i_tx_wqe {

 static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
 				      struct mlx5i_tx_wqe **wqe,
-				      u16 *pi)
+				      u16 pi)
 {
 	struct mlx5_wq_cyc *wq = &sq->wq;

-	*pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-	*wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+	*wqe = mlx5_wq_cyc_get_wqe(wq, pi);
 	memset(*wqe, 0, sizeof(**wqe));
 }
@@ -39,11 +39,6 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
 	return (u32)wq->fbc.sz_m1 + 1;
 }

-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
-{
-	return wq->fbc.frag_sz_m1 + 1;
-}
-
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
 {
 	return wq->fbc.sz_m1 + 1;
@@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		       void *wqc, struct mlx5_wq_cyc *wq,
 		       struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);

 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
@@ -140,11 +139,6 @@ static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
 	return ctr & wq->fbc.sz_m1;
 }

-static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
-{
-	return ctr & wq->fbc.frag_sz_m1;
-}
-
 static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
 {
 	return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
@@ -160,6 +154,11 @@ static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
 	return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
 }

+static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix)
+{
+	return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1;
+}
+
 static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
 {
 	int equal = (cc1 == cc2);
@@ -1055,6 +1055,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 err_driver_init:
 	mlxsw_thermal_fini(mlxsw_core->thermal);
 err_thermal_init:
+	mlxsw_hwmon_fini(mlxsw_core->hwmon);
 err_hwmon_init:
 	if (!reload)
 		devlink_unregister(devlink);
@@ -1088,6 +1089,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
 	if (mlxsw_core->driver->fini)
 		mlxsw_core->driver->fini(mlxsw_core);
 	mlxsw_thermal_fini(mlxsw_core->thermal);
+	mlxsw_hwmon_fini(mlxsw_core->hwmon);
 	if (!reload)
 		devlink_unregister(devlink);
 	mlxsw_emad_fini(mlxsw_core);
@@ -359,6 +359,10 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
 	return 0;
 }

+static inline void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+}
+
 #endif

 struct mlxsw_thermal;
@@ -303,8 +303,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
 	struct device *hwmon_dev;
 	int err;

-	mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon),
-				   GFP_KERNEL);
+	mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
 	if (!mlxsw_hwmon)
 		return -ENOMEM;
 	mlxsw_hwmon->core = mlxsw_core;
@@ -321,10 +320,9 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
 	mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
 	mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;

-	hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev,
-							   "mlxsw",
-							   mlxsw_hwmon,
-							   mlxsw_hwmon->groups);
+	hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
+						      "mlxsw", mlxsw_hwmon,
+						      mlxsw_hwmon->groups);
 	if (IS_ERR(hwmon_dev)) {
 		err = PTR_ERR(hwmon_dev);
 		goto err_hwmon_register;
@@ -337,5 +335,12 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
 err_hwmon_register:
 err_fans_init:
 err_temp_init:
+	kfree(mlxsw_hwmon);
 	return err;
 }
+
+void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+	hwmon_device_unregister(mlxsw_hwmon->hwmon_dev);
+	kfree(mlxsw_hwmon);
+}
@@ -133,9 +133,9 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
 {
 	unsigned int val, timeout = 10;

-	/* Wait for the issued mac table command to be completed, or timeout.
-	 * When the command read from ANA_TABLES_MACACCESS is
-	 * MACACCESS_CMD_IDLE, the issued command completed successfully.
+	/* Wait for the issued vlan table command to be completed, or timeout.
+	 * When the command read from ANA_TABLES_VLANACCESS is
+	 * VLANACCESS_CMD_IDLE, the issued command completed successfully.
 	 */
 	do {
 		val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
@@ -399,12 +399,14 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,

 	switch (off) {
 	case offsetof(struct iphdr, daddr):
-		set_ip_addr->ipv4_dst_mask = mask;
-		set_ip_addr->ipv4_dst = exact;
+		set_ip_addr->ipv4_dst_mask |= mask;
+		set_ip_addr->ipv4_dst &= ~mask;
+		set_ip_addr->ipv4_dst |= exact & mask;
 		break;
 	case offsetof(struct iphdr, saddr):
-		set_ip_addr->ipv4_src_mask = mask;
-		set_ip_addr->ipv4_src = exact;
+		set_ip_addr->ipv4_src_mask |= mask;
+		set_ip_addr->ipv4_src &= ~mask;
+		set_ip_addr->ipv4_src |= exact & mask;
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -418,11 +420,12 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
 }

 static void
-nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
+nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
 		      struct nfp_fl_set_ipv6_addr *ip6)
 {
-	ip6->ipv6[idx % 4].mask = mask;
-	ip6->ipv6[idx % 4].exact = exact;
+	ip6->ipv6[word].mask |= mask;
+	ip6->ipv6[word].exact &= ~mask;
+	ip6->ipv6[word].exact |= exact & mask;

 	ip6->reserved = cpu_to_be16(0);
 	ip6->head.jump_id = opcode_tag;
@@ -435,6 +438,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
 	       struct nfp_fl_set_ipv6_addr *ip_src)
 {
 	__be32 exact, mask;
+	u8 word;

 	/* We are expecting tcf_pedit to return a big endian value */
 	mask = (__force __be32)~tcf_pedit_mask(action, idx);
@@ -443,17 +447,20 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
 	if (exact & ~mask)
 		return -EOPNOTSUPP;

-	if (off < offsetof(struct ipv6hdr, saddr))
+	if (off < offsetof(struct ipv6hdr, saddr)) {
 		return -EOPNOTSUPP;
-	else if (off < offsetof(struct ipv6hdr, daddr))
-		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
+	} else if (off < offsetof(struct ipv6hdr, daddr)) {
+		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
+		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
 				      exact, mask, ip_src);
-	else if (off < offsetof(struct ipv6hdr, daddr) +
-		 sizeof(struct in6_addr))
-		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
+	} else if (off < offsetof(struct ipv6hdr, daddr) +
+		   sizeof(struct in6_addr)) {
+		word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
+		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
 				      exact, mask, ip_dst);
-	else
+	} else {
 		return -EOPNOTSUPP;
+	}

 	return 0;
 }
@@ -511,7 +518,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
 	struct nfp_fl_set_eth set_eth;
 	enum pedit_header_type htype;
 	int idx, nkeys, err;
-	size_t act_size;
+	size_t act_size = 0;
 	u32 offset, cmd;
 	u8 ip_proto = 0;
@@ -569,7 +576,9 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
 		act_size = sizeof(set_eth);
 		memcpy(nfp_action, &set_eth, act_size);
 		*a_len += act_size;
-	} else if (set_ip_addr.head.len_lw) {
+	}
+	if (set_ip_addr.head.len_lw) {
+		nfp_action += act_size;
 		act_size = sizeof(set_ip_addr);
 		memcpy(nfp_action, &set_ip_addr, act_size);
 		*a_len += act_size;
@@ -577,10 +586,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
 		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
 		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
 				 nfp_fl_csum_l4_to_flag(ip_proto);
-	} else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+	}
+	if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
 		/* TC compiles set src and dst IPv6 address as a single action,
 		 * the hardware requires this to be 2 separate actions.
 		 */
+		nfp_action += act_size;
 		act_size = sizeof(set_ip6_src);
 		memcpy(nfp_action, &set_ip6_src, act_size);
 		*a_len += act_size;
@@ -593,6 +604,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
 		/* Hardware will automatically fix TCP/UDP checksum. */
 		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
 	} else if (set_ip6_dst.head.len_lw) {
+		nfp_action += act_size;
 		act_size = sizeof(set_ip6_dst);
 		memcpy(nfp_action, &set_ip6_dst, act_size);
 		*a_len += act_size;
@@ -600,13 +612,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
 		/* Hardware will automatically fix TCP/UDP checksum. */
 		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
 	} else if (set_ip6_src.head.len_lw) {
+		nfp_action += act_size;
 		act_size = sizeof(set_ip6_src);
 		memcpy(nfp_action, &set_ip6_src, act_size);
 		*a_len += act_size;

 		/* Hardware will automatically fix TCP/UDP checksum. */
 		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
-	} else if (set_tport.head.len_lw) {
+	}
+	if (set_tport.head.len_lw) {
+		nfp_action += act_size;
 		act_size = sizeof(set_tport);
 		memcpy(nfp_action, &set_tport, act_size);
 		*a_len += act_size;
@@ -228,7 +228,7 @@ static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
 		attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
 		GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
 		(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
-		 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)",
+		 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
 		GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));

 out:
@@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev)

 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
 	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
-	ql_write_nvram_reg(qdev, spir,
-			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
 }

 /*
@@ -6528,17 +6528,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
 	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
 	struct net_device *dev = tp->dev;
 	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
-	int work_done= 0;
+	int work_done;
 	u16 status;

 	status = rtl_get_events(tp);
 	rtl_ack_events(tp, status & ~tp->event_slow);

-	if (status & RTL_EVENT_NAPI_RX)
-		work_done = rtl_rx(dev, tp, (u32) budget);
+	work_done = rtl_rx(dev, tp, (u32) budget);

-	if (status & RTL_EVENT_NAPI_TX)
-		rtl_tx(dev, tp);
+	rtl_tx(dev, tp);

 	if (status & tp->event_slow) {
 		enable_mask &= ~tp->event_slow;
@@ -7071,20 +7069,12 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
 {
 	unsigned int flags;

-	switch (tp->mac_version) {
-	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
 		RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
 		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
 		RTL_W8(tp, Cfg9346, Cfg9346_Lock);
 		flags = PCI_IRQ_LEGACY;
-		break;
-	case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_40:
-		/* This version was reported to have issues with resume
-		 * from suspend when using MSI-X
-		 */
-		flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
-		break;
-	default:
+	} else {
 		flags = PCI_IRQ_ALL_TYPES;
 	}