Merge branches 'core', 'cxgb4', 'ipath', 'iser', 'lockdep', 'mlx4', 'nes', 'ocrdma', 'qib' and 'raw-qp' into for-linus
@@ -50,7 +50,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
         struct ib_cq *ibcq;
 
         if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
-                printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+                pr_warn("Unexpected event type %d "
                         "on CQ %06x\n", type, cq->cqn);
                 return;
         }
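Note: the converted calls drop the explicit "mlx4_ib: " prefix from their format strings. That is only lossless if the driver funnels pr_warn()/pr_err()/pr_debug() through a pr_fmt prefix; a minimal sketch of that idiom follows (the prefix string and its placement are assumptions, not shown in this diff):

    /* Must be defined before <linux/printk.h> is pulled in,
     * typically at the top of the driver's private header. */
    #define pr_fmt(fmt) "mlx4_ib: " fmt

    #include <linux/printk.h>

    /* pr_warn("Unexpected event type %d on CQ %06x\n", ...) now
     * prints "mlx4_ib: Unexpected event type ..." at KERN_WARNING,
     * matching the old printk output. */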
@@ -222,6 +222,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
                 uar = &dev->priv_uar;
         }
 
+        if (dev->eq_table)
+                vector = dev->eq_table[vector % ibdev->num_comp_vectors];
+
         err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
                             cq->db.dma, &cq->mcq, vector, 0);
         if (err)
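Note: the new lookup translates whatever completion vector the consumer requested into one of the EQs reserved by mlx4_ib_alloc_eqs() (added further down in this series). A hypothetical walk-through, with the values assumed for illustration:

    /* Say the device advertises num_comp_vectors = 6 after the EQ
     * table is built (4 per-port EQs followed by 2 legacy vectors).
     * A consumer asking for vector 9 is folded back into range and
     * mapped to the EQ number the driver actually reserved: */
    int requested = 9;
    int vector = dev->eq_table[requested % ibdev->num_comp_vectors];
    /* = eq_table[3], one of the per-port EQs */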
@@ -463,7 +466,7 @@ static void dump_cqe(void *cqe)
 {
         __be32 *buf = cqe;
 
-        printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
+        pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
                be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
                be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
                be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
@@ -473,7 +476,7 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
                                      struct ib_wc *wc)
 {
         if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
-                printk(KERN_DEBUG "local QP operation err "
+                pr_debug("local QP operation err "
                        "(QPN %06x, WQE index %x, vendor syndrome %02x, "
                        "opcode = %02x)\n",
                        be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
@@ -576,7 +579,7 @@ repoll:
 
         if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
                      is_send)) {
-                printk(KERN_WARNING "Completion for NOP opcode detected!\n");
+                pr_warn("Completion for NOP opcode detected!\n");
                 return -EINVAL;
         }
 
@@ -606,7 +609,7 @@ repoll:
                 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
                                        be32_to_cpu(cqe->vlan_my_qpn));
                 if (unlikely(!mqp)) {
-                        printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
+                        pr_warn("CQ %06x with entry for unknown QPN %06x\n",
                                cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
                         return -EINVAL;
                 }
@@ -789,7 +789,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
                 list_del(&ge->list);
                 kfree(ge);
         } else
-                printk(KERN_WARNING "could not find mgid entry\n");
+                pr_warn("could not find mgid entry\n");
 
         mutex_unlock(&mqp->mutex);
 
@@ -902,7 +902,7 @@ static void update_gids_task(struct work_struct *work)
 
         mailbox = mlx4_alloc_cmd_mailbox(dev);
         if (IS_ERR(mailbox)) {
-                printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
+                pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
                 return;
         }
 
@@ -913,7 +913,7 @@ static void update_gids_task(struct work_struct *work)
                        1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                        MLX4_CMD_NATIVE);
         if (err)
-                printk(KERN_WARNING "set port command failed\n");
+                pr_warn("set port command failed\n");
         else {
                 memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
                 event.device = &gw->dev->ib_dev;
@@ -1076,18 +1076,98 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event
         return NOTIFY_DONE;
 }
 
+static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
+{
+        char name[32];
+        int eq_per_port = 0;
+        int added_eqs = 0;
+        int total_eqs = 0;
+        int i, j, eq;
+
+        /* Init eq table */
+        ibdev->eq_table = NULL;
+        ibdev->eq_added = 0;
+
+        /* Legacy mode? */
+        if (dev->caps.comp_pool == 0)
+                return;
+
+        eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
+                                           dev->caps.num_ports);
+
+        /* Init eq table */
+        added_eqs = 0;
+        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+                added_eqs += eq_per_port;
+
+        total_eqs = dev->caps.num_comp_vectors + added_eqs;
+
+        ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
+        if (!ibdev->eq_table)
+                return;
+
+        ibdev->eq_added = added_eqs;
+
+        eq = 0;
+        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
+                for (j = 0; j < eq_per_port; j++) {
+                        sprintf(name, "mlx4-ib-%d-%d@%s",
+                                i, j, dev->pdev->bus->name);
+                        /* Set IRQ for specific name (per ring) */
+                        if (mlx4_assign_eq(dev, name, &ibdev->eq_table[eq])) {
+                                /* Use legacy (same as mlx4_en driver) */
+                                pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
+                                ibdev->eq_table[eq] =
+                                        (eq % dev->caps.num_comp_vectors);
+                        }
+                        eq++;
+                }
+        }
+
+        /* Fill the rest of the vector with legacy EQs */
+        for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
+                ibdev->eq_table[eq++] = i;
+
+        /* Advertise the new number of EQs to clients */
+        ibdev->ib_dev.num_comp_vectors = total_eqs;
+}
+
+static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
+{
+        int i;
+        int total_eqs;
+
+        /* Reset the advertised EQ number */
+        ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+
+        /* Free only the added eqs */
+        for (i = 0; i < ibdev->eq_added; i++) {
+                /* Don't free legacy eqs if used */
+                if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
+                        continue;
+                mlx4_release_eq(dev, ibdev->eq_table[i]);
+        }
+
+        total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added;
+        memset(ibdev->eq_table, 0, total_eqs * sizeof(int));
+        kfree(ibdev->eq_table);
+
+        ibdev->eq_table = NULL;
+        ibdev->eq_added = 0;
+}
+
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
         struct mlx4_ib_dev *ibdev;
         int num_ports = 0;
-        int i;
+        int i, j;
         int err;
         struct mlx4_ib_iboe *iboe;
 
-        printk_once(KERN_INFO "%s", mlx4_ib_version);
+        pr_info_once("%s", mlx4_ib_version);
 
         if (mlx4_is_mfunc(dev)) {
-                printk(KERN_WARNING "IB not yet supported in SRIOV\n");
+                pr_warn("IB not yet supported in SRIOV\n");
                 return NULL;
         }
 
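Note: to make the sizing arithmetic in mlx4_ib_alloc_eqs() concrete, here is a worked example under assumed capabilities (comp_pool = 6, num_ports = 2, num_comp_vectors = 2, both ports IB):

    /* eq_per_port = rounddown_pow_of_two(6 / 2) = 2
     * added_eqs   = 2 IB ports * 2 EQs/port   = 4
     * total_eqs   = 2 legacy + 4 added        = 6
     *
     * eq_table after the fill loops:
     *   [0..3] = EQ numbers returned by mlx4_assign_eq()
     *            (named "mlx4-ib-<port>-<ring>@<bus>")
     *   [4..5] = 0, 1   (the legacy completion vectors)
     *
     * ibdev->ib_dev.num_comp_vectors is then advertised as 6, so the
     * modulo lookup in mlx4_ib_create_cq() always lands in bounds. */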
@@ -1210,6 +1290,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                         (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
         }
 
+        mlx4_ib_alloc_eqs(dev, ibdev);
+
         spin_lock_init(&iboe->lock);
 
         if (init_node_data(ibdev))
@@ -1241,9 +1323,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                 goto err_reg;
         }
 
-        for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
+        for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
                 if (device_create_file(&ibdev->ib_dev.dev,
-                                       mlx4_class_attributes[i]))
+                                       mlx4_class_attributes[j]))
                         goto err_notif;
         }
 
@@ -1253,7 +1335,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
 err_notif:
         if (unregister_netdevice_notifier(&ibdev->iboe.nb))
-                printk(KERN_WARNING "failure unregistering notifier\n");
+                pr_warn("failure unregistering notifier\n");
         flush_workqueue(wq);
 
 err_reg:
@@ -1288,7 +1370,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
         ib_unregister_device(&ibdev->ib_dev);
         if (ibdev->iboe.nb.notifier_call) {
                 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
-                        printk(KERN_WARNING "failure unregistering notifier\n");
+                        pr_warn("failure unregistering notifier\n");
                 ibdev->iboe.nb.notifier_call = NULL;
         }
         iounmap(ibdev->uar_map);
@@ -1298,6 +1380,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
         mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
                 mlx4_CLOSE_PORT(dev, p);
 
+        mlx4_ib_free_eqs(dev, ibdev);
+
         mlx4_uar_free(dev, &ibdev->priv_uar);
         mlx4_pd_free(dev, ibdev->priv_pdn);
         ib_dealloc_device(&ibdev->ib_dev);
@@ -202,6 +202,8 @@ struct mlx4_ib_dev {
         bool ib_active;
         struct mlx4_ib_iboe iboe;
         int counters[MLX4_MAX_PORTS];
+        int *eq_table;
+        int eq_added;
 };
 
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
@@ -338,7 +338,7 @@ int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
 
         err = mlx4_SYNC_TPT(mdev);
         if (err)
-                printk(KERN_WARNING "mlx4_ib: SYNC_TPT error %d when "
+                pr_warn("SYNC_TPT error %d when "
                        "unmapping FMRs\n", err);
 
         return 0;
@@ -84,6 +84,11 @@ enum {
         MLX4_IB_CACHE_LINE_SIZE = 64,
 };
 
+enum {
+        MLX4_RAW_QP_MTU         = 7,
+        MLX4_RAW_QP_MSGMAX      = 31,
+};
+
 static const __be32 mlx4_ib_opcode[] = {
         [IB_WR_SEND]                    = cpu_to_be32(MLX4_OPCODE_SEND),
         [IB_WR_LSO]                     = cpu_to_be32(MLX4_OPCODE_LSO),
@@ -256,7 +261,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
                 event.event = IB_EVENT_QP_ACCESS_ERR;
                 break;
         default:
-                printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+                pr_warn("Unexpected event type %d "
                         "on QP %06x\n", type, qp->qpn);
                 return;
         }
@@ -573,7 +578,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
         if (sqpn) {
                 qpn = sqpn;
         } else {
-                err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+                /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
+                 * BlueFlame setup flow wrongly causes VLAN insertion. */
+                if (init_attr->qp_type == IB_QPT_RAW_PACKET)
+                        err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
+                else
+                        err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
                 if (err)
                         goto err_wrid;
         }
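Note: the alignment argument is what enforces the comment's constraint: reserving a range of one QPN with alignment 1 << 8 guarantees the low eight bits of the returned QPN are zero. The rationale, as I read the comment (the hardware detail is an inference, not stated in the diff), is that the BlueFlame doorbell path derives per-WQE flags from low QPN bits, so a nonzero low byte could be misread as a VLAN-insertion request:

    /* mlx4_qp_reserve_range(dev, cnt, align, &base):
     *   cnt = 1, align = 1 << 8  ->  base is a multiple of 256 */
    WARN_ON(qpn & 0xff);  /* holds for IB_QPT_RAW_PACKET after this change */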
@@ -715,7 +725,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
         if (qp->state != IB_QPS_RESET)
                 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
                                    MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
-                        printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
+                        pr_warn("modify QP %06x to RESET failed.\n",
                                qp->mqp.qpn);
 
         get_cqs(qp, &send_cq, &recv_cq);
@@ -791,6 +801,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
         case IB_QPT_RC:
         case IB_QPT_UC:
         case IB_QPT_UD:
+        case IB_QPT_RAW_PACKET:
         {
                 qp = kzalloc(sizeof *qp, GFP_KERNEL);
                 if (!qp)
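Note: with IB_QPT_RAW_PACKET accepted here alongside RC/UC/UD, a verbs consumer can create one like any other QP type. A hedged usage sketch (the PD and CQs are assumed to exist already; the capacities are illustrative, not taken from this diff):

    struct ib_qp_init_attr init_attr = {
            .qp_type = IB_QPT_RAW_PACKET,
            .send_cq = send_cq,
            .recv_cq = recv_cq,
            .cap = {
                    .max_send_wr  = 64,
                    .max_recv_wr  = 64,
                    .max_send_sge = 1,
                    .max_recv_sge = 1,
            },
    };
    struct ib_qp *qp = ib_create_qp(pd, &init_attr);

    if (IS_ERR(qp))
            return PTR_ERR(qp);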
@@ -872,7 +883,8 @@ static int to_mlx4_st(enum ib_qp_type type)
         case IB_QPT_XRC_INI:
         case IB_QPT_XRC_TGT:    return MLX4_QP_ST_XRC;
         case IB_QPT_SMI:
-        case IB_QPT_GSI:        return MLX4_QP_ST_MLX;
+        case IB_QPT_GSI:
+        case IB_QPT_RAW_PACKET: return MLX4_QP_ST_MLX;
         default:                return -1;
         }
 }
@@ -946,7 +958,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 
         if (ah->ah_flags & IB_AH_GRH) {
                 if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
-                        printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
+                        pr_err("sgid_index (%u) too large. max is %d\n",
                                ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
                         return -1;
                 }
@@ -1042,6 +1054,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 
         if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
                 context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+        else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+                context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
         else if (ibqp->qp_type == IB_QPT_UD) {
                 if (qp->flags & MLX4_IB_QP_LSO)
                         context->mtu_msgmax = (IB_MTU_4096 << 5) |
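Note: mtu_msgmax packs a 3-bit MTU code above a 5-bit log2 of the maximum message size, which is why the raw-QP constants are 7 and 31. Spelled out (the field layout is inferred from the surrounding encodings, e.g. IB_MTU_4096 == 5):

    /* (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX
     *   = (7 << 5) | 31
     *   = 0xE0 | 0x1F = 0xFF
     * i.e. the maximum encodable MTU code and a 2^31-byte message
     * cap, mirroring (IB_MTU_4096 << 5) | 11 used for GSI/SMI. */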
@@ -1050,7 +1064,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                         context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
         } else if (attr_mask & IB_QP_PATH_MTU) {
                 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
-                        printk(KERN_ERR "path MTU (%u) is invalid\n",
+                        pr_err("path MTU (%u) is invalid\n",
                                attr->path_mtu);
                         goto out;
                 }
@@ -1200,7 +1214,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
         if (cur_state == IB_QPS_INIT &&
             new_state == IB_QPS_RTR  &&
             (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
-             ibqp->qp_type == IB_QPT_UD)) {
+             ibqp->qp_type == IB_QPT_UD ||
+             ibqp->qp_type == IB_QPT_RAW_PACKET)) {
                 context->pri_path.sched_queue = (qp->port - 1) << 6;
                 if (is_qp0(dev, qp))
                         context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
@@ -1266,7 +1281,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
         if (is_qp0(dev, qp)) {
                 if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
                         if (mlx4_INIT_PORT(dev->dev, qp->port))
-                                printk(KERN_WARNING "INIT_PORT failed for port %d\n",
+                                pr_warn("INIT_PORT failed for port %d\n",
                                        qp->port);
 
                 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
@@ -1319,6 +1334,11 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 goto out;
         }
 
+        if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
+            (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
+             IB_LINK_LAYER_ETHERNET))
+                goto out;
+
         if (attr_mask & IB_QP_PKEY_INDEX) {
                 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
                 if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])
@@ -1424,6 +1444,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 
         if (is_eth) {
                 u8 *smac;
+                u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
+
+                mlx->sched_prio = cpu_to_be16(pcp);
 
                 memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
                 /* FIXME: cache smac value? */
@@ -1434,10 +1457,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                 if (!is_vlan) {
                         sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
                 } else {
-                        u16 pcp;
-
                         sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
-                        pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
                         sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
                 }
         } else {
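Note: the hoisted pcp computation pulls the top three bits of sl_tclass_flowlabel (the service level as stored for Ethernet ports) and repositions them as TCI bits 15:13, the 802.1p priority. A worked example with an assumed value:

    /* sl_tclass_flowlabel = 0xA0000000
     *   >> 29  -> 0b101 = priority 5
     *   << 13  -> 0xA000 (PCP field of the VLAN TCI)
     * with vlan = 100 (0x0064):
     *   tag = 0x0064 | 0xA000 = 0xA064 */
    u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
    u16 tag = vlan | pcp;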
@@ -1460,16 +1480,16 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
         header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
 
         if (0) {
-                printk(KERN_ERR "built UD header of size %d:\n", header_size);
+                pr_err("built UD header of size %d:\n", header_size);
                 for (i = 0; i < header_size / 4; ++i) {
                         if (i % 8 == 0)
-                                printk(" [%02x] ", i * 4);
-                        printk(" %08x",
-                               be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
+                                pr_err(" [%02x] ", i * 4);
+                        pr_cont(" %08x",
+                                be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
                         if ((i + 1) % 8 == 0)
-                                printk("\n");
+                                pr_cont("\n");
                 }
-                printk("\n");
+                pr_err("\n");
         }
 
         /*
@@ -59,7 +59,7 @@ static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
                 event.event = IB_EVENT_SRQ_ERR;
                 break;
         default:
-                printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+                pr_warn("Unexpected event type %d "
                         "on SRQ %06x\n", type, srq->srqn);
                 return;
         }