Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull infiniband/RDMA updates from Roland Dreier:
 - large set of iSER initiator improvements
 - hardware driver fixes for cxgb4, mlx5 and ocrdma
 - small fixes to core midlayer

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (47 commits)
  RDMA/cxgb4: Fix ntuple calculation for ipv6 and remove duplicate line
  RDMA/cxgb4: Add missing neigh_release in find_route
  RDMA/cxgb4: Take IPv6 into account for best_mtu and set_emss
  RDMA/cxgb4: Make c4iw_wr_log_size_order static
  IB/core: Fix XRC race condition in ib_uverbs_open_qp
  IB/core: Clear AH attr variable to prevent garbage data
  RDMA/ocrdma: Save the bit environment, spare unnecessary parentheses
  RDMA/ocrdma: The kernel has a perfectly good BIT() macro - use it
  RDMA/ocrdma: Don't memset() buffers we just allocated with kzalloc()
  RDMA/ocrdma: Remove an unused-label warning
  RDMA/ocrdma: Convert kernel VA to PA for mmap in user
  RDMA/ocrdma: Get vlan tag from ib_qp_attrs
  RDMA/ocrdma: Add default GID at index 0
  IB/mlx5, iser, isert: Add Signature API additions
  Target/iser: Centralize ib_sig_domain setting
  IB/iser: Centralize ib_sig_domain settings
  IB/mlx5: Use extended internal signature layout
  IB/iser: Set IP_CSUM as default guard type
  IB/iser: Remove redundant assignment
  IB/mlx5: Use enumerations for PI copy mask
  ...
Linus Torvalds
2014-10-19 12:29:23 -07:00
commit 2eb7f910c1
21 changed files with 1369 additions and 950 deletions


@@ -1317,6 +1317,11 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 	path->rlid = cpu_to_be16(ah->dlid);
 	if (ah->ah_flags & IB_AH_GRH) {
+		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
+			pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
+			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
+			return -EINVAL;
+		}
 		path->grh_mlid |= 1 << 7;
 		path->mgid_index = ah->grh.sgid_index;
 		path->hop_limit = ah->grh.hop_limit;
@@ -1332,22 +1337,6 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 	path->static_rate = err;
 	path->port = port;
-	if (ah->ah_flags & IB_AH_GRH) {
-		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
-			pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
-			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
-			return -EINVAL;
-		}
-		path->grh_mlid |= 1 << 7;
-		path->mgid_index = ah->grh.sgid_index;
-		path->hop_limit = ah->grh.hop_limit;
-		path->tclass_flowlabel =
-			cpu_to_be32((ah->grh.traffic_class << 20) |
-				    (ah->grh.flow_label));
-		memcpy(path->rgid, ah->grh.dgid.raw, 16);
-	}
 	if (attr_mask & IB_QP_TIMEOUT)
 		path->ackto_lt = attr->timeout << 3;
@@ -2039,56 +2028,31 @@ static u8 bs_selector(int block_size)
 	}
 }
-static int format_selector(struct ib_sig_attrs *attr,
-			   struct ib_sig_domain *domain,
-			   int *selector)
+static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
+			      struct mlx5_bsf_inl *inl)
 {
+	/* Valid inline section and allow BSF refresh */
+	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
+				       MLX5_BSF_REFRESH_DIF);
+	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
+	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
+	/* repeating block */
+	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
+	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
+			MLX5_DIF_CRC : MLX5_DIF_IPCS;
-#define FORMAT_DIF_NONE		0
-#define FORMAT_DIF_CRC_INC	8
-#define FORMAT_DIF_CRC_NO_INC	12
-#define FORMAT_DIF_CSUM_INC	13
-#define FORMAT_DIF_CSUM_NO_INC	14
+	if (domain->sig.dif.ref_remap)
+		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
-	switch (domain->sig.dif.type) {
-	case IB_T10DIF_NONE:
-		/* No DIF */
-		*selector = FORMAT_DIF_NONE;
-		break;
-	case IB_T10DIF_TYPE1: /* Fall through */
-	case IB_T10DIF_TYPE2:
-		switch (domain->sig.dif.bg_type) {
-		case IB_T10DIF_CRC:
-			*selector = FORMAT_DIF_CRC_INC;
-			break;
-		case IB_T10DIF_CSUM:
-			*selector = FORMAT_DIF_CSUM_INC;
-			break;
-		default:
-			return 1;
-		}
-		break;
-	case IB_T10DIF_TYPE3:
-		switch (domain->sig.dif.bg_type) {
-		case IB_T10DIF_CRC:
-			*selector = domain->sig.dif.type3_inc_reftag ?
-					FORMAT_DIF_CRC_INC :
-					FORMAT_DIF_CRC_NO_INC;
-			break;
-		case IB_T10DIF_CSUM:
-			*selector = domain->sig.dif.type3_inc_reftag ?
-					FORMAT_DIF_CSUM_INC :
-					FORMAT_DIF_CSUM_NO_INC;
-			break;
-		default:
-			return 1;
-		}
-		break;
-	default:
-		return 1;
+	if (domain->sig.dif.app_escape) {
+		if (domain->sig.dif.ref_escape)
+			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
+		else
+			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
 	}
-	return 0;
+	inl->dif_app_bitmask_check =
+		cpu_to_be16(domain->sig.dif.apptag_check_mask);
 }
 static int mlx5_set_bsf(struct ib_mr *sig_mr,
@@ -2099,45 +2063,49 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
 	struct mlx5_bsf_basic *basic = &bsf->basic;
 	struct ib_sig_domain *mem = &sig_attrs->mem;
 	struct ib_sig_domain *wire = &sig_attrs->wire;
-	int ret, selector;
 	memset(bsf, 0, sizeof(*bsf));
-	switch (sig_attrs->mem.sig_type) {
-	case IB_SIG_TYPE_T10_DIF:
-		if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF)
-			return -EINVAL;
-		/* Input domain check byte mask */
-		basic->check_byte_mask = sig_attrs->check_mask;
+	/* Basic + Extended + Inline */
+	basic->bsf_size_sbs = 1 << 7;
+	/* Input domain check byte mask */
+	basic->check_byte_mask = sig_attrs->check_mask;
+	basic->raw_data_size = cpu_to_be32(data_size);
+	/* Memory domain */
+	switch (sig_attrs->mem.sig_type) {
+	case IB_SIG_TYPE_NONE:
+		break;
+	case IB_SIG_TYPE_T10_DIF:
+		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
+		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
+		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* Wire domain */
+	switch (sig_attrs->wire.sig_type) {
+	case IB_SIG_TYPE_NONE:
+		break;
+	case IB_SIG_TYPE_T10_DIF:
 		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
-		    mem->sig.dif.type == wire->sig.dif.type) {
+		    mem->sig_type == wire->sig_type) {
 			/* Same block structure */
-			basic->bsf_size_sbs = 1 << 4;
+			basic->bsf_size_sbs |= 1 << 4;
 			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
-				basic->wire.copy_byte_mask |= 0xc0;
+				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
 			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
-				basic->wire.copy_byte_mask |= 0x30;
+				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
 			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
-				basic->wire.copy_byte_mask |= 0x0f;
+				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
 		} else
 			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
-		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
-		basic->raw_data_size = cpu_to_be32(data_size);
-		ret = format_selector(sig_attrs, mem, &selector);
-		if (ret)
-			return -EINVAL;
-		basic->m_bfs_psv = cpu_to_be32(selector << 24 |
-					       msig->psv_memory.psv_idx);
-		ret = format_selector(sig_attrs, wire, &selector);
-		if (ret)
-			return -EINVAL;
-		basic->w_bfs_psv = cpu_to_be32(selector << 24 |
-					       msig->psv_wire.psv_idx);
+		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
+		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
 		break;
 	default:
 		return -EINVAL;
 	}
@@ -2336,20 +2304,21 @@ static int set_psv_wr(struct ib_sig_domain *domain,
 	memset(psv_seg, 0, sizeof(*psv_seg));
 	psv_seg->psv_num = cpu_to_be32(psv_idx);
 	switch (domain->sig_type) {
+	case IB_SIG_TYPE_NONE:
+		break;
 	case IB_SIG_TYPE_T10_DIF:
 		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
 						     domain->sig.dif.app_tag);
 		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
-		*seg += sizeof(*psv_seg);
-		*size += sizeof(*psv_seg) / 16;
 		break;
 	default:
 		pr_err("Bad signature type given.\n");
 		return 1;
 	}
+	*seg += sizeof(*psv_seg);
+	*size += sizeof(*psv_seg) / 16;
 	return 0;
 }
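
For context on what the reworked signature API in the hunks above enables: the memory and wire domains of an ib_sig_attrs are now programmed independently (IB_SIG_TYPE_NONE vs. IB_SIG_TYPE_T10_DIF), and the per-domain DIF fields (bg_type, pi_interval, ref_tag, app_tag, ref_remap, apptag_check_mask) replace the old format_selector() encoding. The snippet below is a minimal sketch, not part of this series: it assumes only the field names visible in mlx5_set_bsf()/mlx5_fill_inl_bsf() above, and the helper name, the 512-byte interval, and the tag values are illustrative.

#include <rdma/ib_verbs.h>

/* Hypothetical consumer helper (not from this series): protect data with
 * T10-DIF CRC on the wire while the local memory buffer carries no PI. */
static void example_fill_sig_attrs(struct ib_sig_attrs *sig_attrs, u32 lba)
{
	memset(sig_attrs, 0, sizeof(*sig_attrs));

	/* Local memory domain: no protection information */
	sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;

	/* Wire domain: generate/verify T10-DIF per (assumed) 512-byte block */
	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
	sig_attrs->wire.sig.dif.pi_interval = 512;
	sig_attrs->wire.sig.dif.ref_tag = lba;		/* initial reference tag */
	sig_attrs->wire.sig.dif.app_tag = 0;
	sig_attrs->wire.sig.dif.ref_remap = true;	/* bump ref tag per block */
	sig_attrs->wire.sig.dif.apptag_check_mask = 0xffff;

	/* Consumed by mlx5_set_bsf() as basic->check_byte_mask; value is illustrative */
	sig_attrs->check_mask = 0xff;
}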