Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "This has been a slightly more active cycle than normal with ongoing
  core changes and quite a lot of collected driver updates.

   - Various driver fixes for bnxt_re, cxgb4, hns, mlx5, pvrdma, rxe

   - A new data transfer mode for HFI1 giving higher performance

   - Significant functional and bug-fix update to the mlx5
     On-Demand-Paging MR feature

   - A chip hang reset recovery system for hns

   - Change mm->pinned_vm to an atomic64

   - Update bnxt_re to support a new 57500 chip

   - A sane netlink 'rdma link add' method for creating rxe devices and
     fixing the various unregistration race conditions in rxe's
     unregister flow

   - Allow looking up objects by an ID over netlink

   - Various reworking of the core-to-driver interface:
      - drivers should not assume umem SGLs are in PAGE_SIZE chunks
      - ucontext is accessed via udata, not by other means
      - start to make the core code responsible for object memory allocation
      - drivers should convert struct device to struct ib_device via a helper
      - drivers have more tools to avoid use-after-unregister problems"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (280 commits)
  net/mlx5: ODP support for XRC transport is not enabled by default in FW
  IB/hfi1: Close race condition on user context disable and close
  RDMA/umem: Revert broken 'off by one' fix
  RDMA/umem: minor bug fix in error handling path
  RDMA/hns: Use GFP_ATOMIC in hns_roce_v2_modify_qp
  cxgb4: kfree mhp after the debug print
  IB/rdmavt: Fix concurrency panics in QP post_send and modify to error
  IB/rdmavt: Fix loopback send with invalidate ordering
  IB/iser: Fix dma_nents type definition
  IB/mlx5: Set correct write permissions for implicit ODP MR
  bnxt_re: Clean cq for kernel consumers only
  RDMA/uverbs: Don't do double free of allocated PD
  RDMA: Handle ucontext allocations by IB/core
  RDMA/core: Fix a WARN() message
  bnxt_re: fix the regression due to changes in alloc_pbl
  IB/mlx4: Increase the timeout for CM cache
  IB/core: Abort page fault handler silently during owning process exit
  IB/mlx5: Validate correct PD before prefetch MR
  IB/mlx5: Protect against prefetch of invalid MR
  RDMA/uverbs: Store PD pointer before it is overwritten
  ...
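Two hunks below (ipoib_add_one() and srp_add_one()) convert open-coded port loops to the rdma_for_each_port() iterator added this cycle. A minimal sketch of the pattern, assuming a hypothetical example_add_one() client hook (the macro and the rdma_* predicates are the real include/rdma/ib_verbs.h API):

    #include <rdma/ib_verbs.h>

    /* Visit every IB port of a device. rdma_for_each_port() insists on
     * an unsigned int iterator, which is why the hunks below also
     * retype 'p'. */
    static void example_add_one(struct ib_device *device)
    {
            unsigned int p;

            rdma_for_each_port (device, p) {
                    if (!rdma_protocol_ib(device, p))
                            continue;
                    /* per-port setup goes here */
            }
    }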
@@ -780,12 +780,12 @@ static inline void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *w
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 void ipoib_create_debug_files(struct net_device *dev);
 void ipoib_delete_debug_files(struct net_device *dev);
-int ipoib_register_debugfs(void);
+void ipoib_register_debugfs(void);
 void ipoib_unregister_debugfs(void);
 #else
 static inline void ipoib_create_debug_files(struct net_device *dev) { }
 static inline void ipoib_delete_debug_files(struct net_device *dev) { }
-static inline int ipoib_register_debugfs(void) { return 0; }
+static inline void ipoib_register_debugfs(void) { }
 static inline void ipoib_unregister_debugfs(void) { }
 #endif

@@ -267,14 +267,10 @@ void ipoib_create_debug_files(struct net_device *dev)
         snprintf(name, sizeof(name), "%s_mcg", dev->name);
         priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
                                                ipoib_root, dev, &ipoib_mcg_fops);
-        if (!priv->mcg_dentry)
-                ipoib_warn(priv, "failed to create mcg debug file\n");

         snprintf(name, sizeof(name), "%s_path", dev->name);
         priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
                                                 ipoib_root, dev, &ipoib_path_fops);
-        if (!priv->path_dentry)
-                ipoib_warn(priv, "failed to create path debug file\n");
 }

 void ipoib_delete_debug_files(struct net_device *dev)
@@ -286,10 +282,9 @@ void ipoib_delete_debug_files(struct net_device *dev)
         priv->mcg_dentry = priv->path_dentry = NULL;
 }

-int ipoib_register_debugfs(void)
+void ipoib_register_debugfs(void)
 {
         ipoib_root = debugfs_create_dir("ipoib", NULL);
-        return ipoib_root ? 0 : -ENOMEM;
 }

 void ipoib_unregister_debugfs(void)
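The return-type change above tracks the kernel-wide debugfs cleanup: callers are not supposed to check debugfs results, because the API reports failures itself and every debugfs function copes with a bad parent. A minimal sketch of the resulting caller-side pattern (hypothetical module init, assuming the ipoib functions above):

    /* Registration can no longer fail, so module init simply calls it
     * and moves on; a debugfs failure only costs the debug files. */
    static int __init example_init_module(void)
    {
            ipoib_register_debugfs();
            return 0;
    }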
@@ -613,7 +613,7 @@ static void path_free(struct net_device *dev, struct ipoib_path *path)
         while ((skb = __skb_dequeue(&path->queue)))
                 dev_kfree_skb_irq(skb);

-        ipoib_dbg(ipoib_priv(dev), "path_free\n");
+        ipoib_dbg(ipoib_priv(dev), "%s\n", __func__);

         /* remove all neigh connected to this path */
         ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
@@ -1641,7 +1641,7 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 {
         struct ipoib_dev_priv *priv = ipoib_priv(dev);

-        ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
+        ipoib_dbg(priv, "%s\n", __func__);
         init_completion(&priv->ntbl.deleted);

         cancel_delayed_work_sync(&priv->neigh_reap_task);
@@ -2411,7 +2411,7 @@ static ssize_t dev_id_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(dev_id);

-int ipoib_intercept_dev_id_attr(struct net_device *dev)
+static int ipoib_intercept_dev_id_attr(struct net_device *dev)
 {
         device_remove_file(&dev->dev, &dev_attr_dev_id);
         return device_create_file(&dev->dev, &dev_attr_dev_id);
@@ -2495,7 +2495,7 @@ static void ipoib_add_one(struct ib_device *device)
         struct list_head *dev_list;
         struct net_device *dev;
         struct ipoib_dev_priv *priv;
-        int p;
+        unsigned int p;
         int count = 0;

         dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
@@ -2504,7 +2504,7 @@ static void ipoib_add_one(struct ib_device *device)

         INIT_LIST_HEAD(dev_list);

-        for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
+        rdma_for_each_port (device, p) {
                 if (!rdma_protocol_ib(device, p))
                         continue;
                 dev = ipoib_add_port("ib%d", device, p);
@@ -2577,9 +2577,7 @@ static int __init ipoib_init_module(void)
          */
         BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

-        ret = ipoib_register_debugfs();
-        if (ret)
-                return ret;
+        ipoib_register_debugfs();

         /*
          * We create a global workqueue here that is used for all flush

@@ -197,7 +197,7 @@ struct iser_data_buf {
         struct scatterlist *sg;
         int                size;
         unsigned long      data_len;
-        unsigned int       dma_nents;
+        int                dma_nents;
 };

 /* fwd declarations */
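The type change above makes dma_nents match what is stored in it: the int returned by ib_dma_map_sg(), where 0 signals a mapping failure. A minimal sketch of the kind of call site this guards (hypothetical helper; the error handling shown is an assumption):

    #include <rdma/ib_verbs.h>

    static int example_map(struct ib_device *ibdev, struct iser_data_buf *data)
    {
            /* dma_nents is int because ib_dma_map_sg() returns int */
            data->dma_nents = ib_dma_map_sg(ibdev, data->sg, data->size,
                                            DMA_BIDIRECTIONAL);
            if (data->dma_nents == 0)
                    return -EINVAL;
            return 0;
    }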
@@ -145,9 +145,8 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
         for_each_sg(data->sg, sg, data->dma_nents, i)
                 iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
                          "off:0x%x sz:0x%x dma_len:0x%x\n",
-                         i, (unsigned long)ib_sg_dma_address(ibdev, sg),
-                         sg_page(sg), sg->offset,
-                         sg->length, ib_sg_dma_len(ibdev, sg));
+                         i, (unsigned long)sg_dma_address(sg),
+                         sg_page(sg), sg->offset, sg->length, sg_dma_len(sg));
 }

 static void iser_dump_page_vec(struct iser_page_vec *page_vec)
@@ -204,8 +203,8 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
                 reg->rkey = device->pd->unsafe_global_rkey;
         else
                 reg->rkey = 0;
-        reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
-        reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
+        reg->sge.addr = sg_dma_address(&sg[0]);
+        reg->sge.length = sg_dma_len(&sg[0]);

         iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
                  " length=0x%x\n", reg->sge.lkey, reg->rkey,
@@ -240,8 +239,8 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
         page_vec->npages = 0;
         page_vec->fake_mr.page_size = SIZE_4K;
         plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
-                              mem->size, NULL, iser_set_page);
-        if (unlikely(plen < mem->size)) {
+                              mem->dma_nents, NULL, iser_set_page);
+        if (unlikely(plen < mem->dma_nents)) {
                 iser_err("page vec too short to hold this SG\n");
                 iser_data_buf_dump(mem, device->ib_device);
                 iser_dump_page_vec(page_vec);
@@ -448,10 +447,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,

         ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

-        n = ib_map_mr_sg(mr, mem->sg, mem->size, NULL, SIZE_4K);
-        if (unlikely(n != mem->size)) {
+        n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K);
+        if (unlikely(n != mem->dma_nents)) {
                 iser_err("failed to map sg (%d/%d)\n",
-                         n, mem->size);
+                         n, mem->dma_nents);
                 return n < 0 ? n : -EINVAL;
         }

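Every ib_sg_dma_address()/ib_sg_dma_len() removal in this file, and in the ib_srp.c hunks below, follows the same recipe: once the SG list has been DMA-mapped, the generic scatterlist accessors return the same values without needing the ib_device. A minimal sketch of the idiom (hypothetical walk function; for_each_sg and the sg_dma_* macros are the stock linux/scatterlist.h API):

    #include <linux/scatterlist.h>

    /* Walk a DMA-mapped SG list; only the first dma_nents entries
     * returned by dma_map_sg() carry valid DMA addresses/lengths. */
    static void example_walk(struct scatterlist *sgl, int dma_nents)
    {
            struct scatterlist *sg;
            int i;

            for_each_sg(sgl, sg, dma_nents, i)
                    pr_debug("sg[%d] addr=0x%llx len=0x%x\n", i,
                             (unsigned long long)sg_dma_address(sg),
                             sg_dma_len(sg));
    }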
@@ -1,2 +1 @@
-ccflags-y := -Idrivers/target -Idrivers/target/iscsi
 obj-$(CONFIG_INFINIBAND_ISERT) += ib_isert.o

@@ -443,8 +443,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
         if (pool_size <= 0)
                 goto err;
         ret = -ENOMEM;
-        pool = kzalloc(sizeof(struct srp_fr_pool) +
-                       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
+        pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
         if (!pool)
                 goto err;
         pool->size = pool_size;
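The kzalloc() conversion above uses struct_size() from linux/overflow.h, which computes sizeof(*pool) plus pool_size trailing desc[] elements and saturates on overflow, so an oversized request fails cleanly instead of producing an undersized buffer. A minimal sketch with a hypothetical flexible-array struct:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct example_pool {                   /* stand-in for srp_fr_pool */
            int size;
            struct {
                    void *priv;
            } desc[];                       /* flexible array member */
    };

    static struct example_pool *example_alloc(int n)
    {
            struct example_pool *pool;

            /* sizeof(*pool) + n * sizeof(pool->desc[0]), overflow-safe */
            pool = kzalloc(struct_size(pool, desc, n), GFP_KERNEL);
            if (pool)
                    pool->size = n;
            return pool;
    }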
@@ -1601,9 +1600,8 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 {
         struct srp_target_port *target = ch->target;
         struct srp_device *dev = target->srp_host->srp_dev;
-        struct ib_device *ibdev = dev->dev;
-        dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
-        unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
+        dma_addr_t dma_addr = sg_dma_address(sg);
+        unsigned int dma_len = sg_dma_len(sg);
         unsigned int len = 0;
         int ret;

@@ -1697,13 +1695,11 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
                           int count)
 {
         struct srp_target_port *target = ch->target;
-        struct srp_device *dev = target->srp_host->srp_dev;
         struct scatterlist *sg;
         int i;

         for_each_sg(scat, sg, count, i) {
-                srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
-                             ib_sg_dma_len(dev->dev, sg),
+                srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
                              target->global_rkey);
         }

@@ -1853,8 +1849,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
                 buf->len = cpu_to_be32(data_len);
                 WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
                 for_each_sg(scat, sg, count, i) {
-                        sge[i].addr = ib_sg_dma_address(ibdev, sg);
-                        sge[i].length = ib_sg_dma_len(ibdev, sg);
+                        sge[i].addr = sg_dma_address(sg);
+                        sge[i].length = sg_dma_len(sg);
                         sge[i].lkey = target->lkey;
                 }
                 req->cmd->num_sge += count;
@@ -1875,9 +1871,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
                 struct srp_direct_buf *buf;

                 buf = (void *)cmd->add_data + cmd->add_cdb_len;
-                buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
+                buf->va = cpu_to_be64(sg_dma_address(scat));
                 buf->key = cpu_to_be32(target->global_rkey);
-                buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
+                buf->len = cpu_to_be32(sg_dma_len(scat));

                 req->nmdesc = 0;
                 goto map_complete;
@@ -3814,6 +3810,7 @@ static ssize_t srp_create_target(struct device *dev,
         target_host->max_id = 1;
         target_host->max_lun = -1LL;
         target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
+        target_host->max_segment_size = ib_dma_max_seg_size(ibdev);

         target = host_to_target(target_host);

@@ -4120,7 +4117,8 @@ static void srp_add_one(struct ib_device *device)
         struct srp_device *srp_dev;
         struct ib_device_attr *attr = &device->attrs;
         struct srp_host *host;
-        int mr_page_shift, p;
+        int mr_page_shift;
+        unsigned int p;
         u64 max_pages_per_mr;
         unsigned int flags = 0;

@@ -4187,7 +4185,7 @@ static void srp_add_one(struct ib_device *device)
                 WARN_ON_ONCE(srp_dev->global_rkey == 0);
         }

-        for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
+        rdma_for_each_port (device, p) {
                 host = srp_add_port(srp_dev, p);
                 if (host)
                         list_add_tail(&host->list, &srp_dev->dev_list);

@@ -1,2 +1 @@
-ccflags-y := -Idrivers/target
 obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o