Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:
 "Things are looking pretty quiet here in RDMA, not too many bug fixes
  rolling in right now. The usual driver bug fixes and fixes for a
  couple of regressions introduced in 5.2:

   - Fix a race on bootup with RDMA device renaming and srp. SRP also
     needs to rename its internal sys files

   - Fix a memory leak in hns

   - Don't leak resources in efa on certain error unwinds

   - Don't panic in certain error unwinds in ib_register_device

   - Various small user visible bug fix patches for the hfi and efa
     drivers

   - Fix the 32 bit compilation break"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/efa: Remove MAYEXEC flag check from mmap flow
  mlx5: avoid 64-bit division
  IB/hfi1: Validate page aligned for a given virtual address
  IB/{qib, hfi1, rdmavt}: Correct ibv_devinfo max_mr value
  IB/hfi1: Insure freeze_work work_struct is canceled on shutdown
  IB/rdmavt: Fix alloc_qpn() WARN_ON()
  RDMA/core: Fix panic when port_data isn't initialized
  RDMA/uverbs: Pass udata on uverbs error unwind
  RDMA/core: Clear out the udata before error unwind
  RDMA/hns: Fix PD memory leak for internal allocation
  RDMA/srp: Rename SRP sysfs name after IB device rename trigger
@@ -1728,7 +1728,6 @@ int efa_mmap(struct ib_ucontext *ibucontext,
 		ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n");
 		return -EPERM;
 	}
-	vma->vm_flags &= ~VM_MAYEXEC;
 
 	return __efa_mmap(dev, ucontext, vma, key, length);
 }
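For context, a minimal sketch (hypothetical driver code, not taken from efa) of the policy the hunk leaves in place: mappings created executable are still refused via the retained -EPERM path, but VM_MAYEXEC is no longer cleared, leaving any later mprotect() decision to the core VM.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical ->mmap handler illustrating the retained behaviour. */
static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* mappings requested with PROT_EXEC are still rejected */
	if (vma->vm_flags & VM_EXEC)
		return -EPERM;

	/*
	 * VM_MAYEXEC is left untouched; the hunk above removes the line
	 * that used to clear it.
	 */
	return 0;
}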
@@ -9850,6 +9850,7 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
 
 	/* disable the port */
 	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+	cancel_work_sync(&ppd->freeze_work);
 }
 
 static inline int init_cpu_counters(struct hfi1_devdata *dd)
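The one-line fix above follows the standard workqueue teardown rule: work queued from an error path must be cancelled before the structure embedding the work_struct is torn down, otherwise the handler can run against freed state. A self-contained sketch of that pattern (hypothetical demo_port type, not hfi1 code):

#include <linux/workqueue.h>

struct demo_port {
	struct work_struct freeze_work;
};

static void demo_freeze_handler(struct work_struct *work)
{
	struct demo_port *port = container_of(work, struct demo_port, freeze_work);

	/* handle the freeze event for @port */
	(void)port;
}

static void demo_port_init(struct demo_port *port)
{
	INIT_WORK(&port->freeze_work, demo_freeze_handler);
}

static void demo_port_shutdown(struct demo_port *port)
{
	/* waits for any queued or running handler to finish before returning */
	cancel_work_sync(&port->freeze_work);
}

Without the cancel_work_sync() call, a freeze event queued just before shutdown could have its handler run after the port has been torn down.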
@@ -324,6 +324,9 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
 	u32 *tidlist = NULL;
 	struct tid_user_buf *tidbuf;
 
+	if (!PAGE_ALIGNED(tinfo->vaddr))
+		return -EINVAL;
+
 	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
 	if (!tidbuf)
 		return -ENOMEM;
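A short sketch of the check added above (hypothetical helper; PAGE_ALIGNED() itself comes from <linux/mm.h>): it is true only when the low PAGE_SHIFT bits of the address are zero, so with 4 KiB pages an address such as 0x1008 is rejected while 0x2000 passes, and a user buffer that starts mid-page is refused before any pinning work begins.

#include <linux/mm.h>

/* Hypothetical validation helper mirroring the added check. */
static int demo_validate_vaddr(unsigned long vaddr)
{
	if (!PAGE_ALIGNED(vaddr))
		return -EINVAL;

	return 0;
}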
@@ -1356,8 +1356,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
 	rdi->dparms.props.max_cq = hfi1_max_cqs;
 	rdi->dparms.props.max_ah = hfi1_max_ahs;
 	rdi->dparms.props.max_cqe = hfi1_max_cqes;
-	rdi->dparms.props.max_mr = rdi->lkey_table.max;
-	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
 	rdi->dparms.props.max_map_per_fmr = 32767;
 	rdi->dparms.props.max_pd = hfi1_max_pds;
 	rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
@@ -899,6 +899,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
 		dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
 
 	hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
+	kfree(&free_mr->mr_free_pd->ibpd);
 }
 
 static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
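The added kfree() releases the PD through the address of its embedded ib_pd. A sketch (hypothetical demo types, not the hns structures) of why that is equivalent to freeing the whole allocation, assuming the embedded struct is the object's first member:

#include <linux/kernel.h>
#include <linux/slab.h>

struct demo_inner {
	int id;
};

struct demo_outer {
	struct demo_inner inner;	/* first member: shares the outer object's address */
	unsigned long extra;
};

static void demo_alloc_and_free(void)
{
	struct demo_outer *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return;

	/*
	 * Freeing through the embedded member frees the original allocation;
	 * container_of() expresses the same thing explicitly.
	 */
	kfree(container_of(&o->inner, struct demo_outer, inner));
}

Per the commit title, the leak affects PDs the driver allocates internally, which therefore must also be freed by the driver after dealloc.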
@@ -190,12 +190,12 @@ int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
 			  u16 uid, phys_addr_t *addr, u32 *obj_id)
 {
 	struct mlx5_core_dev *dev = dm->dev;
-	u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
 	u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
 	unsigned long *block_map;
 	u64 icm_start_addr;
 	u32 log_icm_size;
+	u32 num_blocks;
 	u32 max_blocks;
 	u64 block_idx;
 	void *sw_icm;
@@ -224,6 +224,8 @@ int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
 		return -EINVAL;
 	}
 
+	num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
+		     MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
 	max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
 	spin_lock(&dm->lock);
 	block_idx = bitmap_find_next_zero_area(block_map,
@@ -266,13 +268,16 @@ int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
 			    u16 uid, phys_addr_t addr, u32 obj_id)
 {
 	struct mlx5_core_dev *dev = dm->dev;
-	u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
 	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
 	unsigned long *block_map;
+	u32 num_blocks;
 	u64 start_idx;
 	int err;
 
+	num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
+		     MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+
 	switch (type) {
 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
 		start_idx =
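Both mlx5_cmd_alloc_sw_icm() and mlx5_cmd_dealloc_sw_icm() above replace DIV_ROUND_UP() on a u64 length with an add-and-shift. On 32-bit builds a 64-bit division by a non-constant divisor is emitted as a call to a compiler helper (__udivdi3 and friends) that the kernel does not provide, which is the "32 bit compilation break" mentioned in the pull message. Because the block size is a power of two (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE), the shift form gives the same result. A standalone sketch (plain C with made-up constants) of the equivalence:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LOG_BLOCK	12u			/* assume a 4 KiB ICM block */
#define BLOCK_SIZE	(1ull << LOG_BLOCK)

static uint32_t blocks_by_div(uint64_t len)
{
	/* what DIV_ROUND_UP(len, BLOCK_SIZE) computes */
	return (uint32_t)((len + BLOCK_SIZE - 1) / BLOCK_SIZE);
}

static uint32_t blocks_by_shift(uint64_t len)
{
	/* what the fix computes: no division instruction needed */
	return (uint32_t)((len + BLOCK_SIZE - 1) >> LOG_BLOCK);
}

int main(void)
{
	uint64_t samples[] = { 0, 1, BLOCK_SIZE - 1, BLOCK_SIZE,
			       BLOCK_SIZE + 1, 10 * BLOCK_SIZE + 7 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(blocks_by_div(samples[i]) == blocks_by_shift(samples[i]));

	printf("add-and-shift matches DIV_ROUND_UP for power-of-two block sizes\n");
	return 0;
}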
@@ -2344,7 +2344,7 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
 	/* Allocation size must a multiple of the basic block size
 	 * and a power of 2.
 	 */
-	act_size = roundup(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
+	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
 	act_size = roundup_pow_of_two(act_size);
 
 	dm->size = act_size;
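The same 64-bit-division concern drives the roundup() to round_up() change above: roundup() rounds to an arbitrary multiple using a division, while round_up() assumes a power-of-two alignment and uses only masking. For power-of-two block sizes the two agree, as this standalone sketch (simplified userspace versions of the kernel macros) checks:

#include <assert.h>
#include <stdint.h>

/* simplified forms of the kernel's roundup() and round_up() macros */
#define demo_roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))
#define demo_round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	const uint64_t block = 4096;	/* assume a power-of-two block size */

	for (uint64_t len = 1; len < 3 * block; len += 511)
		assert(demo_roundup(len, block) == demo_round_up(len, block));

	return 0;
}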
@@ -1459,8 +1459,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
 	rdi->dparms.props.max_cq = ib_qib_max_cqs;
 	rdi->dparms.props.max_cqe = ib_qib_max_cqes;
 	rdi->dparms.props.max_ah = ib_qib_max_ahs;
-	rdi->dparms.props.max_mr = rdi->lkey_table.max;
-	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
 	rdi->dparms.props.max_map_per_fmr = 32767;
 	rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
 	rdi->dparms.props.max_qp_init_rd_atom = 255;