Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "This has been a slightly more active cycle than normal with ongoing
  core changes and quite a lot of collected driver updates.

   - Various driver fixes for bnxt_re, cxgb4, hns, mlx5, pvrdma, rxe

   - A new data transfer mode for HFI1 giving higher performance

   - Significant functional and bug fix update to the mlx5
     On-Demand-Paging MR feature

   - A chip hang reset recovery system for hns

   - Change mm->pinned_vm to an atomic64

   - Update bnxt_re to support a new 57500 chip

   - A sane netlink 'rdma link add' method for creating rxe devices and
     fixing the various unregistration race conditions in rxe's
     unregister flow

   - Allow looking up objects by an ID over netlink

   - Various reworking of the core to driver interface:
       - drivers should not assume umem SGLs are in PAGE_SIZE chunks
       - ucontext is accessed via udata not other means
       - start to make the core code responsible for object memory
         allocation
       - drivers should convert struct device to struct ib_device via a
         helper
       - drivers have more tools to avoid use after unregister problems"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (280 commits)
  net/mlx5: ODP support for XRC transport is not enabled by default in FW
  IB/hfi1: Close race condition on user context disable and close
  RDMA/umem: Revert broken 'off by one' fix
  RDMA/umem: minor bug fix in error handling path
  RDMA/hns: Use GFP_ATOMIC in hns_roce_v2_modify_qp
  cxgb4: kfree mhp after the debug print
  IB/rdmavt: Fix concurrency panics in QP post_send and modify to error
  IB/rdmavt: Fix loopback send with invalidate ordering
  IB/iser: Fix dma_nents type definition
  IB/mlx5: Set correct write permissions for implicit ODP MR
  bnxt_re: Clean cq for kernel consumers only
  RDMA/uverbs: Don't do double free of allocated PD
  RDMA: Handle ucontext allocations by IB/core
  RDMA/core: Fix a WARN() message
  bnxt_re: fix the regression due to changes in alloc_pbl
  IB/mlx4: Increase the timeout for CM cache
  IB/core: Abort page fault handler silently during owning process exit
  IB/mlx5: Validate correct PD before prefetch MR
  IB/mlx5: Protect against prefetch of invalid MR
  RDMA/uverbs: Store PR pointer before it is overwritten
  ...
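The mm->pinned_vm conversion called out above is the change visible in the scif hunks below. As a minimal userspace sketch of the new accounting pattern, with C11 atomics standing in for the kernel's atomic64_t and a caller-supplied lock_limit standing in for rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT (the function names here are hypothetical, not from the kernel source):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the kernel's atomic64_t mm->pinned_vm counter. */
	static _Atomic int64_t pinned_vm;

	/*
	 * New scheme: optimistically charge the pages, then roll the
	 * charge back if the limit would be exceeded.  No lock is held,
	 * unlike the old code, which wrapped a plain read-modify-write
	 * of mm->pinned_vm in mmap_sem.
	 */
	static int check_inc_pinned_vm(int64_t nr_pages, int64_t lock_limit)
	{
		/* atomic_fetch_add returns the old value; adding nr_pages
		 * gives the atomic64_add_return() semantics used below. */
		int64_t locked = atomic_fetch_add(&pinned_vm, nr_pages) + nr_pages;

		if (locked > lock_limit) {
			atomic_fetch_sub(&pinned_vm, nr_pages); /* roll back */
			return -1; /* the kernel returns -ENOMEM here */
		}
		return 0;
	}

	/* The decrement cannot fail, so no locked error path is needed. */
	static void dec_pinned_vm(int64_t nr_pages)
	{
		atomic_fetch_sub(&pinned_vm, nr_pages);
	}

	int main(void)
	{
		if (check_inc_pinned_vm(16, 64) == 0)
			printf("pinned 16 pages, total now %lld\n",
			       (long long)atomic_load(&pinned_vm));
		dec_pinned_vm(16);
		return 0;
	}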
@@ -272,21 +272,12 @@ static inline void __scif_release_mm(struct mm_struct *mm)
 
 static inline int
 __scif_dec_pinned_vm_lock(struct mm_struct *mm,
-			  int nr_pages, bool try_lock)
+			  int nr_pages)
 {
 	if (!mm || !nr_pages || !scif_ulimit_check)
 		return 0;
-	if (try_lock) {
-		if (!down_write_trylock(&mm->mmap_sem)) {
-			dev_err(scif_info.mdev.this_device,
-				"%s %d err\n", __func__, __LINE__);
-			return -1;
-		}
-	} else {
-		down_write(&mm->mmap_sem);
-	}
-	mm->pinned_vm -= nr_pages;
-	up_write(&mm->mmap_sem);
+
+	atomic64_sub(nr_pages, &mm->pinned_vm);
 	return 0;
 }
 
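With pinned_vm now an atomic64_t, the decrement no longer needs mmap_sem, so the try_lock parameter and its dev_err()/-1 failure path disappear: atomic64_sub() cannot fail, and the function now unconditionally returns 0.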
@@ -298,16 +289,16 @@ static inline int __scif_check_inc_pinned_vm(struct mm_struct *mm,
 	if (!mm || !nr_pages || !scif_ulimit_check)
 		return 0;
 
-	locked = nr_pages;
-	locked += mm->pinned_vm;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	locked = atomic64_add_return(nr_pages, &mm->pinned_vm);
+
 	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+		atomic64_sub(nr_pages, &mm->pinned_vm);
 		dev_err(scif_info.mdev.this_device,
 			"locked(%lu) > lock_limit(%lu)\n",
 			locked, lock_limit);
 		return -ENOMEM;
 	}
-	mm->pinned_vm = locked;
 	return 0;
 }
 
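The limit check becomes optimistic: atomic64_add_return() charges the pages up front and the new atomic64_sub() call rolls the charge back if the limit would be exceeded. The trade-off, inherent to this lock-free pattern, is that a concurrent caller can transiently observe a counter value above the limit before the rollback completes.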
@@ -326,7 +317,7 @@ int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window)
 
 	might_sleep();
 	if (!window->temp && window->mm) {
-		__scif_dec_pinned_vm_lock(window->mm, window->nr_pages, 0);
+		__scif_dec_pinned_vm_lock(window->mm, window->nr_pages);
 		__scif_release_mm(window->mm);
 		window->mm = NULL;
 	}
@@ -737,7 +728,7 @@ done:
 				ep->rma_info.dma_chan);
 	} else {
 		if (!__scif_dec_pinned_vm_lock(window->mm,
-					       window->nr_pages, 1)) {
+					       window->nr_pages)) {
 			__scif_release_mm(window->mm);
 			window->mm = NULL;
 		}
@@ -1385,28 +1376,23 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot,
 			prot |= SCIF_PROT_WRITE;
 retry:
 	mm = current->mm;
-	down_write(&mm->mmap_sem);
 	if (ulimit) {
 		err = __scif_check_inc_pinned_vm(mm, nr_pages);
 		if (err) {
-			up_write(&mm->mmap_sem);
 			pinned_pages->nr_pages = 0;
 			goto error_unmap;
 		}
 	}
 
-	pinned_pages->nr_pages = get_user_pages(
+	pinned_pages->nr_pages = get_user_pages_fast(
 				(u64)addr,
 				nr_pages,
 				(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
-				pinned_pages->pages,
-				NULL);
-	up_write(&mm->mmap_sem);
+				pinned_pages->pages);
 	if (nr_pages != pinned_pages->nr_pages) {
 		if (try_upgrade) {
 			if (ulimit)
-				__scif_dec_pinned_vm_lock(mm,
-							  nr_pages, 0);
+				__scif_dec_pinned_vm_lock(mm, nr_pages);
 			/* Roll back any pinned pages */
 			for (i = 0; i < pinned_pages->nr_pages; i++) {
 				if (pinned_pages->pages[i])
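The switch from get_user_pages() to get_user_pages_fast() is what allows the surrounding down_write()/up_write() pair on mmap_sem to be dropped: the fast variant manages mmap_sem itself, taking it only when it must fall back to the slow path. Its four-argument form also has no vmas parameter, hence the removal of the trailing NULL.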
@@ -1433,7 +1419,7 @@ retry:
 	return err;
 dec_pinned:
 	if (ulimit)
-		__scif_dec_pinned_vm_lock(mm, nr_pages, 0);
+		__scif_dec_pinned_vm_lock(mm, nr_pages);
 	/* Something went wrong! Rollback */
 error_unmap:
 	pinned_pages->nr_pages = nr_pages;