rxe_mmap.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
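
/*
 * kref release callback for a struct rxe_mmap_info: unlink the entry from
 * the device's pending list (if it is still queued there) and free both
 * the vmalloc'ed queue buffer and the tracking structure itself.
 */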
void rxe_mmap_release(struct kref *ref)
{
	struct rxe_mmap_info *ip = container_of(ref,
					struct rxe_mmap_info, ref);
	struct rxe_dev *rxe = to_rdev(ip->context->device);

	spin_lock_bh(&rxe->pending_lock);

	if (!list_empty(&ip->pending_mmaps))
		list_del(&ip->pending_mmaps);

	spin_unlock_bh(&rxe->pending_lock);

	vfree(ip->obj);		/* buf */
	kfree(ip);
}

/*
 * open and close keep track of how many times the memory region is mapped,
 * so that it is not released while it is still mapped.
 */
static void rxe_vma_open(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_get(&ip->ref);
}

static void rxe_vma_close(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_put(&ip->ref, rxe_mmap_release);
}

static const struct vm_operations_struct rxe_vm_ops = {
	.open = rxe_vma_open,
	.close = rxe_vma_close,
};

/**
 * rxe_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Return zero if the mmap is OK. Otherwise, return an errno.
 */
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rxe_dev *rxe = to_rdev(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rxe_mmap_info *ip, *pp;
	int ret;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_bh(&rxe->pending_lock);
	list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
		if (context != ip->context || (__u64)offset != ip->info.offset)
			continue;

		/* Don't allow a mmap larger than the object. */
		if (size > ip->info.size) {
			pr_err("mmap region is larger than the object!\n");
			spin_unlock_bh(&rxe->pending_lock);
			ret = -EINVAL;
			goto done;
		}

		goto found_it;
	}
	pr_warn("unable to find pending mmap info\n");
	spin_unlock_bh(&rxe->pending_lock);
	ret = -EINVAL;
	goto done;
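
/*
 * A matching entry was found: unlink it from the pending list and map
 * its vmalloc'ed buffer into the caller's VMA.
 */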
found_it:
	list_del_init(&ip->pending_mmaps);
	spin_unlock_bh(&rxe->pending_lock);

	ret = remap_vmalloc_range(vma, ip->obj, 0);
	if (ret) {
		pr_err("err %d from remap_vmalloc_range\n", ret);
		goto done;
	}
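
	/*
	 * Take a reference for this mapping explicitly: ->open is only
	 * called when an existing VMA is duplicated (e.g. on fork or
	 * split), not for the initial mmap.  ->close drops it, and the
	 * final put lands in rxe_mmap_release().
	 */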
	vma->vm_ops = &rxe_vm_ops;
	vma->vm_private_data = ip;
	rxe_vma_open(vma);
done:
	return ret;
}

/*
 * Allocate and initialize the tracking information that rxe_mmap() later
 * uses to map a queue buffer into user space.
 */
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
					   struct ib_udata *udata, void *obj)
{
	struct rxe_mmap_info *ip;

	if (!udata)
		return ERR_PTR(-EINVAL);

	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		return ERR_PTR(-ENOMEM);

	size = PAGE_ALIGN(size);
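
	/*
	 * Hand out a unique, SHMLBA-aligned offset in the device's mmap
	 * space; userspace passes it back as the mmap() offset so the
	 * pending-list search in rxe_mmap() can identify the object.
	 */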
	spin_lock_bh(&rxe->mmap_offset_lock);

	if (rxe->mmap_offset == 0)
		rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

	ip->info.offset = rxe->mmap_offset;
	rxe->mmap_offset += ALIGN(size, SHMLBA);

	spin_unlock_bh(&rxe->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->info.size = size;
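
	/*
	 * rxe_mmap_release() needs the owning ib_ucontext; recover it from
	 * the uverbs_attr_bundle that embeds this ib_udata.
	 */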
	ip->context = container_of(udata, struct uverbs_attr_bundle,
				   driver_udata)->context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}
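
/*
 * Illustrative note (an assumption about the callers, not part of this
 * file): a queue-creation path is expected to allocate the queue buffer
 * with vmalloc_user(), wrap it with rxe_create_mmap_info(), copy the
 * resulting ip->info (offset and size) back to userspace through udata,
 * and add ip to rxe->pending_mmaps under pending_lock so that the
 * subsequent mmap() on that offset is matched by rxe_mmap() above.
 */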