siw_mem.h

/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */

/* Authors: Bernard Metzler <[email protected]> */
/* Copyright (c) 2008-2019, IBM Corporation */

#ifndef _SIW_MEM_H
#define _SIW_MEM_H

struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
void siw_umem_release(struct siw_umem *umem, bool dirty);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
                  enum ib_access_flags perms, int len);
int siw_check_sge(struct ib_pd *pd, struct siw_sge *sge,
                  struct siw_mem *mem[], enum ib_access_flags perms,
                  u32 off, int len);
void siw_wqe_put_mem(struct siw_wqe *wqe, enum siw_opcode op);
int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
                   u64 start, u64 len, int rights);
void siw_mr_drop_mem(struct siw_mr *mr);
void siw_free_mem(struct kref *ref);

static inline void siw_mem_put(struct siw_mem *mem)
{
        kref_put(&mem->ref, siw_free_mem);
}
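
/*
 * Illustrative comment (added here, not part of the original header):
 * siw_unref_mem_sgl() below drops the references held for up to @num_sge
 * memory objects of an SGE array, stopping at the first NULL entry and
 * clearing each pointer after the put.
 */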
static inline void siw_unref_mem_sgl(struct siw_mem **mem, unsigned int num_sge)
{
        while (num_sge) {
                if (*mem == NULL)
                        break;

                siw_mem_put(*mem);
                *mem = NULL;
                mem++;
                num_sge--;
        }
}

#define CHUNK_SHIFT 9 /* sets number of pages per chunk */
#define PAGES_PER_CHUNK (_AC(1, UL) << CHUNK_SHIFT)
#define CHUNK_MASK (~(PAGES_PER_CHUNK - 1))
#define PAGE_CHUNK_SIZE (PAGES_PER_CHUNK * sizeof(struct page *))
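
/*
 * Illustrative note (added comment, not part of the original header): with
 * CHUNK_SHIFT = 9, each chunk holds PAGES_PER_CHUNK = 512 page pointers,
 * which on a 64-bit build makes PAGE_CHUNK_SIZE = 512 * 8 = 4096 bytes of
 * pointer storage per chunk. A linear page index then splits into
 *
 *      chunk_idx     = page_idx >> CHUNK_SHIFT;    (page_idx / 512)
 *      page_in_chunk = page_idx & ~CHUNK_MASK;     (page_idx % 512)
 *
 * which is exactly how siw_get_upage() below indexes the two-dimensional
 * page list.
 */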

/*
 * siw_get_upage()
 *
 * Get page pointer for address on given umem.
 *
 * @umem: two dimensional list of page pointers
 * @addr: user virtual address
 */
static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr)
{
        unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT,
                     chunk_idx = page_idx >> CHUNK_SHIFT,
                     page_in_chunk = page_idx & ~CHUNK_MASK;

        if (likely(page_idx < umem->num_pages))
                return umem->page_chunk[chunk_idx].plist[page_in_chunk];

        return NULL;
}
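
/*
 * Usage sketch (added comment, not part of the original header; all names
 * other than siw_get_upage() are assumptions for illustration): a caller
 * holding a pinned umem could resolve a user virtual address va to a
 * temporary kernel mapping roughly as follows.
 *
 *      struct page *p = siw_get_upage(umem, va);
 *      void *kva;
 *
 *      if (!p)
 *              return -EFAULT;         (va lies outside the pinned range)
 *
 *      kva = kmap_local_page(p) + (va & ~PAGE_MASK);
 *      (... read or write at kva, staying within this page ...)
 *      kunmap_local(kva);
 */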
#endif