// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2019-2020, Mellanox Technologies Ltd. All rights reserved.
 */

#include <uapi/rdma/rdma_netlink.h>
#include <linux/mlx5/rsc_dump.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/restrack.h>
#include "mlx5_ib.h"
#include "restrack.h"

#define MAX_DUMP_SIZE 1024
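
/*
 * Read the firmware resource dump for one object segment by segment into
 * @data, which must be at least MAX_DUMP_SIZE bytes.  mlx5_rsc_dump_next()
 * returns a positive value while more segments remain, 0 on the final
 * segment, and a negative errno on failure; dumps that would exceed
 * MAX_DUMP_SIZE are aborted.
 */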
static int dump_rsc(struct mlx5_core_dev *dev, enum mlx5_sgmt_type type,
		    int index, void *data, int *data_len)
{
	struct mlx5_core_dev *mdev = dev;
	struct mlx5_rsc_dump_cmd *cmd;
	struct mlx5_rsc_key key = {};
	struct page *page;
	int offset = 0;
	int err = 0;
	int cmd_err;
	int size;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	key.size = PAGE_SIZE;
	key.rsc = type;
	key.index1 = index;
	key.num_of_obj1 = 1;

	cmd = mlx5_rsc_dump_cmd_create(mdev, &key);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto free_page;
	}

	do {
		cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
		if (cmd_err < 0 || size + offset > MAX_DUMP_SIZE) {
			err = cmd_err;
			goto destroy_cmd;
		}
		memcpy(data + offset, page_address(page), size);
		offset += size;
	} while (cmd_err > 0);
	*data_len = offset;

destroy_cmd:
	mlx5_rsc_dump_cmd_destroy(cmd);
free_page:
	__free_page(page);
	return err;
}
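
/*
 * Dump one firmware object into a single RDMA_NLDEV_ATTR_RES_RAW netlink
 * attribute.  The data is staged in a MAX_DUMP_SIZE heap buffer because
 * dump_rsc() copies it out of the firmware in page-sized segments.
 */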
static int fill_res_raw(struct sk_buff *msg, struct mlx5_ib_dev *dev,
			enum mlx5_sgmt_type type, u32 key)
{
	int len = 0;
	void *data;
	int err;

	data = kzalloc(MAX_DUMP_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = dump_rsc(dev->mdev, type, key, data, &len);
	if (err)
		goto out;

	err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);

out:
	kfree(data);
	return err;
}
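
/*
 * Export the ODP statistics of an MR as nested
 * RDMA_NLDEV_ATTR_STAT_HWCOUNTERS entries.  Non-ODP MRs have nothing to
 * report, so they are skipped without error.
 */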
static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct nlattr *table_attr;

	if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
		return 0;

	table_attr = nla_nest_start(msg,
				    RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table_attr)
		goto err;

	if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
					 atomic64_read(&mr->odp_stats.faults)))
		goto err_table;
	if (rdma_nl_stat_hwcounter_entry(
		    msg, "page_invalidations",
		    atomic64_read(&mr->odp_stats.invalidations)))
		goto err_table;
	if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
					 atomic64_read(&mr->odp_stats.prefetch)))
		goto err_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}
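
/* Dump the raw firmware MKEY context backing this MR. */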
static int fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);

	return fill_res_raw(msg, mr_to_mdev(mr), MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
			    mlx5_mkey_to_idx(mr->mmkey.key));
}
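
/*
 * Add a driver-specific nest that tells userspace whether an ODP MR uses
 * implicit (whole address space) or explicit registration.
 */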
static int fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct nlattr *table_attr;

	if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	if (mr->is_odp_implicit) {
		if (rdma_nl_put_driver_string(msg, "odp", "implicit"))
			goto err;
	} else {
		if (rdma_nl_put_driver_string(msg, "odp", "explicit"))
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}
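
/* Dump the raw firmware CQ and QP contexts. */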
static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);

	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_CQ, cq->mcq.cqn);
}

static int fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);

	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_QP,
			    ibqp->qp_num);
}
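
/*
 * Restrack callbacks registered with the RDMA core.  These back the nldev
 * resource-tracking queries issued from userspace, for example the
 * iproute2 "rdma res show" commands that request raw or driver-specific
 * details.
 */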
static const struct ib_device_ops restrack_ops = {
	.fill_res_cq_entry_raw = fill_res_cq_entry_raw,
	.fill_res_mr_entry = fill_res_mr_entry,
	.fill_res_mr_entry_raw = fill_res_mr_entry_raw,
	.fill_res_qp_entry_raw = fill_res_qp_entry_raw,
	.fill_stat_mr_entry = fill_stat_mr_entry,
};

int mlx5_ib_restrack_init(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &restrack_ops);
	return 0;
}