/* kernel/bpf/map_iter.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Copyright (c) 2020 Facebook */
  3. #include <linux/bpf.h>
  4. #include <linux/fs.h>
  5. #include <linux/filter.h>
  6. #include <linux/kernel.h>
  7. #include <linux/btf_ids.h>
/* Private seq_file state for the bpf_map iterator. */
struct bpf_iter_seq_map_info {
	/* id of the map to show next; advanced in bpf_map_seq_next() */
	u32 map_id;
};
  11. static void *bpf_map_seq_start(struct seq_file *seq, loff_t *pos)
  12. {
  13. struct bpf_iter_seq_map_info *info = seq->private;
  14. struct bpf_map *map;
  15. map = bpf_map_get_curr_or_next(&info->map_id);
  16. if (!map)
  17. return NULL;
  18. if (*pos == 0)
  19. ++*pos;
  20. return map;
  21. }
  22. static void *bpf_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  23. {
  24. struct bpf_iter_seq_map_info *info = seq->private;
  25. ++*pos;
  26. ++info->map_id;
  27. bpf_map_put((struct bpf_map *)v);
  28. return bpf_map_get_curr_or_next(&info->map_id);
  29. }
/* Context handed to a bpf_iter program attached to the "bpf_map" target. */
struct bpf_iter__bpf_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	/* NULL on the final invocation from ->stop (see bpf_map_seq_stop) */
	__bpf_md_ptr(struct bpf_map *, map);
};

DEFINE_BPF_ITER_FUNC(bpf_map, struct bpf_iter_meta *meta, struct bpf_map *map)
  35. static int __bpf_map_seq_show(struct seq_file *seq, void *v, bool in_stop)
  36. {
  37. struct bpf_iter__bpf_map ctx;
  38. struct bpf_iter_meta meta;
  39. struct bpf_prog *prog;
  40. int ret = 0;
  41. ctx.meta = &meta;
  42. ctx.map = v;
  43. meta.seq = seq;
  44. prog = bpf_iter_get_info(&meta, in_stop);
  45. if (prog)
  46. ret = bpf_iter_run_prog(prog, &ctx);
  47. return ret;
  48. }
/* seq_file ->show: run the iterator program for one map element. */
static int bpf_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_map_seq_show(seq, v, false);
}
  53. static void bpf_map_seq_stop(struct seq_file *seq, void *v)
  54. {
  55. if (!v)
  56. (void)__bpf_map_seq_show(seq, v, true);
  57. else
  58. bpf_map_put((struct bpf_map *)v);
  59. }
/* seq_file operations backing the bpf_map iterator. */
static const struct seq_operations bpf_map_seq_ops = {
	.start	= bpf_map_seq_start,
	.next	= bpf_map_seq_next,
	.stop	= bpf_map_seq_stop,
	.show	= bpf_map_seq_show,
};
/* BTF type id of struct bpf_map; copied into
 * bpf_map_reg_info.ctx_arg_info[0].btf_id in bpf_map_iter_init().
 */
BTF_ID_LIST(btf_bpf_map_id)
BTF_ID(struct, bpf_map)

static const struct bpf_iter_seq_info bpf_map_seq_info = {
	.seq_ops		= &bpf_map_seq_ops,
	/* no per-open setup/teardown beyond the zeroed private struct */
	.init_seq_private	= NULL,
	.fini_seq_private	= NULL,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_map_info),
};
/* Registration for the "bpf_map" iterator target.  Deliberately not
 * const: ctx_arg_info[0].btf_id is filled in at init time from
 * btf_bpf_map_id (see bpf_map_iter_init()).
 */
static struct bpf_iter_reg bpf_map_reg_info = {
	.target			= "bpf_map",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_map, map),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &bpf_map_seq_info,
};
  83. static int bpf_iter_attach_map(struct bpf_prog *prog,
  84. union bpf_iter_link_info *linfo,
  85. struct bpf_iter_aux_info *aux)
  86. {
  87. u32 key_acc_size, value_acc_size, key_size, value_size;
  88. struct bpf_map *map;
  89. bool is_percpu = false;
  90. int err = -EINVAL;
  91. if (!linfo->map.map_fd)
  92. return -EBADF;
  93. map = bpf_map_get_with_uref(linfo->map.map_fd);
  94. if (IS_ERR(map))
  95. return PTR_ERR(map);
  96. if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
  97. map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
  98. map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
  99. is_percpu = true;
  100. else if (map->map_type != BPF_MAP_TYPE_HASH &&
  101. map->map_type != BPF_MAP_TYPE_LRU_HASH &&
  102. map->map_type != BPF_MAP_TYPE_ARRAY)
  103. goto put_map;
  104. key_acc_size = prog->aux->max_rdonly_access;
  105. value_acc_size = prog->aux->max_rdwr_access;
  106. key_size = map->key_size;
  107. if (!is_percpu)
  108. value_size = map->value_size;
  109. else
  110. value_size = round_up(map->value_size, 8) * num_possible_cpus();
  111. if (key_acc_size > key_size || value_acc_size > value_size) {
  112. err = -EACCES;
  113. goto put_map;
  114. }
  115. aux->map = map;
  116. return 0;
  117. put_map:
  118. bpf_map_put_with_uref(map);
  119. return err;
  120. }
/* Undo bpf_iter_attach_map(): drop the uref reference taken at attach. */
static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}
/* Emit the attached map's id into the iterator link's fdinfo. */
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
			      struct seq_file *seq)
{
	seq_printf(seq, "map_id:\t%u\n", aux->map->id);
}
/* Report the attached map's id through bpf_link_info.  Always succeeds. */
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
				struct bpf_link_info *info)
{
	info->iter.map.map_id = aux->map->id;
	return 0;
}
/* "bpf_map_elem" iterator: walks the elements of one specific map,
 * chosen at link creation via bpf_iter_attach_map() above.
 */
DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key, void *value)

static const struct bpf_iter_reg bpf_map_elem_reg_info = {
	.target			= "bpf_map_elem",
	.attach_target		= bpf_iter_attach_map,
	.detach_target		= bpf_iter_detach_map,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		/* key is read-only to the program; both pointers may be NULL */
		{ offsetof(struct bpf_iter__bpf_map_elem, key),
		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
		{ offsetof(struct bpf_iter__bpf_map_elem, value),
		  PTR_TO_BUF | PTR_MAYBE_NULL },
	},
};
  152. static int __init bpf_map_iter_init(void)
  153. {
  154. int ret;
  155. bpf_map_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_map_id;
  156. ret = bpf_iter_reg_target(&bpf_map_reg_info);
  157. if (ret)
  158. return ret;
  159. return bpf_iter_reg_target(&bpf_map_elem_reg_info);
  160. }
  161. late_initcall(bpf_map_iter_init);