/* map_perf_test_kern.c */
  1. /* Copyright (c) 2016 Facebook
  2. *
  3. * This program is free software; you can redistribute it and/or
  4. * modify it under the terms of version 2 of the GNU General Public
  5. * License as published by the Free Software Foundation.
  6. */
  7. #include <linux/skbuff.h>
  8. #include <linux/netdevice.h>
  9. #include <linux/version.h>
  10. #include <uapi/linux/bpf.h>
  11. #include <bpf/bpf_helpers.h>
  12. #include <bpf/bpf_tracing.h>
  13. #include <bpf/bpf_core_read.h>
  14. #include "trace_common.h"
  15. #define MAX_ENTRIES 1000
  16. #define MAX_NR_CPUS 1024
/* Plain preallocated hash map; hammered by stress_hmap() with
 * update/lookup/delete cycles and read by stress_hash_map_lookup().
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} hash_map SEC(".maps");
/* LRU hash map with the default (common, shared-across-CPUs) LRU list;
 * exercised by stress_lru_hmap_alloc() test case 0.
 */
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
} lru_hash_map SEC(".maps");
/* LRU hash map with per-CPU LRU lists (BPF_F_NO_COMMON_LRU);
 * exercised by stress_lru_hmap_alloc() test case 1.
 */
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_COMMON_LRU);
} nocommon_lru_hash_map SEC(".maps");
/* Named struct so it can serve as the inner-map template of
 * array_of_lru_hashs below; also instantiated directly as
 * inner_lru_hash_map (statically placed in slot 0 of that array).
 * BPF_F_NUMA_NODE + numa_node pin this instance's allocation to node 0.
 */
struct inner_lru {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NUMA_NODE);
	__uint(numa_node, 0);
} inner_lru_hash_map SEC(".maps");
/* Map-in-map: one inner LRU hash per CPU id, indexed by
 * bpf_get_smp_processor_id() in stress_lru_hmap_alloc() test case 2.
 * Only element 0 is initialized here; the remaining slots are
 * presumably populated by the userspace loader — verify against the
 * corresponding map_perf_test user program.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_NR_CPUS);
	__uint(key_size, sizeof(u32));
	__array(values, struct inner_lru); /* use inner_lru as inner map */
} array_of_lru_hashs SEC(".maps") = {
	/* statically initialize the first element */
	.values = { &inner_lru_hash_map },
};
/* Preallocated per-CPU hash map; exercised by stress_percpu_hmap(). */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
} percpu_hash_map SEC(".maps");
/* Hash map that allocates elements on demand (BPF_F_NO_PREALLOC),
 * so stress_hmap_alloc() measures the allocation path as well.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_map_alloc SEC(".maps");
/* Per-CPU hash map with on-demand allocation (BPF_F_NO_PREALLOC);
 * exercised by stress_percpu_hmap_alloc().
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} percpu_hash_map_alloc SEC(".maps");
/* LPM trie keyed by an 8-byte key: 4-byte prefix length followed by a
 * 4-byte IPv4 address (matches the union built in
 * stress_lpm_trie_map_alloc()). LPM tries require BPF_F_NO_PREALLOC.
 */
struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(key_size, 8);
	__uint(value_size, sizeof(long));
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} lpm_trie_map_alloc SEC(".maps");
/* Plain array map; read repeatedly by stress_array_map_lookup(). */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} array_map SEC(".maps");
/* LRU hash map used only for the lookup loop in
 * stress_lru_hmap_alloc() test case 3.
 */
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} lru_hash_lookup_map SEC(".maps");
  92. SEC("kprobe/" SYSCALL(sys_getuid))
  93. int stress_hmap(struct pt_regs *ctx)
  94. {
  95. u32 key = bpf_get_current_pid_tgid();
  96. long init_val = 1;
  97. long *value;
  98. int i;
  99. for (i = 0; i < 10; i++) {
  100. bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
  101. value = bpf_map_lookup_elem(&hash_map, &key);
  102. if (value)
  103. bpf_map_delete_elem(&hash_map, &key);
  104. }
  105. return 0;
  106. }
  107. SEC("kprobe/" SYSCALL(sys_geteuid))
  108. int stress_percpu_hmap(struct pt_regs *ctx)
  109. {
  110. u32 key = bpf_get_current_pid_tgid();
  111. long init_val = 1;
  112. long *value;
  113. int i;
  114. for (i = 0; i < 10; i++) {
  115. bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
  116. value = bpf_map_lookup_elem(&percpu_hash_map, &key);
  117. if (value)
  118. bpf_map_delete_elem(&percpu_hash_map, &key);
  119. }
  120. return 0;
  121. }
  122. SEC("kprobe/" SYSCALL(sys_getgid))
  123. int stress_hmap_alloc(struct pt_regs *ctx)
  124. {
  125. u32 key = bpf_get_current_pid_tgid();
  126. long init_val = 1;
  127. long *value;
  128. int i;
  129. for (i = 0; i < 10; i++) {
  130. bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
  131. value = bpf_map_lookup_elem(&hash_map_alloc, &key);
  132. if (value)
  133. bpf_map_delete_elem(&hash_map_alloc, &key);
  134. }
  135. return 0;
  136. }
  137. SEC("kprobe/" SYSCALL(sys_getegid))
  138. int stress_percpu_hmap_alloc(struct pt_regs *ctx)
  139. {
  140. u32 key = bpf_get_current_pid_tgid();
  141. long init_val = 1;
  142. long *value;
  143. int i;
  144. for (i = 0; i < 10; i++) {
  145. bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
  146. value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
  147. if (value)
  148. bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
  149. }
  150. return 0;
  151. }
  152. SEC("kprobe/" SYSCALL(sys_connect))
  153. int stress_lru_hmap_alloc(struct pt_regs *ctx)
  154. {
  155. struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
  156. char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%dn";
  157. union {
  158. u16 dst6[8];
  159. struct {
  160. u16 magic0;
  161. u16 magic1;
  162. u16 tcase;
  163. u16 unused16;
  164. u32 unused32;
  165. u32 key;
  166. };
  167. } test_params;
  168. struct sockaddr_in6 *in6;
  169. u16 test_case;
  170. int addrlen, ret;
  171. long val = 1;
  172. u32 key = 0;
  173. in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
  174. addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
  175. if (addrlen != sizeof(*in6))
  176. return 0;
  177. ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6),
  178. &in6->sin6_addr);
  179. if (ret)
  180. goto done;
  181. if (test_params.magic0 != 0xdead ||
  182. test_params.magic1 != 0xbeef)
  183. return 0;
  184. test_case = test_params.tcase;
  185. if (test_case != 3)
  186. key = bpf_get_prandom_u32();
  187. if (test_case == 0) {
  188. ret = bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY);
  189. } else if (test_case == 1) {
  190. ret = bpf_map_update_elem(&nocommon_lru_hash_map, &key, &val,
  191. BPF_ANY);
  192. } else if (test_case == 2) {
  193. void *nolocal_lru_map;
  194. int cpu = bpf_get_smp_processor_id();
  195. nolocal_lru_map = bpf_map_lookup_elem(&array_of_lru_hashs,
  196. &cpu);
  197. if (!nolocal_lru_map) {
  198. ret = -ENOENT;
  199. goto done;
  200. }
  201. ret = bpf_map_update_elem(nolocal_lru_map, &key, &val,
  202. BPF_ANY);
  203. } else if (test_case == 3) {
  204. u32 i;
  205. key = test_params.key;
  206. #pragma clang loop unroll(full)
  207. for (i = 0; i < 32; i++) {
  208. bpf_map_lookup_elem(&lru_hash_lookup_map, &key);
  209. key++;
  210. }
  211. } else {
  212. ret = -EINVAL;
  213. }
  214. done:
  215. if (ret)
  216. bpf_trace_printk(fmt, sizeof(fmt), ret);
  217. return 0;
  218. }
  219. SEC("kprobe/" SYSCALL(sys_gettid))
  220. int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
  221. {
  222. union {
  223. u32 b32[2];
  224. u8 b8[8];
  225. } key;
  226. unsigned int i;
  227. key.b32[0] = 32;
  228. key.b8[4] = 192;
  229. key.b8[5] = 168;
  230. key.b8[6] = 0;
  231. key.b8[7] = 1;
  232. #pragma clang loop unroll(full)
  233. for (i = 0; i < 32; ++i)
  234. bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);
  235. return 0;
  236. }
  237. SEC("kprobe/" SYSCALL(sys_getpgid))
  238. int stress_hash_map_lookup(struct pt_regs *ctx)
  239. {
  240. u32 key = 1, i;
  241. long *value;
  242. #pragma clang loop unroll(full)
  243. for (i = 0; i < 64; ++i)
  244. value = bpf_map_lookup_elem(&hash_map, &key);
  245. return 0;
  246. }
  247. SEC("kprobe/" SYSCALL(sys_getppid))
  248. int stress_array_map_lookup(struct pt_regs *ctx)
  249. {
  250. u32 key = 1, i;
  251. long *value;
  252. #pragma clang loop unroll(full)
  253. for (i = 0; i < 64; ++i)
  254. value = bpf_map_lookup_elem(&array_map, &key);
  255. return 0;
  256. }
/* GPL license string: required for the GPL-only BPF helpers used above. */
char _license[] SEC("license") = "GPL";
/* Kernel version recorded in the "version" section of the object file. */
u32 _version SEC("version") = LINUX_VERSION_CODE;