test_maps.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Testsuite for eBPF maps
  4. *
  5. * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
  6. * Copyright (c) 2016 Facebook
  7. */
  8. #include <stdio.h>
  9. #include <unistd.h>
  10. #include <errno.h>
  11. #include <string.h>
  12. #include <assert.h>
  13. #include <stdlib.h>
  14. #include <time.h>
  15. #include <sys/wait.h>
  16. #include <sys/socket.h>
  17. #include <netinet/in.h>
  18. #include <linux/bpf.h>
  19. #include <bpf/bpf.h>
  20. #include <bpf/libbpf.h>
  21. #include "bpf_util.h"
  22. #include "test_maps.h"
  23. #include "testing_helpers.h"
  24. #ifndef ENOTSUPP
  25. #define ENOTSUPP 524
  26. #endif
  27. int skips;
  28. static struct bpf_map_create_opts map_opts = { .sz = sizeof(map_opts) };
  29. static void test_hashmap(unsigned int task, void *data)
  30. {
  31. long long key, next_key, first_key, value;
  32. int fd;
  33. fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), 2, &map_opts);
  34. if (fd < 0) {
  35. printf("Failed to create hashmap '%s'!\n", strerror(errno));
  36. exit(1);
  37. }
  38. key = 1;
  39. value = 1234;
  40. /* Insert key=1 element. */
  41. assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
  42. value = 0;
  43. /* BPF_NOEXIST means add new element if it doesn't exist. */
  44. assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
  45. /* key=1 already exists. */
  46. errno == EEXIST);
  47. /* -1 is an invalid flag. */
  48. assert(bpf_map_update_elem(fd, &key, &value, -1) < 0 &&
  49. errno == EINVAL);
  50. /* Check that key=1 can be found. */
  51. assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234);
  52. key = 2;
  53. value = 1234;
  54. /* Insert key=2 element. */
  55. assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
  56. /* Check that key=2 matches the value and delete it */
  57. assert(bpf_map_lookup_and_delete_elem(fd, &key, &value) == 0 && value == 1234);
  58. /* Check that key=2 is not found. */
  59. assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == ENOENT);
  60. /* BPF_EXIST means update existing element. */
  61. assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) < 0 &&
  62. /* key=2 is not there. */
  63. errno == ENOENT);
  64. /* Insert key=2 element. */
  65. assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
  66. /* key=1 and key=2 were inserted, check that key=0 cannot be
  67. * inserted due to max_entries limit.
  68. */
  69. key = 0;
  70. assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
  71. errno == E2BIG);
  72. /* Update existing element, though the map is full. */
  73. key = 1;
  74. assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
  75. key = 2;
  76. assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
  77. key = 3;
  78. assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
  79. errno == E2BIG);
  80. /* Check that key = 0 doesn't exist. */
  81. key = 0;
  82. assert(bpf_map_delete_elem(fd, &key) < 0 && errno == ENOENT);
  83. /* Iterate over two elements. */
  84. assert(bpf_map_get_next_key(fd, NULL, &first_key) == 0 &&
  85. (first_key == 1 || first_key == 2));
  86. assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
  87. (next_key == first_key));
  88. assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
  89. (next_key == 1 || next_key == 2) &&
  90. (next_key != first_key));
  91. assert(bpf_map_get_next_key(fd, &next_key, &next_key) < 0 &&
  92. errno == ENOENT);
  93. /* Delete both elements. */
  94. key = 1;
  95. assert(bpf_map_delete_elem(fd, &key) == 0);
  96. key = 2;
  97. assert(bpf_map_delete_elem(fd, &key) == 0);
  98. assert(bpf_map_delete_elem(fd, &key) < 0 && errno == ENOENT);
  99. key = 0;
  100. /* Check that map is empty. */
  101. assert(bpf_map_get_next_key(fd, NULL, &next_key) < 0 &&
  102. errno == ENOENT);
  103. assert(bpf_map_get_next_key(fd, &key, &next_key) < 0 &&
  104. errno == ENOENT);
  105. close(fd);
  106. }
  107. static void test_hashmap_sizes(unsigned int task, void *data)
  108. {
  109. int fd, i, j;
  110. for (i = 1; i <= 512; i <<= 1)
  111. for (j = 1; j <= 1 << 18; j <<= 1) {
  112. fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, i, j, 2, &map_opts);
  113. if (fd < 0) {
  114. if (errno == ENOMEM)
  115. return;
  116. printf("Failed to create hashmap key=%d value=%d '%s'\n",
  117. i, j, strerror(errno));
  118. exit(1);
  119. }
  120. close(fd);
  121. usleep(10); /* give kernel time to destroy */
  122. }
  123. }
  124. static void test_hashmap_percpu(unsigned int task, void *data)
  125. {
  126. unsigned int nr_cpus = bpf_num_possible_cpus();
  127. BPF_DECLARE_PERCPU(long, value);
  128. long long key, next_key, first_key;
  129. int expected_key_mask = 0;
  130. int fd, i;
  131. fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_HASH, NULL, sizeof(key),
  132. sizeof(bpf_percpu(value, 0)), 2, &map_opts);
  133. if (fd < 0) {
  134. printf("Failed to create hashmap '%s'!\n", strerror(errno));
  135. exit(1);
  136. }
  137. for (i = 0; i < nr_cpus; i++)
  138. bpf_percpu(value, i) = i + 100;
  139. key = 1;
  140. /* Insert key=1 element. */
  141. assert(!(expected_key_mask & key));
  142. assert(bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0);
  143. /* Lookup and delete elem key=1 and check value. */
  144. assert(bpf_map_lookup_and_delete_elem(fd, &key, value) == 0 &&
  145. bpf_percpu(value, 0) == 100);
  146. for (i = 0; i < nr_cpus; i++)
  147. bpf_percpu(value, i) = i + 100;
  148. /* Insert key=1 element which should not exist. */
  149. assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == 0);
  150. expected_key_mask |= key;
  151. /* BPF_NOEXIST means add new element if it doesn't exist. */
  152. assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) < 0 &&
  153. /* key=1 already exists. */
  154. errno == EEXIST);
  155. /* -1 is an invalid flag. */
  156. assert(bpf_map_update_elem(fd, &key, value, -1) < 0 &&
  157. errno == EINVAL);
  158. /* Check that key=1 can be found. Value could be 0 if the lookup
  159. * was run from a different CPU.
  160. */
  161. bpf_percpu(value, 0) = 1;
  162. assert(bpf_map_lookup_elem(fd, &key, value) == 0 &&
  163. bpf_percpu(value, 0) == 100);
  164. key = 2;
  165. /* Check that key=2 is not found. */
  166. assert(bpf_map_lookup_elem(fd, &key, value) < 0 && errno == ENOENT);
  167. /* BPF_EXIST means update existing element. */
  168. assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) < 0 &&
  169. /* key=2 is not there. */
  170. errno == ENOENT);
  171. /* Insert key=2 element. */
  172. assert(!(expected_key_mask & key));
  173. assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == 0);
  174. expected_key_mask |= key;
  175. /* key=1 and key=2 were inserted, check that key=0 cannot be
  176. * inserted due to max_entries limit.
  177. */
  178. key = 0;
  179. assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) < 0 &&
  180. errno == E2BIG);
  181. /* Check that key = 0 doesn't exist. */
  182. assert(bpf_map_delete_elem(fd, &key) < 0 && errno == ENOENT);
  183. /* Iterate over two elements. */
  184. assert(bpf_map_get_next_key(fd, NULL, &first_key) == 0 &&
  185. ((expected_key_mask & first_key) == first_key));
  186. while (!bpf_map_get_next_key(fd, &key, &next_key)) {
  187. if (first_key) {
  188. assert(next_key == first_key);
  189. first_key = 0;
  190. }
  191. assert((expected_key_mask & next_key) == next_key);
  192. expected_key_mask &= ~next_key;
  193. assert(bpf_map_lookup_elem(fd, &next_key, value) == 0);
  194. for (i = 0; i < nr_cpus; i++)
  195. assert(bpf_percpu(value, i) == i + 100);
  196. key = next_key;
  197. }
  198. assert(errno == ENOENT);
  199. /* Update with BPF_EXIST. */
  200. key = 1;
  201. assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == 0);
  202. /* Delete both elements. */
  203. key = 1;
  204. assert(bpf_map_delete_elem(fd, &key) == 0);
  205. key = 2;
  206. assert(bpf_map_delete_elem(fd, &key) == 0);
  207. assert(bpf_map_delete_elem(fd, &key) < 0 && errno == ENOENT);
  208. key = 0;
  209. /* Check that map is empty. */
  210. assert(bpf_map_get_next_key(fd, NULL, &next_key) < 0 &&
  211. errno == ENOENT);
  212. assert(bpf_map_get_next_key(fd, &key, &next_key) < 0 &&
  213. errno == ENOENT);
  214. close(fd);
  215. }
  216. #define VALUE_SIZE 3
  217. static int helper_fill_hashmap(int max_entries)
  218. {
  219. int i, fd, ret;
  220. long long key, value[VALUE_SIZE] = {};
  221. fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value),
  222. max_entries, &map_opts);
  223. CHECK(fd < 0,
  224. "failed to create hashmap",
  225. "err: %s, flags: 0x%x\n", strerror(errno), map_opts.map_flags);
  226. for (i = 0; i < max_entries; i++) {
  227. key = i; value[0] = key;
  228. ret = bpf_map_update_elem(fd, &key, value, BPF_NOEXIST);
  229. CHECK(ret != 0,
  230. "can't update hashmap",
  231. "err: %s\n", strerror(ret));
  232. }
  233. return fd;
  234. }
  235. static void test_hashmap_walk(unsigned int task, void *data)
  236. {
  237. int fd, i, max_entries = 10000;
  238. long long key, value[VALUE_SIZE], next_key;
  239. bool next_key_valid = true;
  240. fd = helper_fill_hashmap(max_entries);
  241. for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key,
  242. &next_key) == 0; i++) {
  243. key = next_key;
  244. assert(bpf_map_lookup_elem(fd, &key, value) == 0);
  245. }
  246. assert(i == max_entries);
  247. assert(bpf_map_get_next_key(fd, NULL, &key) == 0);
  248. for (i = 0; next_key_valid; i++) {
  249. next_key_valid = bpf_map_get_next_key(fd, &key, &next_key) == 0;
  250. assert(bpf_map_lookup_elem(fd, &key, value) == 0);
  251. value[0]++;
  252. assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == 0);
  253. key = next_key;
  254. }
  255. assert(i == max_entries);
  256. for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key,
  257. &next_key) == 0; i++) {
  258. key = next_key;
  259. assert(bpf_map_lookup_elem(fd, &key, value) == 0);
  260. assert(value[0] - 1 == key);
  261. }
  262. assert(i == max_entries);
  263. close(fd);
  264. }
  265. static void test_hashmap_zero_seed(void)
  266. {
  267. int i, first, second, old_flags;
  268. long long key, next_first, next_second;
  269. old_flags = map_opts.map_flags;
  270. map_opts.map_flags |= BPF_F_ZERO_SEED;
  271. first = helper_fill_hashmap(3);
  272. second = helper_fill_hashmap(3);
  273. for (i = 0; ; i++) {
  274. void *key_ptr = !i ? NULL : &key;
  275. if (bpf_map_get_next_key(first, key_ptr, &next_first) != 0)
  276. break;
  277. CHECK(bpf_map_get_next_key(second, key_ptr, &next_second) != 0,
  278. "next_key for second map must succeed",
  279. "key_ptr: %p", key_ptr);
  280. CHECK(next_first != next_second,
  281. "keys must match",
  282. "i: %d first: %lld second: %lld\n", i,
  283. next_first, next_second);
  284. key = next_first;
  285. }
  286. map_opts.map_flags = old_flags;
  287. close(first);
  288. close(second);
  289. }
  290. static void test_arraymap(unsigned int task, void *data)
  291. {
  292. int key, next_key, fd;
  293. long long value;
  294. fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(key), sizeof(value), 2, NULL);
  295. if (fd < 0) {
  296. printf("Failed to create arraymap '%s'!\n", strerror(errno));
  297. exit(1);
  298. }
  299. key = 1;
  300. value = 1234;
  301. /* Insert key=1 element. */
  302. assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
  303. value = 0;
  304. assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
  305. errno == EEXIST);
  306. /* Check that key=1 can be found. */
  307. assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234);
  308. key = 0;
  309. /* Check that key=0 is also found and zero initialized. */
  310. assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0);
  311. /* key=0 and key=1 were inserted, check that key=2 cannot be inserted
  312. * due to max_entries limit.
  313. */
  314. key = 2;
  315. assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) < 0 &&
  316. errno == E2BIG);
  317. /* Check that key = 2 doesn't exist. */
  318. assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == ENOENT);
  319. /* Iterate over two elements. */
  320. assert(bpf_map_get_next_key(fd, NULL, &next_key) == 0 &&
  321. next_key == 0);
  322. assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
  323. next_key == 0);
  324. assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
  325. next_key == 1);
  326. assert(bpf_map_get_next_key(fd, &next_key, &next_key) < 0 &&
  327. errno == ENOENT);
  328. /* Delete shouldn't succeed. */
  329. key = 1;
  330. assert(bpf_map_delete_elem(fd, &key) < 0 && errno == EINVAL);
  331. close(fd);
  332. }
  333. static void test_arraymap_percpu(unsigned int task, void *data)
  334. {
  335. unsigned int nr_cpus = bpf_num_possible_cpus();
  336. BPF_DECLARE_PERCPU(long, values);
  337. int key, next_key, fd, i;
  338. fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL, sizeof(key),
  339. sizeof(bpf_percpu(values, 0)), 2, NULL);
  340. if (fd < 0) {
  341. printf("Failed to create arraymap '%s'!\n", strerror(errno));
  342. exit(1);
  343. }
  344. for (i = 0; i < nr_cpus; i++)
  345. bpf_percpu(values, i) = i + 100;
  346. key = 1;
  347. /* Insert key=1 element. */
  348. assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
  349. bpf_percpu(values, 0) = 0;
  350. assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) < 0 &&
  351. errno == EEXIST);
  352. /* Check that key=1 can be found. */
  353. assert(bpf_map_lookup_elem(fd, &key, values) == 0 &&
  354. bpf_percpu(values, 0) == 100);
  355. key = 0;
  356. /* Check that key=0 is also found and zero initialized. */
  357. assert(bpf_map_lookup_elem(fd, &key, values) == 0 &&
  358. bpf_percpu(values, 0) == 0 &&
  359. bpf_percpu(values, nr_cpus - 1) == 0);
  360. /* Check that key=2 cannot be inserted due to max_entries limit. */
  361. key = 2;
  362. assert(bpf_map_update_elem(fd, &key, values, BPF_EXIST) < 0 &&
  363. errno == E2BIG);
  364. /* Check that key = 2 doesn't exist. */
  365. assert(bpf_map_lookup_elem(fd, &key, values) < 0 && errno == ENOENT);
  366. /* Iterate over two elements. */
  367. assert(bpf_map_get_next_key(fd, NULL, &next_key) == 0 &&
  368. next_key == 0);
  369. assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
  370. next_key == 0);
  371. assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
  372. next_key == 1);
  373. assert(bpf_map_get_next_key(fd, &next_key, &next_key) < 0 &&
  374. errno == ENOENT);
  375. /* Delete shouldn't succeed. */
  376. key = 1;
  377. assert(bpf_map_delete_elem(fd, &key) < 0 && errno == EINVAL);
  378. close(fd);
  379. }
  380. static void test_arraymap_percpu_many_keys(void)
  381. {
  382. unsigned int nr_cpus = bpf_num_possible_cpus();
  383. BPF_DECLARE_PERCPU(long, values);
  384. /* nr_keys is not too large otherwise the test stresses percpu
  385. * allocator more than anything else
  386. */
  387. unsigned int nr_keys = 2000;
  388. int key, fd, i;
  389. fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL, sizeof(key),
  390. sizeof(bpf_percpu(values, 0)), nr_keys, NULL);
  391. if (fd < 0) {
  392. printf("Failed to create per-cpu arraymap '%s'!\n",
  393. strerror(errno));
  394. exit(1);
  395. }
  396. for (i = 0; i < nr_cpus; i++)
  397. bpf_percpu(values, i) = i + 10;
  398. for (key = 0; key < nr_keys; key++)
  399. assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
  400. for (key = 0; key < nr_keys; key++) {
  401. for (i = 0; i < nr_cpus; i++)
  402. bpf_percpu(values, i) = 0;
  403. assert(bpf_map_lookup_elem(fd, &key, values) == 0);
  404. for (i = 0; i < nr_cpus; i++)
  405. assert(bpf_percpu(values, i) == i + 10);
  406. }
  407. close(fd);
  408. }
  409. static void test_devmap(unsigned int task, void *data)
  410. {
  411. int fd;
  412. __u32 key, value;
  413. fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, NULL, sizeof(key), sizeof(value), 2, NULL);
  414. if (fd < 0) {
  415. printf("Failed to create devmap '%s'!\n", strerror(errno));
  416. exit(1);
  417. }
  418. close(fd);
  419. }
  420. static void test_devmap_hash(unsigned int task, void *data)
  421. {
  422. int fd;
  423. __u32 key, value;
  424. fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP_HASH, NULL, sizeof(key), sizeof(value), 2, NULL);
  425. if (fd < 0) {
  426. printf("Failed to create devmap_hash '%s'!\n", strerror(errno));
  427. exit(1);
  428. }
  429. close(fd);
  430. }
  431. static void test_queuemap(unsigned int task, void *data)
  432. {
  433. const int MAP_SIZE = 32;
  434. __u32 vals[MAP_SIZE + MAP_SIZE/2], val;
  435. int fd, i;
  436. /* Fill test values to be used */
  437. for (i = 0; i < MAP_SIZE + MAP_SIZE/2; i++)
  438. vals[i] = rand();
  439. /* Invalid key size */
  440. fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, NULL, 4, sizeof(val), MAP_SIZE, &map_opts);
  441. assert(fd < 0 && errno == EINVAL);
  442. fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, NULL, 0, sizeof(val), MAP_SIZE, &map_opts);
  443. /* Queue map does not support BPF_F_NO_PREALLOC */
  444. if (map_opts.map_flags & BPF_F_NO_PREALLOC) {
  445. assert(fd < 0 && errno == EINVAL);
  446. return;
  447. }
  448. if (fd < 0) {
  449. printf("Failed to create queuemap '%s'!\n", strerror(errno));
  450. exit(1);
  451. }
  452. /* Push MAP_SIZE elements */
  453. for (i = 0; i < MAP_SIZE; i++)
  454. assert(bpf_map_update_elem(fd, NULL, &vals[i], 0) == 0);
  455. /* Check that element cannot be pushed due to max_entries limit */
  456. assert(bpf_map_update_elem(fd, NULL, &val, 0) < 0 &&
  457. errno == E2BIG);
  458. /* Peek element */
  459. assert(bpf_map_lookup_elem(fd, NULL, &val) == 0 && val == vals[0]);
  460. /* Replace half elements */
  461. for (i = MAP_SIZE; i < MAP_SIZE + MAP_SIZE/2; i++)
  462. assert(bpf_map_update_elem(fd, NULL, &vals[i], BPF_EXIST) == 0);
  463. /* Pop all elements */
  464. for (i = MAP_SIZE/2; i < MAP_SIZE + MAP_SIZE/2; i++)
  465. assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) == 0 &&
  466. val == vals[i]);
  467. /* Check that there are no elements left */
  468. assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) < 0 &&
  469. errno == ENOENT);
  470. /* Check that unsupported functions set errno to EINVAL */
  471. assert(bpf_map_delete_elem(fd, NULL) < 0 && errno == EINVAL);
  472. assert(bpf_map_get_next_key(fd, NULL, NULL) < 0 && errno == EINVAL);
  473. close(fd);
  474. }
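/* Same exercise as test_queuemap() above, but for a LIFO stack map: peek
 * returns the most recently pushed value and pops come back in reverse
 * push order.
 */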
  475. static void test_stackmap(unsigned int task, void *data)
  476. {
  477. const int MAP_SIZE = 32;
  478. __u32 vals[MAP_SIZE + MAP_SIZE/2], val;
  479. int fd, i;
  480. /* Fill test values to be used */
  481. for (i = 0; i < MAP_SIZE + MAP_SIZE/2; i++)
  482. vals[i] = rand();
  483. /* Invalid key size */
  484. fd = bpf_map_create(BPF_MAP_TYPE_STACK, NULL, 4, sizeof(val), MAP_SIZE, &map_opts);
  485. assert(fd < 0 && errno == EINVAL);
  486. fd = bpf_map_create(BPF_MAP_TYPE_STACK, NULL, 0, sizeof(val), MAP_SIZE, &map_opts);
  487. /* Stack map does not support BPF_F_NO_PREALLOC */
  488. if (map_opts.map_flags & BPF_F_NO_PREALLOC) {
  489. assert(fd < 0 && errno == EINVAL);
  490. return;
  491. }
  492. if (fd < 0) {
  493. printf("Failed to create stackmap '%s'!\n", strerror(errno));
  494. exit(1);
  495. }
  496. /* Push MAP_SIZE elements */
  497. for (i = 0; i < MAP_SIZE; i++)
  498. assert(bpf_map_update_elem(fd, NULL, &vals[i], 0) == 0);
  499. /* Check that element cannot be pushed due to max_entries limit */
  500. assert(bpf_map_update_elem(fd, NULL, &val, 0) < 0 &&
  501. errno == E2BIG);
  502. /* Peek element */
  503. assert(bpf_map_lookup_elem(fd, NULL, &val) == 0 && val == vals[i - 1]);
  504. /* Replace half elements */
  505. for (i = MAP_SIZE; i < MAP_SIZE + MAP_SIZE/2; i++)
  506. assert(bpf_map_update_elem(fd, NULL, &vals[i], BPF_EXIST) == 0);
  507. /* Pop all elements */
  508. for (i = MAP_SIZE + MAP_SIZE/2 - 1; i >= MAP_SIZE/2; i--)
  509. assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) == 0 &&
  510. val == vals[i]);
  511. /* Check that there are no elements left */
  512. assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) < 0 &&
  513. errno == ENOENT);
  514. /* Check that unsupported functions set errno to EINVAL */
  515. assert(bpf_map_delete_elem(fd, NULL) < 0 && errno == EINVAL);
  516. assert(bpf_map_get_next_key(fd, NULL, NULL) < 0 && errno == EINVAL);
  517. close(fd);
  518. }
  519. #include <sys/ioctl.h>
  520. #include <arpa/inet.h>
  521. #include <sys/select.h>
  522. #include <linux/err.h>
  523. #define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.bpf.o"
  524. #define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.bpf.o"
  525. #define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.bpf.o"
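/* End-to-end sockmap test: set up listening and connected TCP socket pairs
 * on localhost, exercise map updates/deletes with and without SK_SKB/SK_MSG
 * programs attached, push data through the redirected sockets, and fork
 * tasks that update and delete entries concurrently.
 */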
  526. static void test_sockmap(unsigned int tasks, void *data)
  527. {
  528. struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break;
  529. int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break;
  530. struct bpf_object *parse_obj, *verdict_obj, *msg_obj;
  531. int ports[] = {50200, 50201, 50202, 50204};
  532. int err, i, fd, udp, sfd[6] = {0xdeadbeef};
  533. u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0};
  534. int parse_prog, verdict_prog, msg_prog;
  535. struct sockaddr_in addr;
  536. int one = 1, s, sc, rc;
  537. struct timeval to;
  538. __u32 key, value;
  539. pid_t pid[tasks];
  540. fd_set w;
  541. /* Create some sockets to use with sockmap */
  542. for (i = 0; i < 2; i++) {
  543. sfd[i] = socket(AF_INET, SOCK_STREAM, 0);
  544. if (sfd[i] < 0)
  545. goto out;
  546. err = setsockopt(sfd[i], SOL_SOCKET, SO_REUSEADDR,
  547. (char *)&one, sizeof(one));
  548. if (err) {
  549. printf("failed to setsockopt\n");
  550. goto out;
  551. }
  552. err = ioctl(sfd[i], FIONBIO, (char *)&one);
  553. if (err < 0) {
  554. printf("failed to ioctl\n");
  555. goto out;
  556. }
  557. memset(&addr, 0, sizeof(struct sockaddr_in));
  558. addr.sin_family = AF_INET;
  559. addr.sin_addr.s_addr = inet_addr("127.0.0.1");
  560. addr.sin_port = htons(ports[i]);
  561. err = bind(sfd[i], (struct sockaddr *)&addr, sizeof(addr));
  562. if (err < 0) {
  563. printf("failed to bind: err %i: %i:%i\n",
  564. err, i, sfd[i]);
  565. goto out;
  566. }
  567. err = listen(sfd[i], 32);
  568. if (err < 0) {
  569. printf("failed to listen\n");
  570. goto out;
  571. }
  572. }
  573. for (i = 2; i < 4; i++) {
  574. sfd[i] = socket(AF_INET, SOCK_STREAM, 0);
  575. if (sfd[i] < 0)
  576. goto out;
  577. err = setsockopt(sfd[i], SOL_SOCKET, SO_REUSEADDR,
  578. (char *)&one, sizeof(one));
  579. if (err) {
  580. printf("set sock opt\n");
  581. goto out;
  582. }
  583. memset(&addr, 0, sizeof(struct sockaddr_in));
  584. addr.sin_family = AF_INET;
  585. addr.sin_addr.s_addr = inet_addr("127.0.0.1");
  586. addr.sin_port = htons(ports[i - 2]);
  587. err = connect(sfd[i], (struct sockaddr *)&addr, sizeof(addr));
  588. if (err) {
  589. printf("failed to connect\n");
  590. goto out;
  591. }
  592. }
  593. for (i = 4; i < 6; i++) {
  594. sfd[i] = accept(sfd[i - 4], NULL, NULL);
  595. if (sfd[i] < 0) {
  596. printf("accept failed\n");
  597. goto out;
  598. }
  599. }
  600. /* Test sockmap with connected sockets */
  601. fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL,
  602. sizeof(key), sizeof(value),
  603. 6, NULL);
  604. if (fd < 0) {
  605. if (!libbpf_probe_bpf_map_type(BPF_MAP_TYPE_SOCKMAP, NULL)) {
  606. printf("%s SKIP (unsupported map type BPF_MAP_TYPE_SOCKMAP)\n",
  607. __func__);
  608. skips++;
  609. for (i = 0; i < 6; i++)
  610. close(sfd[i]);
  611. return;
  612. }
  613. printf("Failed to create sockmap %i\n", fd);
  614. goto out_sockmap;
  615. }
  616. /* Test update with unsupported UDP socket */
  617. udp = socket(AF_INET, SOCK_DGRAM, 0);
  618. i = 0;
  619. err = bpf_map_update_elem(fd, &i, &udp, BPF_ANY);
  620. if (err) {
  621. printf("Failed socket update SOCK_DGRAM '%i:%i'\n",
  622. i, udp);
  623. goto out_sockmap;
  624. }
  625. close(udp);
  626. /* Test update without programs */
  627. for (i = 0; i < 6; i++) {
  628. err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
  629. if (err) {
  630. printf("Failed noprog update sockmap '%i:%i'\n",
  631. i, sfd[i]);
  632. goto out_sockmap;
  633. }
  634. }
  635. /* Test attaching/detaching bad fds */
  636. err = bpf_prog_attach(-1, fd, BPF_SK_SKB_STREAM_PARSER, 0);
  637. if (!err) {
  638. printf("Failed invalid parser prog attach\n");
  639. goto out_sockmap;
  640. }
  641. err = bpf_prog_attach(-1, fd, BPF_SK_SKB_STREAM_VERDICT, 0);
  642. if (!err) {
  643. printf("Failed invalid verdict prog attach\n");
  644. goto out_sockmap;
  645. }
  646. err = bpf_prog_attach(-1, fd, BPF_SK_MSG_VERDICT, 0);
  647. if (!err) {
  648. printf("Failed invalid msg verdict prog attach\n");
  649. goto out_sockmap;
  650. }
  651. err = bpf_prog_attach(-1, fd, __MAX_BPF_ATTACH_TYPE, 0);
  652. if (!err) {
  653. printf("Failed unknown prog attach\n");
  654. goto out_sockmap;
  655. }
  656. err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER);
  657. if (!err) {
  658. printf("Failed empty parser prog detach\n");
  659. goto out_sockmap;
  660. }
  661. err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT);
  662. if (!err) {
  663. printf("Failed empty verdict prog detach\n");
  664. goto out_sockmap;
  665. }
  666. err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT);
  667. if (!err) {
  668. printf("Failed empty msg verdict prog detach\n");
  669. goto out_sockmap;
  670. }
  671. err = bpf_prog_detach(fd, __MAX_BPF_ATTACH_TYPE);
  672. if (!err) {
  673. printf("Detach invalid prog successful\n");
  674. goto out_sockmap;
  675. }
  676. /* Load SK_SKB program and Attach */
  677. err = bpf_prog_test_load(SOCKMAP_PARSE_PROG,
  678. BPF_PROG_TYPE_SK_SKB, &parse_obj, &parse_prog);
  679. if (err) {
  680. printf("Failed to load SK_SKB parse prog\n");
  681. goto out_sockmap;
  682. }
  683. err = bpf_prog_test_load(SOCKMAP_TCP_MSG_PROG,
  684. BPF_PROG_TYPE_SK_MSG, &msg_obj, &msg_prog);
  685. if (err) {
  686. printf("Failed to load SK_SKB msg prog\n");
  687. goto out_sockmap;
  688. }
  689. err = bpf_prog_test_load(SOCKMAP_VERDICT_PROG,
  690. BPF_PROG_TYPE_SK_SKB, &verdict_obj, &verdict_prog);
  691. if (err) {
  692. printf("Failed to load SK_SKB verdict prog\n");
  693. goto out_sockmap;
  694. }
  695. bpf_map_rx = bpf_object__find_map_by_name(verdict_obj, "sock_map_rx");
  696. if (!bpf_map_rx) {
  697. printf("Failed to load map rx from verdict prog\n");
  698. goto out_sockmap;
  699. }
  700. map_fd_rx = bpf_map__fd(bpf_map_rx);
  701. if (map_fd_rx < 0) {
  702. printf("Failed to get map rx fd\n");
  703. goto out_sockmap;
  704. }
  705. bpf_map_tx = bpf_object__find_map_by_name(verdict_obj, "sock_map_tx");
  706. if (!bpf_map_tx) {
  707. printf("Failed to load map tx from verdict prog\n");
  708. goto out_sockmap;
  709. }
  710. map_fd_tx = bpf_map__fd(bpf_map_tx);
  711. if (map_fd_tx < 0) {
  712. printf("Failed to get map tx fd\n");
  713. goto out_sockmap;
  714. }
  715. bpf_map_msg = bpf_object__find_map_by_name(verdict_obj, "sock_map_msg");
  716. if (!bpf_map_msg) {
  717. printf("Failed to load map msg from msg_verdict prog\n");
  718. goto out_sockmap;
  719. }
  720. map_fd_msg = bpf_map__fd(bpf_map_msg);
  721. if (map_fd_msg < 0) {
  722. printf("Failed to get map msg fd\n");
  723. goto out_sockmap;
  724. }
  725. bpf_map_break = bpf_object__find_map_by_name(verdict_obj, "sock_map_break");
  726. if (!bpf_map_break) {
  727. printf("Failed to load map tx from verdict prog\n");
  728. goto out_sockmap;
  729. }
  730. map_fd_break = bpf_map__fd(bpf_map_break);
  731. if (map_fd_break < 0) {
  732. printf("Failed to get map tx fd\n");
  733. goto out_sockmap;
  734. }
  735. err = bpf_prog_attach(parse_prog, map_fd_break,
  736. BPF_SK_SKB_STREAM_PARSER, 0);
  737. if (!err) {
  738. printf("Allowed attaching SK_SKB program to invalid map\n");
  739. goto out_sockmap;
  740. }
  741. err = bpf_prog_attach(parse_prog, map_fd_rx,
  742. BPF_SK_SKB_STREAM_PARSER, 0);
  743. if (err) {
  744. printf("Failed stream parser bpf prog attach\n");
  745. goto out_sockmap;
  746. }
  747. err = bpf_prog_attach(verdict_prog, map_fd_rx,
  748. BPF_SK_SKB_STREAM_VERDICT, 0);
  749. if (err) {
  750. printf("Failed stream verdict bpf prog attach\n");
  751. goto out_sockmap;
  752. }
  753. err = bpf_prog_attach(msg_prog, map_fd_msg, BPF_SK_MSG_VERDICT, 0);
  754. if (err) {
  755. printf("Failed msg verdict bpf prog attach\n");
  756. goto out_sockmap;
  757. }
  758. err = bpf_prog_attach(verdict_prog, map_fd_rx,
  759. __MAX_BPF_ATTACH_TYPE, 0);
  760. if (!err) {
  761. printf("Attached unknown bpf prog\n");
  762. goto out_sockmap;
  763. }
  764. /* Test map update elem; afterwards the socket fds live in both fd and map_fd_rx/map_fd_tx */
  765. for (i = 2; i < 6; i++) {
  766. err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY);
  767. if (err) {
  768. printf("Failed map_fd_rx update sockmap %i '%i:%i'\n",
  769. err, i, sfd[i]);
  770. goto out_sockmap;
  771. }
  772. err = bpf_map_update_elem(map_fd_tx, &i, &sfd[i], BPF_ANY);
  773. if (err) {
  774. printf("Failed map_fd_tx update sockmap %i '%i:%i'\n",
  775. err, i, sfd[i]);
  776. goto out_sockmap;
  777. }
  778. }
  779. /* Test map delete elem and remove send/recv sockets */
  780. for (i = 2; i < 4; i++) {
  781. err = bpf_map_delete_elem(map_fd_rx, &i);
  782. if (err) {
  783. printf("Failed delete sockmap rx %i '%i:%i'\n",
  784. err, i, sfd[i]);
  785. goto out_sockmap;
  786. }
  787. err = bpf_map_delete_elem(map_fd_tx, &i);
  788. if (err) {
  789. printf("Failed delete sockmap tx %i '%i:%i'\n",
  790. err, i, sfd[i]);
  791. goto out_sockmap;
  792. }
  793. }
  794. /* Put sfd[2] (sending fd below) into msg map to test sendmsg bpf */
  795. i = 0;
  796. err = bpf_map_update_elem(map_fd_msg, &i, &sfd[2], BPF_ANY);
  797. if (err) {
  798. printf("Failed map_fd_msg update sockmap %i\n", err);
  799. goto out_sockmap;
  800. }
  801. /* Test map send/recv */
  802. for (i = 0; i < 2; i++) {
  803. buf[0] = i;
  804. buf[1] = 0x5;
  805. sc = send(sfd[2], buf, 20, 0);
  806. if (sc < 0) {
  807. printf("Failed sockmap send\n");
  808. goto out_sockmap;
  809. }
  810. FD_ZERO(&w);
  811. FD_SET(sfd[3], &w);
  812. to.tv_sec = 30;
  813. to.tv_usec = 0;
  814. s = select(sfd[3] + 1, &w, NULL, NULL, &to);
  815. if (s == -1) {
  816. perror("Failed sockmap select()");
  817. goto out_sockmap;
  818. } else if (!s) {
  819. printf("Failed sockmap unexpected timeout\n");
  820. goto out_sockmap;
  821. }
  822. if (!FD_ISSET(sfd[3], &w)) {
  823. printf("Failed sockmap select/recv\n");
  824. goto out_sockmap;
  825. }
  826. rc = recv(sfd[3], buf, sizeof(buf), 0);
  827. if (rc < 0) {
  828. printf("Failed sockmap recv\n");
  829. goto out_sockmap;
  830. }
  831. }
  832. /* Negative null entry lookup from datapath should be dropped */
  833. buf[0] = 1;
  834. buf[1] = 12;
  835. sc = send(sfd[2], buf, 20, 0);
  836. if (sc < 0) {
  837. printf("Failed sockmap send\n");
  838. goto out_sockmap;
  839. }
  840. /* Push fd into same slot */
  841. i = 2;
  842. err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_NOEXIST);
  843. if (!err) {
  844. printf("Failed allowed sockmap dup slot BPF_NOEXIST\n");
  845. goto out_sockmap;
  846. }
  847. err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
  848. if (err) {
  849. printf("Failed sockmap update new slot BPF_ANY\n");
  850. goto out_sockmap;
  851. }
  852. err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_EXIST);
  853. if (err) {
  854. printf("Failed sockmap update new slot BPF_EXIST\n");
  855. goto out_sockmap;
  856. }
  857. /* Delete the elems without programs */
  858. for (i = 2; i < 6; i++) {
  859. err = bpf_map_delete_elem(fd, &i);
  860. if (err) {
  861. printf("Failed delete sockmap %i '%i:%i'\n",
  862. err, i, sfd[i]);
  863. }
  864. }
  865. /* Test having multiple maps open and set with programs on same fds */
  866. err = bpf_prog_attach(parse_prog, fd,
  867. BPF_SK_SKB_STREAM_PARSER, 0);
  868. if (err) {
  869. printf("Failed fd bpf parse prog attach\n");
  870. goto out_sockmap;
  871. }
  872. err = bpf_prog_attach(verdict_prog, fd,
  873. BPF_SK_SKB_STREAM_VERDICT, 0);
  874. if (err) {
  875. printf("Failed fd bpf verdict prog attach\n");
  876. goto out_sockmap;
  877. }
  878. for (i = 4; i < 6; i++) {
  879. err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
  880. if (!err) {
  881. printf("Failed allowed duplicate programs in update ANY sockmap %i '%i:%i'\n",
  882. err, i, sfd[i]);
  883. goto out_sockmap;
  884. }
  885. err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_NOEXIST);
  886. if (!err) {
  887. printf("Failed allowed duplicate program in update NOEXIST sockmap %i '%i:%i'\n",
  888. err, i, sfd[i]);
  889. goto out_sockmap;
  890. }
  891. err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_EXIST);
  892. if (!err) {
  893. printf("Failed allowed duplicate program in update EXIST sockmap %i '%i:%i'\n",
  894. err, i, sfd[i]);
  895. goto out_sockmap;
  896. }
  897. }
  898. /* Fork 'tasks' children that delete and re-add sockmap entries in parallel */
  899. for (i = 0; i < tasks; i++) {
  900. pid[i] = fork();
  901. if (pid[i] == 0) {
  902. for (i = 0; i < 6; i++) {
  903. bpf_map_delete_elem(map_fd_tx, &i);
  904. bpf_map_delete_elem(map_fd_rx, &i);
  905. bpf_map_update_elem(map_fd_tx, &i,
  906. &sfd[i], BPF_ANY);
  907. bpf_map_update_elem(map_fd_rx, &i,
  908. &sfd[i], BPF_ANY);
  909. }
  910. exit(0);
  911. } else if (pid[i] == -1) {
  912. printf("Couldn't spawn #%d process!\n", i);
  913. exit(1);
  914. }
  915. }
  916. for (i = 0; i < tasks; i++) {
  917. int status;
  918. assert(waitpid(pid[i], &status, 0) == pid[i]);
  919. assert(status == 0);
  920. }
  921. err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE);
  922. if (!err) {
  923. printf("Detached an invalid prog type.\n");
  924. goto out_sockmap;
  925. }
  926. err = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
  927. if (err) {
  928. printf("Failed parser prog detach\n");
  929. goto out_sockmap;
  930. }
  931. err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
  932. if (err) {
  933. printf("Failed parser prog detach\n");
  934. goto out_sockmap;
  935. }
  936. /* Test map close sockets and empty maps */
  937. for (i = 0; i < 6; i++) {
  938. bpf_map_delete_elem(map_fd_tx, &i);
  939. bpf_map_delete_elem(map_fd_rx, &i);
  940. close(sfd[i]);
  941. }
  942. close(fd);
  943. close(map_fd_rx);
  944. bpf_object__close(parse_obj);
  945. bpf_object__close(msg_obj);
  946. bpf_object__close(verdict_obj);
  947. return;
  948. out:
  949. for (i = 0; i < 6; i++)
  950. close(sfd[i]);
  951. printf("Failed to create sockmap '%i:%s'!\n", i, strerror(errno));
  952. exit(1);
  953. out_sockmap:
  954. for (i = 0; i < 6; i++) {
  955. if (map_fd_tx)
  956. bpf_map_delete_elem(map_fd_tx, &i);
  957. if (map_fd_rx)
  958. bpf_map_delete_elem(map_fd_rx, &i);
  959. close(sfd[i]);
  960. }
  961. close(fd);
  962. exit(1);
  963. }
  964. #define MAPINMAP_PROG "./test_map_in_map.bpf.o"
  965. #define MAPINMAP_INVALID_PROG "./test_map_in_map_invalid.bpf.o"
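/* Map-in-map test: point mim_array/mim_hash at an inner hash map via
 * bpf_map__set_inner_map_fd(), load the object, store the inner map fd in
 * both outer maps, then verify that a failed load of the invalid program
 * does not leak the implicitly created inner map.
 */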
  966. static void test_map_in_map(void)
  967. {
  968. struct bpf_object *obj;
  969. struct bpf_map *map;
  970. int mim_fd, fd, err;
  971. int pos = 0;
  972. struct bpf_map_info info = {};
  973. __u32 len = sizeof(info);
  974. __u32 id = 0;
  975. libbpf_print_fn_t old_print_fn;
  976. obj = bpf_object__open(MAPINMAP_PROG);
  977. fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int), sizeof(int), 2, NULL);
  978. if (fd < 0) {
  979. printf("Failed to create hashmap '%s'!\n", strerror(errno));
  980. exit(1);
  981. }
  982. map = bpf_object__find_map_by_name(obj, "mim_array");
  983. if (!map) {
  984. printf("Failed to load array of maps from test prog\n");
  985. goto out_map_in_map;
  986. }
  987. err = bpf_map__set_inner_map_fd(map, fd);
  988. if (err) {
  989. printf("Failed to set inner_map_fd for array of maps\n");
  990. goto out_map_in_map;
  991. }
  992. map = bpf_object__find_map_by_name(obj, "mim_hash");
  993. if (!map) {
  994. printf("Failed to load hash of maps from test prog\n");
  995. goto out_map_in_map;
  996. }
  997. err = bpf_map__set_inner_map_fd(map, fd);
  998. if (err) {
  999. printf("Failed to set inner_map_fd for hash of maps\n");
  1000. goto out_map_in_map;
  1001. }
  1002. bpf_object__load(obj);
  1003. map = bpf_object__find_map_by_name(obj, "mim_array");
  1004. if (!map) {
  1005. printf("Failed to load array of maps from test prog\n");
  1006. goto out_map_in_map;
  1007. }
  1008. mim_fd = bpf_map__fd(map);
  1009. if (mim_fd < 0) {
  1010. printf("Failed to get descriptor for array of maps\n");
  1011. goto out_map_in_map;
  1012. }
  1013. err = bpf_map_update_elem(mim_fd, &pos, &fd, 0);
  1014. if (err) {
  1015. printf("Failed to update array of maps\n");
  1016. goto out_map_in_map;
  1017. }
  1018. map = bpf_object__find_map_by_name(obj, "mim_hash");
  1019. if (!map) {
  1020. printf("Failed to load hash of maps from test prog\n");
  1021. goto out_map_in_map;
  1022. }
  1023. mim_fd = bpf_map__fd(map);
  1024. if (mim_fd < 0) {
  1025. printf("Failed to get descriptor for hash of maps\n");
  1026. goto out_map_in_map;
  1027. }
  1028. err = bpf_map_update_elem(mim_fd, &pos, &fd, 0);
  1029. if (err) {
  1030. printf("Failed to update hash of maps\n");
  1031. goto out_map_in_map;
  1032. }
  1033. close(fd);
  1034. fd = -1;
  1035. bpf_object__close(obj);
  1036. /* Test that failing bpf_object__create_map() destroys the inner map */
  1037. obj = bpf_object__open(MAPINMAP_INVALID_PROG);
  1038. err = libbpf_get_error(obj);
  1039. if (err) {
  1040. printf("Failed to load %s program: %d %d",
  1041. MAPINMAP_INVALID_PROG, err, errno);
  1042. goto out_map_in_map;
  1043. }
  1044. map = bpf_object__find_map_by_name(obj, "mim");
  1045. if (!map) {
  1046. printf("Failed to load array of maps from test prog\n");
  1047. goto out_map_in_map;
  1048. }
  1049. old_print_fn = libbpf_set_print(NULL);
  1050. err = bpf_object__load(obj);
  1051. if (!err) {
  1052. printf("Loading obj supposed to fail\n");
  1053. goto out_map_in_map;
  1054. }
  1055. libbpf_set_print(old_print_fn);
  1056. /* Iterate over all maps to check whether the internal map
  1057. * ("mim.internal") has been destroyed.
  1058. */
  1059. while (true) {
  1060. err = bpf_map_get_next_id(id, &id);
  1061. if (err) {
  1062. if (errno == ENOENT)
  1063. break;
  1064. printf("Failed to get next map: %d", errno);
  1065. goto out_map_in_map;
  1066. }
  1067. fd = bpf_map_get_fd_by_id(id);
  1068. if (fd < 0) {
  1069. if (errno == ENOENT)
  1070. continue;
  1071. printf("Failed to get map by id %u: %d", id, errno);
  1072. goto out_map_in_map;
  1073. }
  1074. err = bpf_obj_get_info_by_fd(fd, &info, &len);
  1075. if (err) {
  1076. printf("Failed to get map info by fd %d: %d", fd,
  1077. errno);
  1078. goto out_map_in_map;
  1079. }
  1080. if (!strcmp(info.name, "mim.inner")) {
  1081. printf("Inner map mim.inner was not destroyed\n");
  1082. goto out_map_in_map;
  1083. }
  1084. close(fd);
  1085. }
  1086. bpf_object__close(obj);
  1087. return;
  1088. out_map_in_map:
  1089. if (fd >= 0)
  1090. close(fd);
  1091. exit(1);
  1092. }
  1093. #define MAP_SIZE (32 * 1024)
  1094. static void test_map_large(void)
  1095. {
  1096. struct bigkey {
  1097. int a;
  1098. char b[4096];
  1099. long long c;
  1100. } key;
  1101. int fd, i, value;
  1102. fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value),
  1103. MAP_SIZE, &map_opts);
  1104. if (fd < 0) {
  1105. printf("Failed to create large map '%s'!\n", strerror(errno));
  1106. exit(1);
  1107. }
  1108. for (i = 0; i < MAP_SIZE; i++) {
  1109. key = (struct bigkey) { .c = i };
  1110. value = i;
  1111. assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
  1112. }
  1113. key.c = -1;
  1114. assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
  1115. errno == E2BIG);
  1116. /* Iterate through all elements. */
  1117. assert(bpf_map_get_next_key(fd, NULL, &key) == 0);
  1118. key.c = -1;
  1119. for (i = 0; i < MAP_SIZE; i++)
  1120. assert(bpf_map_get_next_key(fd, &key, &key) == 0);
  1121. assert(bpf_map_get_next_key(fd, &key, &key) < 0 && errno == ENOENT);
  1122. key.c = 0;
  1123. assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0);
  1124. key.a = 1;
  1125. assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == ENOENT);
  1126. close(fd);
  1127. }
  1128. #define run_parallel(N, FN, DATA) \
  1129. printf("Fork %u tasks to '" #FN "'\n", N); \
  1130. __run_parallel(N, FN, DATA)
  1131. static void __run_parallel(unsigned int tasks,
  1132. void (*fn)(unsigned int task, void *data),
  1133. void *data)
  1134. {
  1135. pid_t pid[tasks];
  1136. int i;
  1137. fflush(stdout);
  1138. for (i = 0; i < tasks; i++) {
  1139. pid[i] = fork();
  1140. if (pid[i] == 0) {
  1141. fn(i, data);
  1142. exit(0);
  1143. } else if (pid[i] == -1) {
  1144. printf("Couldn't spawn #%d process!\n", i);
  1145. exit(1);
  1146. }
  1147. }
  1148. for (i = 0; i < tasks; i++) {
  1149. int status;
  1150. assert(waitpid(pid[i], &status, 0) == pid[i]);
  1151. assert(status == 0);
  1152. }
  1153. }
  1154. static void test_map_stress(void)
  1155. {
  1156. run_parallel(100, test_hashmap_walk, NULL);
  1157. run_parallel(100, test_hashmap, NULL);
  1158. run_parallel(100, test_hashmap_percpu, NULL);
  1159. run_parallel(100, test_hashmap_sizes, NULL);
  1160. run_parallel(100, test_arraymap, NULL);
  1161. run_parallel(100, test_arraymap_percpu, NULL);
  1162. }
  1163. #define TASKS 100
  1164. #define DO_UPDATE 1
  1165. #define DO_DELETE 0
  1166. #define MAP_RETRIES 20
  1167. #define MAX_DELAY_US 50000
  1168. #define MIN_DELAY_RANGE_US 5000
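/* Retry helpers for the parallel tests: keep retrying updates/deletes that
 * fail with EAGAIN or EBUSY, doubling a randomized delay (bounded by
 * MAX_DELAY_US) between attempts, and give up after 'attempts' retries.
 */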
  1169. static int map_update_retriable(int map_fd, const void *key, const void *value,
  1170. int flags, int attempts)
  1171. {
  1172. int delay = rand() % MIN_DELAY_RANGE_US;
  1173. while (bpf_map_update_elem(map_fd, key, value, flags)) {
  1174. if (!attempts || (errno != EAGAIN && errno != EBUSY))
  1175. return -errno;
  1176. if (delay <= MAX_DELAY_US / 2)
  1177. delay *= 2;
  1178. usleep(delay);
  1179. attempts--;
  1180. }
  1181. return 0;
  1182. }
  1183. static int map_delete_retriable(int map_fd, const void *key, int attempts)
  1184. {
  1185. int delay = rand() % MIN_DELAY_RANGE_US;
  1186. while (bpf_map_delete_elem(map_fd, key)) {
  1187. if (!attempts || (errno != EAGAIN && errno != EBUSY))
  1188. return -errno;
  1189. if (delay <= MAX_DELAY_US / 2)
  1190. delay *= 2;
  1191. usleep(delay);
  1192. attempts--;
  1193. }
  1194. return 0;
  1195. }
  1196. static void test_update_delete(unsigned int fn, void *data)
  1197. {
  1198. int do_update = ((int *)data)[1];
  1199. int fd = ((int *)data)[0];
  1200. int i, key, value, err;
  1201. if (fn & 1)
  1202. test_hashmap_walk(fn, NULL);
  1203. for (i = fn; i < MAP_SIZE; i += TASKS) {
  1204. key = value = i;
  1205. if (do_update) {
  1206. err = map_update_retriable(fd, &key, &value, BPF_NOEXIST, MAP_RETRIES);
  1207. if (err)
  1208. printf("error %d %d\n", err, errno);
  1209. assert(err == 0);
  1210. err = map_update_retriable(fd, &key, &value, BPF_EXIST, MAP_RETRIES);
  1211. if (err)
  1212. printf("error %d %d\n", err, errno);
  1213. assert(err == 0);
  1214. } else {
  1215. err = map_delete_retriable(fd, &key, MAP_RETRIES);
  1216. if (err)
  1217. printf("error %d %d\n", err, errno);
  1218. assert(err == 0);
  1219. }
  1220. }
  1221. }
  1222. static void test_map_parallel(void)
  1223. {
  1224. int i, fd, key = 0, value = 0, j = 0;
  1225. int data[2];
  1226. fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value),
  1227. MAP_SIZE, &map_opts);
  1228. if (fd < 0) {
  1229. printf("Failed to create map for parallel test '%s'!\n",
  1230. strerror(errno));
  1231. exit(1);
  1232. }
  1233. again:
  1234. /* Use the same fd in children to add elements to this map:
  1235. * child_0 adds key=0, key=1024, key=2048, ...
  1236. * child_1 adds key=1, key=1025, key=2049, ...
  1237. * child_1023 adds key=1023, ...
  1238. */
  1239. data[0] = fd;
  1240. data[1] = DO_UPDATE;
  1241. run_parallel(TASKS, test_update_delete, data);
  1242. /* Check that key=0 is already there. */
  1243. assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
  1244. errno == EEXIST);
  1245. /* Check that all elements were inserted. */
  1246. assert(bpf_map_get_next_key(fd, NULL, &key) == 0);
  1247. key = -1;
  1248. for (i = 0; i < MAP_SIZE; i++)
  1249. assert(bpf_map_get_next_key(fd, &key, &key) == 0);
  1250. assert(bpf_map_get_next_key(fd, &key, &key) < 0 && errno == ENOENT);
  1251. /* Another check for all elements */
  1252. for (i = 0; i < MAP_SIZE; i++) {
  1253. key = MAP_SIZE - i - 1;
  1254. assert(bpf_map_lookup_elem(fd, &key, &value) == 0 &&
  1255. value == key);
  1256. }
  1257. /* Now let's delete all elements in parallel. */
  1258. data[1] = DO_DELETE;
  1259. run_parallel(TASKS, test_update_delete, data);
  1260. /* Nothing should be left. */
  1261. key = -1;
  1262. assert(bpf_map_get_next_key(fd, NULL, &key) < 0 && errno == ENOENT);
  1263. assert(bpf_map_get_next_key(fd, &key, &key) < 0 && errno == ENOENT);
  1264. key = 0;
  1265. bpf_map_delete_elem(fd, &key);
  1266. if (j++ < 5)
  1267. goto again;
  1268. close(fd);
  1269. }
  1270. static void test_map_rdonly(void)
  1271. {
  1272. int fd, key = 0, value = 0;
  1273. __u32 old_flags;
  1274. old_flags = map_opts.map_flags;
  1275. map_opts.map_flags |= BPF_F_RDONLY;
  1276. fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value),
  1277. MAP_SIZE, &map_opts);
  1278. map_opts.map_flags = old_flags;
  1279. if (fd < 0) {
  1280. printf("Failed to create map for read only test '%s'!\n",
  1281. strerror(errno));
  1282. exit(1);
  1283. }
  1284. key = 1;
  1285. value = 1234;
  1286. /* Try to insert key=1 element. */
  1287. assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) < 0 &&
  1288. errno == EPERM);
  1289. /* Check that key=1 is not found. */
  1290. assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == ENOENT);
  1291. assert(bpf_map_get_next_key(fd, &key, &value) < 0 && errno == ENOENT);
  1292. close(fd);
  1293. }
  1294. static void test_map_wronly_hash(void)
  1295. {
  1296. int fd, key = 0, value = 0;
  1297. __u32 old_flags;
  1298. old_flags = map_opts.map_flags;
  1299. map_opts.map_flags |= BPF_F_WRONLY;
  1300. fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value),
  1301. MAP_SIZE, &map_opts);
  1302. map_opts.map_flags = old_flags;
  1303. if (fd < 0) {
  1304. printf("Failed to create map for write only test '%s'!\n",
  1305. strerror(errno));
  1306. exit(1);
  1307. }
  1308. key = 1;
  1309. value = 1234;
  1310. /* Insert key=1 element. */
  1311. assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
  1312. /* Check that reading elements and keys from the map is not allowed. */
  1313. assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == EPERM);
  1314. assert(bpf_map_get_next_key(fd, &key, &value) < 0 && errno == EPERM);
  1315. close(fd);
  1316. }
  1317. static void test_map_wronly_stack_or_queue(enum bpf_map_type map_type)
  1318. {
  1319. int fd, value = 0;
  1320. __u32 old_flags;
  1321. assert(map_type == BPF_MAP_TYPE_QUEUE ||
  1322. map_type == BPF_MAP_TYPE_STACK);
  1323. old_flags = map_opts.map_flags;
  1324. map_opts.map_flags |= BPF_F_WRONLY;
  1325. fd = bpf_map_create(map_type, NULL, 0, sizeof(value), MAP_SIZE, &map_opts);
  1326. map_opts.map_flags = old_flags;
  1327. /* Stack/Queue maps do not support BPF_F_NO_PREALLOC */
  1328. if (map_opts.map_flags & BPF_F_NO_PREALLOC) {
  1329. assert(fd < 0 && errno == EINVAL);
  1330. return;
  1331. }
  1332. if (fd < 0) {
  1333. printf("Failed to create map '%s'!\n", strerror(errno));
  1334. exit(1);
  1335. }
  1336. value = 1234;
  1337. assert(bpf_map_update_elem(fd, NULL, &value, BPF_ANY) == 0);
  1338. /* Peek element should fail */
  1339. assert(bpf_map_lookup_elem(fd, NULL, &value) < 0 && errno == EPERM);
  1340. /* Pop element should fail */
  1341. assert(bpf_map_lookup_and_delete_elem(fd, NULL, &value) < 0 &&
  1342. errno == EPERM);
  1343. close(fd);
  1344. }
  1345. static void test_map_wronly(void)
  1346. {
  1347. test_map_wronly_hash();
  1348. test_map_wronly_stack_or_queue(BPF_MAP_TYPE_STACK);
  1349. test_map_wronly_stack_or_queue(BPF_MAP_TYPE_QUEUE);
  1350. }
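/* Create 'n' SO_REUSEPORT sockets of the given type, bind them all to the
 * same ephemeral port, and record their socket cookies; along the way check
 * that the reuseport array rejects unbound sockets and non-listening TCP
 * sockets with EINVAL.
 */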
static void prepare_reuseport_grp(int type, int map_fd, size_t map_elem_size,
				  __s64 *fds64, __u64 *sk_cookies,
				  unsigned int n)
{
	socklen_t optlen, addrlen;
	struct sockaddr_in6 s6;
	const __u32 index0 = 0;
	const int optval = 1;
	unsigned int i;
	u64 sk_cookie;
	void *value;
	__s32 fd32;
	__s64 fd64;
	int err;

	s6.sin6_family = AF_INET6;
	s6.sin6_addr = in6addr_any;
	s6.sin6_port = 0;
	addrlen = sizeof(s6);
	optlen = sizeof(sk_cookie);

	for (i = 0; i < n; i++) {
		fd64 = socket(AF_INET6, type, 0);
		CHECK(fd64 == -1, "socket()",
		      "sock_type:%d fd64:%lld errno:%d\n",
		      type, fd64, errno);

		err = setsockopt(fd64, SOL_SOCKET, SO_REUSEPORT,
				 &optval, sizeof(optval));
		CHECK(err == -1, "setsockopt(SO_REUSEPORT)",
		      "err:%d errno:%d\n", err, errno);

		/* reuseport_array does not allow unbound sk */
		if (map_elem_size == sizeof(__u64))
			value = &fd64;
		else {
			assert(map_elem_size == sizeof(__u32));
			fd32 = (__s32)fd64;
			value = &fd32;
		}

		err = bpf_map_update_elem(map_fd, &index0, value, BPF_ANY);
		CHECK(err >= 0 || errno != EINVAL,
		      "reuseport array update unbound sk",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);

		err = bind(fd64, (struct sockaddr *)&s6, sizeof(s6));
		CHECK(err == -1, "bind()",
		      "sock_type:%d err:%d errno:%d\n", type, err, errno);

		if (i == 0) {
			err = getsockname(fd64, (struct sockaddr *)&s6,
					  &addrlen);
			CHECK(err == -1, "getsockname()",
			      "sock_type:%d err:%d errno:%d\n",
			      type, err, errno);
		}

		err = getsockopt(fd64, SOL_SOCKET, SO_COOKIE, &sk_cookie,
				 &optlen);
		CHECK(err == -1, "getsockopt(SO_COOKIE)",
		      "sock_type:%d err:%d errno:%d\n", type, err, errno);

		if (type == SOCK_STREAM) {
			/*
			 * reuseport_array does not allow
			 * non-listening tcp sk.
			 */
			err = bpf_map_update_elem(map_fd, &index0, value,
						  BPF_ANY);
			CHECK(err >= 0 || errno != EINVAL,
			      "reuseport array update non-listening sk",
			      "sock_type:%d err:%d errno:%d\n",
			      type, err, errno);
			err = listen(fd64, 0);
			CHECK(err == -1, "listen()",
			      "sock_type:%d, err:%d errno:%d\n",
			      type, err, errno);
		}

		fds64[i] = fd64;
		sk_cookies[i] = sk_cookie;
	}
}
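
/*
 * Exercise BPF_MAP_TYPE_REUSEPORT_SOCKARRAY from userspace: out-of-range
 * and non-existent indexes, the BPF_ANY/BPF_NOEXIST/BPF_EXIST update
 * flags, duplicate socket insertion, element removal on close(), cookie
 * lookup, rejection of SOCK_RAW sockets, and maps created with 32-bit
 * values (whose elements cannot be read back as 64-bit cookies).
 */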
static void test_reuseport_array(void)
{
#define REUSEPORT_FD_IDX(err, last) ({ (err) ? last : !last; })

	const __u32 array_size = 4, index0 = 0, index3 = 3;
	int types[2] = { SOCK_STREAM, SOCK_DGRAM }, type;
	__u64 grpa_cookies[2], sk_cookie, map_cookie;
	__s64 grpa_fds64[2] = { -1, -1 }, fd64 = -1;
	const __u32 bad_index = array_size;
	int map_fd, err, t, f;
	__u32 fds_idx = 0;
	int fd;

	map_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, NULL,
				sizeof(__u32), sizeof(__u64), array_size, NULL);
	CHECK(map_fd < 0, "reuseport array create",
	      "map_fd:%d, errno:%d\n", map_fd, errno);

	/* Test lookup/update/delete with invalid index */
	err = bpf_map_delete_elem(map_fd, &bad_index);
	CHECK(err >= 0 || errno != E2BIG, "reuseport array del >=max_entries",
	      "err:%d errno:%d\n", err, errno);

	err = bpf_map_update_elem(map_fd, &bad_index, &fd64, BPF_ANY);
	CHECK(err >= 0 || errno != E2BIG,
	      "reuseport array update >=max_entries",
	      "err:%d errno:%d\n", err, errno);
	err = bpf_map_lookup_elem(map_fd, &bad_index, &map_cookie);
	CHECK(err >= 0 || errno != ENOENT,
	      "reuseport array lookup >=max_entries",
	      "err:%d errno:%d\n", err, errno);

	/* Test lookup/delete of a non-existent elem */
	err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
	CHECK(err >= 0 || errno != ENOENT,
	      "reuseport array lookup not-exist elem",
	      "err:%d errno:%d\n", err, errno);
	err = bpf_map_delete_elem(map_fd, &index3);
	CHECK(err >= 0 || errno != ENOENT,
	      "reuseport array del not-exist elem",
	      "err:%d errno:%d\n", err, errno);
	for (t = 0; t < ARRAY_SIZE(types); t++) {
		type = types[t];
		prepare_reuseport_grp(type, map_fd, sizeof(__u64), grpa_fds64,
				      grpa_cookies, ARRAY_SIZE(grpa_fds64));

		/* Test BPF_* update flags */
		/* BPF_EXIST failure case */
		err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
					  BPF_EXIST);
		CHECK(err >= 0 || errno != ENOENT,
		      "reuseport array update empty elem BPF_EXIST",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);
		fds_idx = REUSEPORT_FD_IDX(err, fds_idx);

		/* BPF_NOEXIST success case */
		err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
					  BPF_NOEXIST);
		CHECK(err < 0,
		      "reuseport array update empty elem BPF_NOEXIST",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);
		fds_idx = REUSEPORT_FD_IDX(err, fds_idx);

		/* BPF_EXIST success case. */
		err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
					  BPF_EXIST);
		CHECK(err < 0,
		      "reuseport array update same elem BPF_EXIST",
		      "sock_type:%d err:%d errno:%d\n", type, err, errno);
		fds_idx = REUSEPORT_FD_IDX(err, fds_idx);

		/* BPF_NOEXIST failure case */
		err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
					  BPF_NOEXIST);
		CHECK(err >= 0 || errno != EEXIST,
		      "reuseport array update non-empty elem BPF_NOEXIST",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);
		fds_idx = REUSEPORT_FD_IDX(err, fds_idx);

		/* BPF_ANY case (always succeed) */
		err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
					  BPF_ANY);
		CHECK(err < 0,
		      "reuseport array update same sk with BPF_ANY",
		      "sock_type:%d err:%d errno:%d\n", type, err, errno);

		fd64 = grpa_fds64[fds_idx];
		sk_cookie = grpa_cookies[fds_idx];

		/* The same sk cannot be added to reuseport_array twice */
		err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_ANY);
		CHECK(err >= 0 || errno != EBUSY,
		      "reuseport array update same sk with same index",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);

		err = bpf_map_update_elem(map_fd, &index0, &fd64, BPF_ANY);
		CHECK(err >= 0 || errno != EBUSY,
		      "reuseport array update same sk with different index",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);

		/* Test delete elem */
		err = bpf_map_delete_elem(map_fd, &index3);
		CHECK(err < 0, "reuseport array delete sk",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);

		/* Add it back with BPF_NOEXIST */
		err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST);
		CHECK(err < 0,
		      "reuseport array re-add with BPF_NOEXIST after del",
		      "sock_type:%d err:%d errno:%d\n", type, err, errno);

		/* Test cookie */
		err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
		CHECK(err < 0 || sk_cookie != map_cookie,
		      "reuseport array lookup re-added sk",
		      "sock_type:%d err:%d errno:%d sk_cookie:0x%llx map_cookie:0x%llx\n",
		      type, err, errno, sk_cookie, map_cookie);

		/* Test elem removed by close() */
		for (f = 0; f < ARRAY_SIZE(grpa_fds64); f++)
			close(grpa_fds64[f]);
		err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
		CHECK(err >= 0 || errno != ENOENT,
		      "reuseport array lookup after close()",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);
	}

	/* Test SOCK_RAW */
	fd64 = socket(AF_INET6, SOCK_RAW, IPPROTO_UDP);
	CHECK(fd64 == -1, "socket(SOCK_RAW)", "err:%d errno:%d\n",
	      err, errno);
	err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST);
	CHECK(err >= 0 || errno != ENOTSUPP, "reuseport array update SOCK_RAW",
	      "err:%d errno:%d\n", err, errno);
	close(fd64);

	/* Close the 64 bit value map */
	close(map_fd);

	/* Test 32 bit fd */
	map_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, NULL,
				sizeof(__u32), sizeof(__u32), array_size, NULL);
	CHECK(map_fd < 0, "reuseport array create",
	      "map_fd:%d, errno:%d\n", map_fd, errno);
	prepare_reuseport_grp(SOCK_STREAM, map_fd, sizeof(__u32), &fd64,
			      &sk_cookie, 1);
	fd = fd64;
	err = bpf_map_update_elem(map_fd, &index3, &fd, BPF_NOEXIST);
	CHECK(err < 0, "reuseport array update 32 bit fd",
	      "err:%d errno:%d\n", err, errno);
	err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
	CHECK(err >= 0 || errno != ENOSPC,
	      "reuseport array lookup 32 bit fd",
	      "err:%d errno:%d\n", err, errno);
	close(fd);
	close(map_fd);
}
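
/* Run every built-in map test once with the current map_opts.map_flags. */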
static void run_all_tests(void)
{
	test_hashmap(0, NULL);
	test_hashmap_percpu(0, NULL);
	test_hashmap_walk(0, NULL);
	test_hashmap_zero_seed();

	test_arraymap(0, NULL);
	test_arraymap_percpu(0, NULL);
	test_arraymap_percpu_many_keys();

	test_devmap(0, NULL);
	test_devmap_hash(0, NULL);
	test_sockmap(0, NULL);

	test_map_large();
	test_map_parallel();
	test_map_stress();

	test_map_rdonly();
	test_map_wronly();

	test_reuseport_array();

	test_queuemap(0, NULL);
	test_stackmap(0, NULL);

	test_map_in_map();
}
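
/*
 * Tests under map_tests/ each provide a test_<name>() entry point; the
 * DEFINE_TEST x-macro in tests.h first declares them here and later
 * calls them from main().
 */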
#define DEFINE_TEST(name) extern void test_##name(void);
#include <map_tests/tests.h>
#undef DEFINE_TEST

int main(void)
{
	srand(time(NULL));

	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	map_opts.map_flags = 0;
	run_all_tests();

	map_opts.map_flags = BPF_F_NO_PREALLOC;
	run_all_tests();

#define DEFINE_TEST(name) test_##name();
#include <map_tests/tests.h>
#undef DEFINE_TEST

	printf("test_maps: OK, %d SKIPPED\n", skips);
	return 0;
}