test_progs.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Copyright (c) 2017 Facebook
  3. */
  4. #define _GNU_SOURCE
  5. #include "test_progs.h"
  6. #include "testing_helpers.h"
  7. #include "cgroup_helpers.h"
  8. #include <argp.h>
  9. #include <pthread.h>
  10. #include <sched.h>
  11. #include <signal.h>
  12. #include <string.h>
  13. #include <execinfo.h> /* backtrace */
  14. #include <linux/membarrier.h>
  15. #include <sys/sysinfo.h> /* get_nprocs */
  16. #include <netinet/in.h>
  17. #include <sys/select.h>
  18. #include <sys/socket.h>
  19. #include <sys/un.h>
  20. static bool verbose(void)
  21. {
  22. return env.verbosity > VERBOSE_NONE;
  23. }
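/* stdio hijacking: stdout/stderr are redirected into an in-memory stream
 * (open_memstream) so that each test's and subtest's output is buffered
 * into log_buf/log_cnt and printed later by dump_test_log(). In verbose,
 * non-worker mode output goes straight to the real stdout instead.
 */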
  24. static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
  25. {
  26. #ifdef __GLIBC__
  27. if (verbose() && env.worker_id == -1) {
  28. /* nothing to do, output to stdout by default */
  29. return;
  30. }
  31. fflush(stdout);
  32. fflush(stderr);
  33. stdout = open_memstream(log_buf, log_cnt);
  34. if (!stdout) {
  35. stdout = env.stdout;
  36. perror("open_memstream");
  37. return;
  38. }
  39. if (env.subtest_state)
  40. env.subtest_state->stdout = stdout;
  41. else
  42. env.test_state->stdout = stdout;
  43. stderr = stdout;
  44. #endif
  45. }
  46. static void stdio_hijack(char **log_buf, size_t *log_cnt)
  47. {
  48. #ifdef __GLIBC__
  49. if (verbose() && env.worker_id == -1) {
  50. /* nothing to do, output to stdout by default */
  51. return;
  52. }
  53. env.stdout = stdout;
  54. env.stderr = stderr;
  55. stdio_hijack_init(log_buf, log_cnt);
  56. #endif
  57. }
  58. static void stdio_restore_cleanup(void)
  59. {
  60. #ifdef __GLIBC__
  61. if (verbose() && env.worker_id == -1) {
  62. /* nothing to do, output to stdout by default */
  63. return;
  64. }
  65. fflush(stdout);
  66. if (env.subtest_state) {
  67. fclose(env.subtest_state->stdout);
  68. env.subtest_state->stdout = NULL;
  69. stdout = env.test_state->stdout;
  70. stderr = env.test_state->stdout;
  71. } else {
  72. fclose(env.test_state->stdout);
  73. env.test_state->stdout = NULL;
  74. }
  75. #endif
  76. }
  77. static void stdio_restore(void)
  78. {
  79. #ifdef __GLIBC__
  80. if (verbose() && env.worker_id == -1) {
  81. /* nothing to do, output to stdout by default */
  82. return;
  83. }
  84. if (stdout == env.stdout)
  85. return;
  86. stdio_restore_cleanup();
  87. stdout = env.stdout;
  88. stderr = env.stderr;
  89. #endif
  90. }
  91. /* Adapted from perf/util/string.c */
  92. static bool glob_match(const char *str, const char *pat)
  93. {
  94. while (*str && *pat && *pat != '*') {
  95. if (*str != *pat)
  96. return false;
  97. str++;
  98. pat++;
  99. }
  100. /* Check wild card */
  101. if (*pat == '*') {
  102. while (*pat == '*')
  103. pat++;
  104. if (!*pat) /* Tail wild card matches all */
  105. return true;
  106. while (*str)
  107. if (glob_match(str++, pat))
  108. return true;
  109. }
  110. return !*str && !*pat;
  111. }
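/* Illustrative examples: glob_match("bpf_cookie", "bpf_*") and
 * glob_match("send_signal", "*signal*") return true, while
 * glob_match("send_signal", "bpf_*") returns false. Only '*' is
 * supported; there is no '?' or character-class matching.
 */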
  112. #define EXIT_NO_TEST 2
  113. #define EXIT_ERR_SETUP_INFRA 3
  114. /* defined in test_progs.h */
  115. struct test_env env = {};
  116. struct prog_test_def {
  117. const char *test_name;
  118. int test_num;
  119. void (*run_test)(void);
  120. void (*run_serial_test)(void);
  121. bool should_run;
  122. bool need_cgroup_cleanup;
  123. };
  124. /* Override C runtime library's usleep() implementation to ensure nanosleep()
  125. * is always called. Usleep is frequently used in selftests as a way to
  126. * trigger kprobe and tracepoints.
  127. */
  128. int usleep(useconds_t usec)
  129. {
  130. struct timespec ts = {
  131. .tv_sec = usec / 1000000,
  132. .tv_nsec = (usec % 1000000) * 1000,
  133. };
  134. return syscall(__NR_nanosleep, &ts, NULL);
  135. }
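/* Test selection precedence: a test is rejected if it matches the deny
 * (blacklist) filter with no subtest restriction; it is accepted if it
 * matches the allow (whitelist) filter, if neither an allow filter nor a
 * number set was given, or if its number is present in the -n number set.
 */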
  136. static bool should_run(struct test_selector *sel, int num, const char *name)
  137. {
  138. int i;
  139. for (i = 0; i < sel->blacklist.cnt; i++) {
  140. if (glob_match(name, sel->blacklist.tests[i].name) &&
  141. !sel->blacklist.tests[i].subtest_cnt)
  142. return false;
  143. }
  144. for (i = 0; i < sel->whitelist.cnt; i++) {
  145. if (glob_match(name, sel->whitelist.tests[i].name))
  146. return true;
  147. }
  148. if (!sel->whitelist.cnt && !sel->num_set)
  149. return true;
  150. return num < sel->num_set_len && sel->num_set[num];
  151. }
  152. static bool should_run_subtest(struct test_selector *sel,
  153. struct test_selector *subtest_sel,
  154. int subtest_num,
  155. const char *test_name,
  156. const char *subtest_name)
  157. {
  158. int i, j;
  159. for (i = 0; i < sel->blacklist.cnt; i++) {
  160. if (glob_match(test_name, sel->blacklist.tests[i].name)) {
  161. if (!sel->blacklist.tests[i].subtest_cnt)
  162. return false;
  163. for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
  164. if (glob_match(subtest_name,
  165. sel->blacklist.tests[i].subtests[j]))
  166. return false;
  167. }
  168. }
  169. }
  170. for (i = 0; i < sel->whitelist.cnt; i++) {
  171. if (glob_match(test_name, sel->whitelist.tests[i].name)) {
  172. if (!sel->whitelist.tests[i].subtest_cnt)
  173. return true;
  174. for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
  175. if (glob_match(subtest_name,
  176. sel->whitelist.tests[i].subtests[j]))
  177. return true;
  178. }
  179. }
  180. }
  181. if (!sel->whitelist.cnt && !subtest_sel->num_set)
  182. return true;
  183. return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
  184. }
  185. static char *test_result(bool failed, bool skipped)
  186. {
  187. return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
  188. }
  189. static void print_test_log(char *log_buf, size_t log_cnt)
  190. {
  191. log_buf[log_cnt] = '\0';
  192. fprintf(env.stdout, "%s", log_buf);
  193. if (log_buf[log_cnt - 1] != '\n')
  194. fprintf(env.stdout, "\n");
  195. }
  196. #define TEST_NUM_WIDTH 7
  197. static void print_test_name(int test_num, const char *test_name, char *result)
  198. {
  199. fprintf(env.stdout, "#%-*d %s", TEST_NUM_WIDTH, test_num, test_name);
  200. if (result)
  201. fprintf(env.stdout, ":%s", result);
  202. fprintf(env.stdout, "\n");
  203. }
  204. static void print_subtest_name(int test_num, int subtest_num,
  205. const char *test_name, char *subtest_name,
  206. char *result)
  207. {
  208. char test_num_str[TEST_NUM_WIDTH + 1];
  209. snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);
  210. fprintf(env.stdout, "#%-*s %s/%s",
  211. TEST_NUM_WIDTH, test_num_str,
  212. test_name, subtest_name);
  213. if (result)
  214. fprintf(env.stdout, ":%s", result);
  215. fprintf(env.stdout, "\n");
  216. }
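/* Print the buffered log and the OK/FAIL/SKIP line for a test and its
 * subtests. Worker processes print nothing; the dispatcher in the main
 * process prints results on their behalf.
 */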
  217. static void dump_test_log(const struct prog_test_def *test,
  218. const struct test_state *test_state,
  219. bool skip_ok_subtests,
  220. bool par_exec_result)
  221. {
  222. bool test_failed = test_state->error_cnt > 0;
  223. bool force_log = test_state->force_log;
  224. bool print_test = verbose() || force_log || test_failed;
  225. int i;
  226. struct subtest_state *subtest_state;
  227. bool subtest_failed;
  228. bool subtest_filtered;
  229. bool print_subtest;
  230. /* we do not print anything in the worker thread */
  231. if (env.worker_id != -1)
  232. return;
  233. /* there is nothing to print when verbose log is used and execution
  234. * is not in parallel mode
  235. */
  236. if (verbose() && !par_exec_result)
  237. return;
  238. if (test_state->log_cnt && print_test)
  239. print_test_log(test_state->log_buf, test_state->log_cnt);
  240. for (i = 0; i < test_state->subtest_num; i++) {
  241. subtest_state = &test_state->subtest_states[i];
  242. subtest_failed = subtest_state->error_cnt;
  243. subtest_filtered = subtest_state->filtered;
  244. print_subtest = verbose() || force_log || subtest_failed;
  245. if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
  246. continue;
  247. if (subtest_state->log_cnt && print_subtest) {
  248. print_test_log(subtest_state->log_buf,
  249. subtest_state->log_cnt);
  250. }
  251. print_subtest_name(test->test_num, i + 1,
  252. test->test_name, subtest_state->name,
  253. test_result(subtest_state->error_cnt,
  254. subtest_state->skipped));
  255. }
  256. print_test_name(test->test_num, test->test_name,
  257. test_result(test_failed, test_state->skip_cnt));
  258. }
  259. static void stdio_restore(void);
  260. /* A bunch of tests set custom affinity per-thread and/or per-process. Reset
  261. * it after each test/sub-test.
  262. */
  263. static void reset_affinity(void)
  264. {
  265. cpu_set_t cpuset;
  266. int i, err;
  267. CPU_ZERO(&cpuset);
  268. for (i = 0; i < env.nr_cpus; i++)
  269. CPU_SET(i, &cpuset);
  270. err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
  271. if (err < 0) {
  272. stdio_restore();
  273. fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
  274. exit(EXIT_ERR_SETUP_INFRA);
  275. }
  276. err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
  277. if (err) {
  278. stdio_restore();
  279. fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
  280. exit(EXIT_ERR_SETUP_INFRA);
  281. }
  282. }
  283. static void save_netns(void)
  284. {
  285. env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
  286. if (env.saved_netns_fd == -1) {
  287. perror("open(/proc/self/ns/net)");
  288. exit(EXIT_ERR_SETUP_INFRA);
  289. }
  290. }
  291. static void restore_netns(void)
  292. {
  293. if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
  294. stdio_restore();
  295. perror("setns(CLONE_NEWNS)");
  296. exit(EXIT_ERR_SETUP_INFRA);
  297. }
  298. }
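/* Subtest lifecycle: test__start_subtest() allocates a new subtest_state,
 * applies subtest filtering and redirects stdio; test__end_subtest() folds
 * the subtest result into the parent test's counters and restores stdio.
 */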
  299. void test__end_subtest(void)
  300. {
  301. struct prog_test_def *test = env.test;
  302. struct test_state *test_state = env.test_state;
  303. struct subtest_state *subtest_state = env.subtest_state;
  304. if (subtest_state->error_cnt) {
  305. test_state->error_cnt++;
  306. } else {
  307. if (!subtest_state->skipped)
  308. test_state->sub_succ_cnt++;
  309. else
  310. test_state->skip_cnt++;
  311. }
  312. if (verbose() && !env.workers)
  313. print_subtest_name(test->test_num, test_state->subtest_num,
  314. test->test_name, subtest_state->name,
  315. test_result(subtest_state->error_cnt,
  316. subtest_state->skipped));
  317. stdio_restore_cleanup();
  318. env.subtest_state = NULL;
  319. }
  320. bool test__start_subtest(const char *subtest_name)
  321. {
  322. struct prog_test_def *test = env.test;
  323. struct test_state *state = env.test_state;
  324. struct subtest_state *subtest_state;
  325. size_t sub_state_size = sizeof(*subtest_state);
  326. if (env.subtest_state)
  327. test__end_subtest();
  328. state->subtest_num++;
  329. state->subtest_states =
  330. realloc(state->subtest_states,
  331. state->subtest_num * sub_state_size);
  332. if (!state->subtest_states) {
  333. fprintf(stderr, "Not enough memory to allocate subtest result\n");
  334. return false;
  335. }
  336. subtest_state = &state->subtest_states[state->subtest_num - 1];
  337. memset(subtest_state, 0, sub_state_size);
  338. if (!subtest_name || !subtest_name[0]) {
  339. fprintf(env.stderr,
  340. "Subtest #%d didn't provide sub-test name!\n",
  341. state->subtest_num);
  342. return false;
  343. }
  344. subtest_state->name = strdup(subtest_name);
  345. if (!subtest_state->name) {
  346. fprintf(env.stderr,
  347. "Subtest #%d: failed to copy subtest name!\n",
  348. state->subtest_num);
  349. return false;
  350. }
  351. if (!should_run_subtest(&env.test_selector,
  352. &env.subtest_selector,
  353. state->subtest_num,
  354. test->test_name,
  355. subtest_name)) {
  356. subtest_state->filtered = true;
  357. return false;
  358. }
  359. env.subtest_state = subtest_state;
  360. stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);
  361. return true;
  362. }
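/* Typical usage from a test function (illustrative, names are made up):
 *
 *	void test_foo(void)
 *	{
 *		if (test__start_subtest("bar"))
 *			run_bar_checks();
 *	}
 *
 * A false return means the subtest was filtered out or its setup failed,
 * so the caller should simply skip that subtest's body.
 */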
  363. void test__force_log(void)
  364. {
  365. env.test_state->force_log = true;
  366. }
  367. void test__skip(void)
  368. {
  369. if (env.subtest_state)
  370. env.subtest_state->skipped = true;
  371. else
  372. env.test_state->skip_cnt++;
  373. }
  374. void test__fail(void)
  375. {
  376. if (env.subtest_state)
  377. env.subtest_state->error_cnt++;
  378. else
  379. env.test_state->error_cnt++;
  380. }
  381. int test__join_cgroup(const char *path)
  382. {
  383. int fd;
  384. if (!env.test->need_cgroup_cleanup) {
  385. if (setup_cgroup_environment()) {
  386. fprintf(stderr,
  387. "#%d %s: Failed to setup cgroup environment\n",
  388. env.test->test_num, env.test->test_name);
  389. return -1;
  390. }
  391. env.test->need_cgroup_cleanup = true;
  392. }
  393. fd = create_and_get_cgroup(path);
  394. if (fd < 0) {
  395. fprintf(stderr,
  396. "#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
  397. env.test->test_num, env.test->test_name, path, errno);
  398. return fd;
  399. }
  400. if (join_cgroup(path)) {
  401. fprintf(stderr,
  402. "#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
  403. env.test->test_num, env.test->test_name, path, errno);
  404. return -1;
  405. }
  406. return fd;
  407. }
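/* test__join_cgroup() returns a cgroup fd on success; the test is expected
 * to close() it itself, while the cgroup environment is torn down by
 * cleanup_cgroup_environment() in run_one_test() once the test finishes.
 */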
  408. int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
  409. {
  410. struct bpf_map *map;
  411. map = bpf_object__find_map_by_name(obj, name);
  412. if (!map) {
  413. fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
  414. test__fail();
  415. return -1;
  416. }
  417. return bpf_map__fd(map);
  418. }
  419. static bool is_jit_enabled(void)
  420. {
  421. const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
  422. bool enabled = false;
  423. int sysctl_fd;
  424. sysctl_fd = open(jit_sysctl, O_RDONLY);
  425. if (sysctl_fd != -1) {
  426. char tmpc;
  427. if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
  428. enabled = (tmpc != '0');
  429. close(sysctl_fd);
  430. }
  431. return enabled;
  432. }
  433. int compare_map_keys(int map1_fd, int map2_fd)
  434. {
  435. __u32 key, next_key;
  436. char val_buf[PERF_MAX_STACK_DEPTH *
  437. sizeof(struct bpf_stack_build_id)];
  438. int err;
  439. err = bpf_map_get_next_key(map1_fd, NULL, &key);
  440. if (err)
  441. return err;
  442. err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
  443. if (err)
  444. return err;
  445. while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
  446. err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
  447. if (err)
  448. return err;
  449. key = next_key;
  450. }
  451. if (errno != ENOENT)
  452. return -1;
  453. return 0;
  454. }
  455. int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
  456. {
  457. __u32 key, next_key, *cur_key_p, *next_key_p;
  458. char *val_buf1, *val_buf2;
  459. int i, err = 0;
  460. val_buf1 = malloc(stack_trace_len);
  461. val_buf2 = malloc(stack_trace_len);
  462. cur_key_p = NULL;
  463. next_key_p = &key;
  464. while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
  465. err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
  466. if (err)
  467. goto out;
  468. err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
  469. if (err)
  470. goto out;
  471. for (i = 0; i < stack_trace_len; i++) {
  472. if (val_buf1[i] != val_buf2[i]) {
  473. err = -1;
  474. goto out;
  475. }
  476. }
  477. key = *next_key_p;
  478. cur_key_p = &key;
  479. next_key_p = &next_key;
  480. }
  481. if (errno != ENOENT)
  482. err = -1;
  483. out:
  484. free(val_buf1);
  485. free(val_buf2);
  486. return err;
  487. }
  488. int extract_build_id(char *build_id, size_t size)
  489. {
  490. FILE *fp;
  491. char *line = NULL;
  492. size_t len = 0;
  493. fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
  494. if (fp == NULL)
  495. return -1;
  496. if (getline(&line, &len, fp) == -1)
  497. goto err;
  498. pclose(fp);
  499. if (len > size - 1)
  500. len = size - 1;
  501. memcpy(build_id, line, len);
  502. build_id[len] = '\0';
  503. free(line);
  504. return 0;
  505. err:
  506. pclose(fp);
  507. return -1;
  508. }
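/* Thin wrappers around the module syscalls: libc does not provide
 * finit_module()/delete_module() wrappers, so they are invoked via
 * syscall() directly.
 */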
  509. static int finit_module(int fd, const char *param_values, int flags)
  510. {
  511. return syscall(__NR_finit_module, fd, param_values, flags);
  512. }
  513. static int delete_module(const char *name, int flags)
  514. {
  515. return syscall(__NR_delete_module, name, flags);
  516. }
  517. /*
  518. * Trigger synchronize_rcu() in kernel.
  519. */
  520. int kern_sync_rcu(void)
  521. {
  522. return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
  523. }
  524. static void unload_bpf_testmod(void)
  525. {
  526. if (kern_sync_rcu())
  527. fprintf(env.stderr, "Failed to trigger kernel-side RCU sync!\n");
  528. if (delete_module("bpf_testmod", 0)) {
  529. if (errno == ENOENT) {
  530. if (verbose())
  531. fprintf(stdout, "bpf_testmod.ko is already unloaded.\n");
  532. return;
  533. }
  534. fprintf(env.stderr, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
  535. return;
  536. }
  537. if (verbose())
  538. fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
  539. }
  540. static int load_bpf_testmod(void)
  541. {
  542. int fd;
  543. /* ensure previous instance of the module is unloaded */
  544. unload_bpf_testmod();
  545. if (verbose())
  546. fprintf(stdout, "Loading bpf_testmod.ko...\n");
  547. fd = open("bpf_testmod.ko", O_RDONLY);
  548. if (fd < 0) {
  549. fprintf(env.stderr, "Can't find bpf_testmod.ko kernel module: %d\n", -errno);
  550. return -ENOENT;
  551. }
  552. if (finit_module(fd, "", 0)) {
  553. fprintf(env.stderr, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno);
  554. close(fd);
  555. return -EINVAL;
  556. }
  557. close(fd);
  558. if (verbose())
  559. fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n");
  560. return 0;
  561. }
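/* X-macro test registration: prog_tests/tests.h (generated by the
 * selftests Makefile from the prog_tests/*.c sources) is expected to
 * contain one DEFINE_TEST(name) entry per test. The first expansion below
 * declares weak test_<name>() and serial_test_<name>() symbols; the second
 * builds the prog_test_defs[] table from the same list.
 */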
  562. /* extern declarations for test funcs */
  563. #define DEFINE_TEST(name) \
  564. extern void test_##name(void) __weak; \
  565. extern void serial_test_##name(void) __weak;
  566. #include <prog_tests/tests.h>
  567. #undef DEFINE_TEST
  568. static struct prog_test_def prog_test_defs[] = {
  569. #define DEFINE_TEST(name) { \
  570. .test_name = #name, \
  571. .run_test = &test_##name, \
  572. .run_serial_test = &serial_test_##name, \
  573. },
  574. #include <prog_tests/tests.h>
  575. #undef DEFINE_TEST
  576. };
  577. static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);
  578. static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
  579. const char *argp_program_version = "test_progs 0.1";
  580. const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
  581. static const char argp_program_doc[] = "BPF selftests test runner";
  582. enum ARG_KEYS {
  583. ARG_TEST_NUM = 'n',
  584. ARG_TEST_NAME = 't',
  585. ARG_TEST_NAME_BLACKLIST = 'b',
  586. ARG_VERIFIER_STATS = 's',
  587. ARG_VERBOSE = 'v',
  588. ARG_GET_TEST_CNT = 'c',
  589. ARG_LIST_TEST_NAMES = 'l',
  590. ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
  591. ARG_TEST_NAME_GLOB_DENYLIST = 'd',
  592. ARG_NUM_WORKERS = 'j',
  593. ARG_DEBUG = -1,
  594. };
  595. static const struct argp_option opts[] = {
  596. { "num", ARG_TEST_NUM, "NUM", 0,
  597. "Run test number NUM only " },
  598. { "name", ARG_TEST_NAME, "NAMES", 0,
  599. "Run tests with names containing any string from NAMES list" },
  600. { "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
  601. "Don't run tests with names containing any string from NAMES list" },
  602. { "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
  603. "Output verifier statistics", },
  604. { "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
  605. "Verbose output (use -vv or -vvv for progressively verbose output)" },
  606. { "count", ARG_GET_TEST_CNT, NULL, 0,
  607. "Get number of selected top-level tests " },
  608. { "list", ARG_LIST_TEST_NAMES, NULL, 0,
  609. "List test names that would run (without running them) " },
  610. { "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
  611. "Run tests with name matching the pattern (supports '*' wildcard)." },
  612. { "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
  613. "Don't run tests with name matching the pattern (supports '*' wildcard)." },
  614. { "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
  615. "Number of workers to run in parallel, default to number of cpus." },
  616. { "debug", ARG_DEBUG, NULL, 0,
  617. "print extra debug information for test_progs." },
  618. {},
  619. };
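/* Example invocations (illustrative):
 *
 *	./test_progs -t attach_probe              # by substring name match
 *	./test_progs -n 5,10-20                   # by test number
 *	./test_progs -a 'xdp*' -d 'xdp_bonding'   # glob allow/deny lists
 *	./test_progs -j                           # one worker per CPU
 */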
  620. static int libbpf_print_fn(enum libbpf_print_level level,
  621. const char *format, va_list args)
  622. {
  623. if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
  624. return 0;
  625. vfprintf(stdout, format, args);
  626. return 0;
  627. }
  628. static void free_test_filter_set(const struct test_filter_set *set)
  629. {
  630. int i, j;
  631. if (!set)
  632. return;
  633. for (i = 0; i < set->cnt; i++) {
  634. free((void *)set->tests[i].name);
  635. for (j = 0; j < set->tests[i].subtest_cnt; j++)
  636. free((void *)set->tests[i].subtests[j]);
  637. free((void *)set->tests[i].subtests);
  638. }
  639. free((void *)set->tests);
  640. }
  641. static void free_test_selector(struct test_selector *test_selector)
  642. {
  643. free_test_filter_set(&test_selector->blacklist);
  644. free_test_filter_set(&test_selector->whitelist);
  645. free(test_selector->num_set);
  646. }
  647. extern int extra_prog_load_log_flags;
  648. static error_t parse_arg(int key, char *arg, struct argp_state *state)
  649. {
  650. struct test_env *env = state->input;
  651. switch (key) {
  652. case ARG_TEST_NUM: {
  653. char *subtest_str = strchr(arg, '/');
  654. if (subtest_str) {
  655. *subtest_str = '\0';
  656. if (parse_num_list(subtest_str + 1,
  657. &env->subtest_selector.num_set,
  658. &env->subtest_selector.num_set_len)) {
  659. fprintf(stderr,
  660. "Failed to parse subtest numbers.\n");
  661. return -EINVAL;
  662. }
  663. }
  664. if (parse_num_list(arg, &env->test_selector.num_set,
  665. &env->test_selector.num_set_len)) {
  666. fprintf(stderr, "Failed to parse test numbers.\n");
  667. return -EINVAL;
  668. }
  669. break;
  670. }
  671. case ARG_TEST_NAME_GLOB_ALLOWLIST:
  672. case ARG_TEST_NAME: {
  673. if (parse_test_list(arg,
  674. &env->test_selector.whitelist,
  675. key == ARG_TEST_NAME_GLOB_ALLOWLIST))
  676. return -ENOMEM;
  677. break;
  678. }
  679. case ARG_TEST_NAME_GLOB_DENYLIST:
  680. case ARG_TEST_NAME_BLACKLIST: {
  681. if (parse_test_list(arg,
  682. &env->test_selector.blacklist,
  683. key == ARG_TEST_NAME_GLOB_DENYLIST))
  684. return -ENOMEM;
  685. break;
  686. }
  687. case ARG_VERIFIER_STATS:
  688. env->verifier_stats = true;
  689. break;
  690. case ARG_VERBOSE:
  691. env->verbosity = VERBOSE_NORMAL;
  692. if (arg) {
  693. if (strcmp(arg, "v") == 0) {
  694. env->verbosity = VERBOSE_VERY;
  695. extra_prog_load_log_flags = 1;
  696. } else if (strcmp(arg, "vv") == 0) {
  697. env->verbosity = VERBOSE_SUPER;
  698. extra_prog_load_log_flags = 2;
  699. } else {
  700. fprintf(stderr,
  701. "Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
  702. arg);
  703. return -EINVAL;
  704. }
  705. }
  706. if (verbose()) {
  707. if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
  708. fprintf(stderr,
  709. "Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
  710. errno);
  711. return -EINVAL;
  712. }
  713. }
  714. break;
  715. case ARG_GET_TEST_CNT:
  716. env->get_test_cnt = true;
  717. break;
  718. case ARG_LIST_TEST_NAMES:
  719. env->list_test_names = true;
  720. break;
  721. case ARG_NUM_WORKERS:
  722. if (arg) {
  723. env->workers = atoi(arg);
  724. if (!env->workers) {
  725. fprintf(stderr, "Invalid number of worker: %s.", arg);
  726. return -EINVAL;
  727. }
  728. } else {
  729. env->workers = get_nprocs();
  730. }
  731. break;
  732. case ARG_DEBUG:
  733. env->debug = true;
  734. break;
  735. case ARGP_KEY_ARG:
  736. argp_usage(state);
  737. break;
  738. case ARGP_KEY_END:
  739. break;
  740. default:
  741. return ARGP_ERR_UNKNOWN;
  742. }
  743. return 0;
  744. }
  745. /*
  746. * Determine if test_progs is running as a "flavored" test runner and switch
  747. * into corresponding sub-directory to load correct BPF objects.
  748. *
  749. * This is done by looking at executable name. If it contains "-flavor"
  750. * suffix, then we are running as a flavored test runner.
  751. */
  752. int cd_flavor_subdir(const char *exec_name)
  753. {
  754. /* General form of argv[0] passed here is:
  755. * some/path/to/test_progs[-flavor], where -flavor part is optional.
  756. * First cut out "test_progs[-flavor]" part, then extract "flavor"
  757. * part, if it's there.
  758. */
  759. const char *flavor = strrchr(exec_name, '/');
  760. if (!flavor)
  761. flavor = exec_name;
  762. else
  763. flavor++;
  764. flavor = strrchr(flavor, '-');
  765. if (!flavor)
  766. return 0;
  767. flavor++;
  768. if (verbose())
  769. fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);
  770. return chdir(flavor);
  771. }
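/* For example, a binary named test_progs-no_alu32 switches into the
 * "no_alu32" subdirectory so the BPF objects built for that flavor are
 * loaded instead of the default ones.
 */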
  772. int trigger_module_test_read(int read_sz)
  773. {
  774. int fd, err;
  775. fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
  776. err = -errno;
  777. if (!ASSERT_GE(fd, 0, "testmod_file_open"))
  778. return err;
  779. read(fd, NULL, read_sz);
  780. close(fd);
  781. return 0;
  782. }
  783. int trigger_module_test_write(int write_sz)
  784. {
  785. int fd, err;
  786. char *buf = malloc(write_sz);
  787. if (!buf)
  788. return -ENOMEM;
  789. memset(buf, 'a', write_sz);
  790. buf[write_sz-1] = '\0';
  791. fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
  792. err = -errno;
  793. if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
  794. free(buf);
  795. return err;
  796. }
  797. write(fd, buf, write_sz);
  798. close(fd);
  799. free(buf);
  800. return 0;
  801. }
  802. int write_sysctl(const char *sysctl, const char *value)
  803. {
  804. int fd, err, len;
  805. fd = open(sysctl, O_WRONLY);
  806. if (!ASSERT_NEQ(fd, -1, "open sysctl"))
  807. return -1;
  808. len = strlen(value);
  809. err = write(fd, value, len);
  810. close(fd);
  811. if (!ASSERT_EQ(err, len, "write sysctl"))
  812. return -1;
  813. return 0;
  814. }
  815. #define MAX_BACKTRACE_SZ 128
  816. void crash_handler(int signum)
  817. {
  818. void *bt[MAX_BACKTRACE_SZ];
  819. size_t sz;
  820. sz = backtrace(bt, ARRAY_SIZE(bt));
  821. if (env.test) {
  822. env.test_state->error_cnt++;
  823. dump_test_log(env.test, env.test_state, true, false);
  824. }
  825. if (env.stdout)
  826. stdio_restore();
  827. if (env.worker_id != -1)
  828. fprintf(stderr, "[%d]: ", env.worker_id);
  829. fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
  830. backtrace_symbols_fd(bt, sz, STDERR_FILENO);
  831. }
  832. static void sigint_handler(int signum)
  833. {
  834. int i;
  835. for (i = 0; i < env.workers; i++)
  836. if (env.worker_socks[i] > 0)
  837. close(env.worker_socks[i]);
  838. }
  839. static int current_test_idx;
  840. static pthread_mutex_t current_test_lock;
  841. static pthread_mutex_t stdout_output_lock;
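/* Parallel mode overview: the main process forks env.workers worker
 * processes, each connected over an AF_UNIX SOCK_SEQPACKET socketpair.
 * One dispatcher thread per worker hands out non-serial tests
 * (MSG_DO_TEST) and collects results (MSG_TEST_DONE, MSG_SUBTEST_DONE)
 * plus log chunks (MSG_TEST_LOG); MSG_EXIT tells the worker to terminate.
 */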
  842. static inline const char *str_msg(const struct msg *msg, char *buf)
  843. {
  844. switch (msg->type) {
  845. case MSG_DO_TEST:
  846. sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
  847. break;
  848. case MSG_TEST_DONE:
  849. sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
  850. msg->test_done.num,
  851. msg->test_done.have_log);
  852. break;
  853. case MSG_SUBTEST_DONE:
  854. sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
  855. msg->subtest_done.num,
  856. msg->subtest_done.have_log);
  857. break;
  858. case MSG_TEST_LOG:
  859. sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
  860. strlen(msg->test_log.log_buf),
  861. msg->test_log.is_last);
  862. break;
  863. case MSG_EXIT:
  864. sprintf(buf, "MSG_EXIT");
  865. break;
  866. default:
  867. sprintf(buf, "UNKNOWN");
  868. break;
  869. }
  870. return buf;
  871. }
  872. static int send_message(int sock, const struct msg *msg)
  873. {
  874. char buf[256];
  875. if (env.debug)
  876. fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
  877. return send(sock, msg, sizeof(*msg), 0);
  878. }
  879. static int recv_message(int sock, struct msg *msg)
  880. {
  881. int ret;
  882. char buf[256];
  883. memset(msg, 0, sizeof(*msg));
  884. ret = recv(sock, msg, sizeof(*msg), 0);
  885. if (ret >= 0) {
  886. if (env.debug)
  887. fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
  888. }
  889. return ret;
  890. }
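/* Run a single test in the current process: hijack stdio, call the test's
 * (serial or parallel) entry point, finalize any unfinished subtest, then
 * reset CPU affinity, restore the saved netns, clean up cgroups if needed,
 * restore stdio and dump the buffered log.
 */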
  891. static void run_one_test(int test_num)
  892. {
  893. struct prog_test_def *test = &prog_test_defs[test_num];
  894. struct test_state *state = &test_states[test_num];
  895. env.test = test;
  896. env.test_state = state;
  897. stdio_hijack(&state->log_buf, &state->log_cnt);
  898. if (test->run_test)
  899. test->run_test();
  900. else if (test->run_serial_test)
  901. test->run_serial_test();
  902. /* ensure last sub-test is finalized properly */
  903. if (env.subtest_state)
  904. test__end_subtest();
  905. state->tested = true;
  906. if (verbose() && env.worker_id == -1)
  907. print_test_name(test_num + 1, test->test_name,
  908. test_result(state->error_cnt, state->skip_cnt));
  909. reset_affinity();
  910. restore_netns();
  911. if (test->need_cgroup_cleanup)
  912. cleanup_cgroup_environment();
  913. stdio_restore();
  914. dump_test_log(test, state, false, false);
  915. }
  916. struct dispatch_data {
  917. int worker_id;
  918. int sock_fd;
  919. };
  920. static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
  921. {
  922. if (recv_message(sock_fd, msg) < 0)
  923. return 1;
  924. if (msg->type != type) {
  925. printf("%s: unexpected message type %d. expected %d\n", __func__, msg->type, type);
  926. return 1;
  927. }
  928. return 0;
  929. }
  930. static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
  931. {
  932. FILE *log_fp = NULL;
  933. int result = 0;
  934. log_fp = open_memstream(log_buf, log_cnt);
  935. if (!log_fp)
  936. return 1;
  937. while (true) {
  938. struct msg msg;
  939. if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
  940. result = 1;
  941. goto out;
  942. }
  943. fprintf(log_fp, "%s", msg.test_log.log_buf);
  944. if (msg.test_log.is_last)
  945. break;
  946. }
  947. out:
  948. fclose(log_fp);
  949. log_fp = NULL;
  950. return result;
  951. }
  952. static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
  953. {
  954. struct msg msg;
  955. struct subtest_state *subtest_state;
  956. int subtest_num = state->subtest_num;
  957. state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));
  958. for (int i = 0; i < subtest_num; i++) {
  959. subtest_state = &state->subtest_states[i];
  960. memset(subtest_state, 0, sizeof(*subtest_state));
  961. if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
  962. return 1;
  963. subtest_state->name = strdup(msg.subtest_done.name);
  964. subtest_state->error_cnt = msg.subtest_done.error_cnt;
  965. subtest_state->skipped = msg.subtest_done.skipped;
  966. subtest_state->filtered = msg.subtest_done.filtered;
  967. /* collect all logs */
  968. if (msg.subtest_done.have_log)
  969. if (dispatch_thread_read_log(sock_fd,
  970. &subtest_state->log_buf,
  971. &subtest_state->log_cnt))
  972. return 1;
  973. }
  974. return 0;
  975. }
  976. static void *dispatch_thread(void *ctx)
  977. {
  978. struct dispatch_data *data = ctx;
  979. int sock_fd;
  980. sock_fd = data->sock_fd;
  981. while (true) {
  982. int test_to_run = -1;
  983. struct prog_test_def *test;
  984. struct test_state *state;
  985. /* grab a test */
  986. {
  987. pthread_mutex_lock(&current_test_lock);
  988. if (current_test_idx >= prog_test_cnt) {
  989. pthread_mutex_unlock(&current_test_lock);
  990. goto done;
  991. }
  992. test = &prog_test_defs[current_test_idx];
  993. test_to_run = current_test_idx;
  994. current_test_idx++;
  995. pthread_mutex_unlock(&current_test_lock);
  996. }
  997. if (!test->should_run || test->run_serial_test)
  998. continue;
  999. /* run test through worker */
  1000. {
  1001. struct msg msg_do_test;
  1002. memset(&msg_do_test, 0, sizeof(msg_do_test));
  1003. msg_do_test.type = MSG_DO_TEST;
  1004. msg_do_test.do_test.num = test_to_run;
  1005. if (send_message(sock_fd, &msg_do_test) < 0) {
  1006. perror("Fail to send command");
  1007. goto done;
  1008. }
  1009. env.worker_current_test[data->worker_id] = test_to_run;
  1010. }
  1011. /* wait for test done */
  1012. do {
  1013. struct msg msg;
  1014. if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
  1015. goto error;
  1016. if (test_to_run != msg.test_done.num)
  1017. goto error;
  1018. state = &test_states[test_to_run];
  1019. state->tested = true;
  1020. state->error_cnt = msg.test_done.error_cnt;
  1021. state->skip_cnt = msg.test_done.skip_cnt;
  1022. state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
  1023. state->subtest_num = msg.test_done.subtest_num;
  1024. /* collect all logs */
  1025. if (msg.test_done.have_log) {
  1026. if (dispatch_thread_read_log(sock_fd,
  1027. &state->log_buf,
  1028. &state->log_cnt))
  1029. goto error;
  1030. }
  1031. /* collect all subtests and subtest logs */
  1032. if (!state->subtest_num)
  1033. break;
  1034. if (dispatch_thread_send_subtests(sock_fd, state))
  1035. goto error;
  1036. } while (false);
  1037. pthread_mutex_lock(&stdout_output_lock);
  1038. dump_test_log(test, state, false, true);
  1039. pthread_mutex_unlock(&stdout_output_lock);
  1040. } /* while (true) */
  1041. error:
  1042. if (env.debug)
  1043. fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));
  1044. done:
  1045. {
  1046. struct msg msg_exit;
  1047. msg_exit.type = MSG_EXIT;
  1048. if (send_message(sock_fd, &msg_exit) < 0) {
  1049. if (env.debug)
  1050. fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
  1051. data->worker_id, strerror(errno));
  1052. }
  1053. }
  1054. return NULL;
  1055. }
  1056. static void calculate_summary_and_print_errors(struct test_env *env)
  1057. {
  1058. int i;
  1059. int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
  1060. for (i = 0; i < prog_test_cnt; i++) {
  1061. struct test_state *state = &test_states[i];
  1062. if (!state->tested)
  1063. continue;
  1064. sub_succ_cnt += state->sub_succ_cnt;
  1065. skip_cnt += state->skip_cnt;
  1066. if (state->error_cnt)
  1067. fail_cnt++;
  1068. else
  1069. succ_cnt++;
  1070. }
  1071. /*
  1072. * We only print the error logs summary when there are failed tests and
  1073. * verbose mode is not enabled. Otherwise, results may be inconsistent.
  1074. *
  1075. */
  1076. if (!verbose() && fail_cnt) {
  1077. printf("\nAll error logs:\n");
  1078. /* print error logs again */
  1079. for (i = 0; i < prog_test_cnt; i++) {
  1080. struct prog_test_def *test = &prog_test_defs[i];
  1081. struct test_state *state = &test_states[i];
  1082. if (!state->tested || !state->error_cnt)
  1083. continue;
  1084. dump_test_log(test, state, true, true);
  1085. }
  1086. }
  1087. printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
  1088. succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
  1089. env->succ_cnt = succ_cnt;
  1090. env->sub_succ_cnt = sub_succ_cnt;
  1091. env->fail_cnt = fail_cnt;
  1092. env->skip_cnt = skip_cnt;
  1093. }
  1094. static void server_main(void)
  1095. {
  1096. pthread_t *dispatcher_threads;
  1097. struct dispatch_data *data;
  1098. struct sigaction sigact_int = {
  1099. .sa_handler = sigint_handler,
  1100. .sa_flags = SA_RESETHAND,
  1101. };
  1102. int i;
  1103. sigaction(SIGINT, &sigact_int, NULL);
  1104. dispatcher_threads = calloc(sizeof(pthread_t), env.workers);
  1105. data = calloc(sizeof(struct dispatch_data), env.workers);
  1106. env.worker_current_test = calloc(sizeof(int), env.workers);
  1107. for (i = 0; i < env.workers; i++) {
  1108. int rc;
  1109. data[i].worker_id = i;
  1110. data[i].sock_fd = env.worker_socks[i];
  1111. rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
  1112. if (rc) {
  1113. fprintf(stderr, "Failed to launch dispatcher thread: %s\n", strerror(rc));
  1114. exit(EXIT_ERR_SETUP_INFRA);
  1115. }
  1116. }
  1117. /* wait for all dispatchers to finish */
  1118. for (i = 0; i < env.workers; i++) {
  1119. while (true) {
  1120. int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);
  1121. if (!ret) {
  1122. break;
  1123. } else if (ret == EBUSY) {
  1124. if (env.debug)
  1125. fprintf(stderr, "Still waiting for thread %d (test %d).\n",
  1126. i, env.worker_current_test[i] + 1);
  1127. usleep(1000 * 1000);
  1128. continue;
  1129. } else {
  1130. fprintf(stderr, "Unexpected error joining dispatcher thread: %d", ret);
  1131. break;
  1132. }
  1133. }
  1134. }
  1135. free(dispatcher_threads);
  1136. free(env.worker_current_test);
  1137. free(data);
  1138. /* run serial tests */
  1139. save_netns();
  1140. for (int i = 0; i < prog_test_cnt; i++) {
  1141. struct prog_test_def *test = &prog_test_defs[i];
  1142. if (!test->should_run || !test->run_serial_test)
  1143. continue;
  1144. run_one_test(i);
  1145. }
  1146. /* generate summary */
  1147. fflush(stderr);
  1148. fflush(stdout);
  1149. calculate_summary_and_print_errors(&env);
  1150. /* reap all workers */
  1151. for (i = 0; i < env.workers; i++) {
  1152. int wstatus, pid;
  1153. pid = waitpid(env.worker_pids[i], &wstatus, 0);
  1154. if (pid != env.worker_pids[i])
  1155. perror("Unable to reap worker");
  1156. }
  1157. }
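/* Logs larger than one message are streamed in MAX_LOG_TRUNK_SIZE-sized
 * chunks; the final chunk is marked with test_log.is_last so the receiving
 * dispatcher knows when to stop reading.
 */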
  1158. static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
  1159. {
  1160. char *src;
  1161. size_t slen;
  1162. src = log_buf;
  1163. slen = log_cnt;
  1164. while (slen) {
  1165. struct msg msg_log;
  1166. char *dest;
  1167. size_t len;
  1168. memset(&msg_log, 0, sizeof(msg_log));
  1169. msg_log.type = MSG_TEST_LOG;
  1170. dest = msg_log.test_log.log_buf;
  1171. len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
  1172. memcpy(dest, src, len);
  1173. src += len;
  1174. slen -= len;
  1175. if (!slen)
  1176. msg_log.test_log.is_last = true;
  1177. assert(send_message(sock, &msg_log) >= 0);
  1178. }
  1179. }
  1180. static void free_subtest_state(struct subtest_state *state)
  1181. {
  1182. if (state->log_buf) {
  1183. free(state->log_buf);
  1184. state->log_buf = NULL;
  1185. state->log_cnt = 0;
  1186. }
  1187. free(state->name);
  1188. state->name = NULL;
  1189. }
  1190. static int worker_main_send_subtests(int sock, struct test_state *state)
  1191. {
  1192. int i, result = 0;
  1193. struct msg msg;
  1194. struct subtest_state *subtest_state;
  1195. memset(&msg, 0, sizeof(msg));
  1196. msg.type = MSG_SUBTEST_DONE;
  1197. for (i = 0; i < state->subtest_num; i++) {
  1198. subtest_state = &state->subtest_states[i];
  1199. msg.subtest_done.num = i;
  1200. strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);
  1201. msg.subtest_done.error_cnt = subtest_state->error_cnt;
  1202. msg.subtest_done.skipped = subtest_state->skipped;
  1203. msg.subtest_done.filtered = subtest_state->filtered;
  1204. msg.subtest_done.have_log = false;
  1205. if (verbose() || state->force_log || subtest_state->error_cnt) {
  1206. if (subtest_state->log_cnt)
  1207. msg.subtest_done.have_log = true;
  1208. }
  1209. if (send_message(sock, &msg) < 0) {
  1210. perror("Fail to send message done");
  1211. result = 1;
  1212. goto out;
  1213. }
  1214. /* send logs */
  1215. if (msg.subtest_done.have_log)
  1216. worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);
  1217. free_subtest_state(subtest_state);
  1219. }
  1220. out:
  1221. for (; i < state->subtest_num; i++)
  1222. free_subtest_state(&state->subtest_states[i]);
  1223. free(state->subtest_states);
  1224. return result;
  1225. }
  1226. static int worker_main(int sock)
  1227. {
  1228. save_netns();
  1229. while (true) {
  1230. /* receive command */
  1231. struct msg msg;
  1232. if (recv_message(sock, &msg) < 0)
  1233. goto out;
  1234. switch (msg.type) {
  1235. case MSG_EXIT:
  1236. if (env.debug)
  1237. fprintf(stderr, "[%d]: worker exit.\n",
  1238. env.worker_id);
  1239. goto out;
  1240. case MSG_DO_TEST: {
  1241. int test_to_run = msg.do_test.num;
  1242. struct prog_test_def *test = &prog_test_defs[test_to_run];
  1243. struct test_state *state = &test_states[test_to_run];
  1244. struct msg msg;
  1245. if (env.debug)
  1246. fprintf(stderr, "[%d]: #%d:%s running.\n",
  1247. env.worker_id,
  1248. test_to_run + 1,
  1249. test->test_name);
  1250. run_one_test(test_to_run);
  1251. memset(&msg, 0, sizeof(msg));
  1252. msg.type = MSG_TEST_DONE;
  1253. msg.test_done.num = test_to_run;
  1254. msg.test_done.error_cnt = state->error_cnt;
  1255. msg.test_done.skip_cnt = state->skip_cnt;
  1256. msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
  1257. msg.test_done.subtest_num = state->subtest_num;
  1258. msg.test_done.have_log = false;
  1259. if (verbose() || state->force_log || state->error_cnt) {
  1260. if (state->log_cnt)
  1261. msg.test_done.have_log = true;
  1262. }
  1263. if (send_message(sock, &msg) < 0) {
  1264. perror("Fail to send message done");
  1265. goto out;
  1266. }
  1267. /* send logs */
  1268. if (msg.test_done.have_log)
  1269. worker_main_send_log(sock, state->log_buf, state->log_cnt);
  1270. if (state->log_buf) {
  1271. free(state->log_buf);
  1272. state->log_buf = NULL;
  1273. state->log_cnt = 0;
  1274. }
  1275. if (state->subtest_num)
  1276. if (worker_main_send_subtests(sock, state))
  1277. goto out;
  1278. if (env.debug)
  1279. fprintf(stderr, "[%d]: #%d:%s done.\n",
  1280. env.worker_id,
  1281. test_to_run + 1,
  1282. test->test_name);
  1283. break;
  1284. } /* case MSG_DO_TEST */
  1285. default:
  1286. if (env.debug)
  1287. fprintf(stderr, "[%d]: unknown message.\n", env.worker_id);
  1288. return -1;
  1289. }
  1290. }
  1291. out:
  1292. return 0;
  1293. }
  1294. static void free_test_states(void)
  1295. {
  1296. int i, j;
  1297. for (i = 0; i < ARRAY_SIZE(prog_test_defs); i++) {
  1298. struct test_state *test_state = &test_states[i];
  1299. for (j = 0; j < test_state->subtest_num; j++)
  1300. free_subtest_state(&test_state->subtest_states[j]);
  1301. free(test_state->subtest_states);
  1302. free(test_state->log_buf);
  1303. test_state->subtest_states = NULL;
  1304. test_state->log_buf = NULL;
  1305. }
  1306. }
  1307. int main(int argc, char **argv)
  1308. {
  1309. static const struct argp argp = {
  1310. .options = opts,
  1311. .parser = parse_arg,
  1312. .doc = argp_program_doc,
  1313. };
  1314. struct sigaction sigact = {
  1315. .sa_handler = crash_handler,
  1316. .sa_flags = SA_RESETHAND,
  1317. };
  1318. int err, i;
  1319. sigaction(SIGSEGV, &sigact, NULL);
  1320. err = argp_parse(&argp, argc, argv, 0, NULL, &env);
  1321. if (err)
  1322. return err;
  1323. err = cd_flavor_subdir(argv[0]);
  1324. if (err)
  1325. return err;
  1326. /* Use libbpf 1.0 API mode */
  1327. libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
  1328. libbpf_set_print(libbpf_print_fn);
  1329. srand(time(NULL));
  1330. env.jit_enabled = is_jit_enabled();
  1331. env.nr_cpus = libbpf_num_possible_cpus();
  1332. if (env.nr_cpus < 0) {
  1333. fprintf(stderr, "Failed to get number of CPUs: %d!\n",
  1334. env.nr_cpus);
  1335. return -1;
  1336. }
  1337. env.stdout = stdout;
  1338. env.stderr = stderr;
  1339. env.has_testmod = true;
  1340. if (!env.list_test_names && load_bpf_testmod()) {
  1341. fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
  1342. env.has_testmod = false;
  1343. }
  1344. /* initializing tests */
  1345. for (i = 0; i < prog_test_cnt; i++) {
  1346. struct prog_test_def *test = &prog_test_defs[i];
  1347. test->test_num = i + 1;
  1348. test->should_run = should_run(&env.test_selector,
  1349. test->test_num, test->test_name);
  1350. if ((test->run_test == NULL && test->run_serial_test == NULL) ||
  1351. (test->run_test != NULL && test->run_serial_test != NULL)) {
  1352. fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%sl() defined.\n",
  1353. test->test_num, test->test_name, test->test_name, test->test_name);
  1354. exit(EXIT_ERR_SETUP_INFRA);
  1355. }
  1356. }
  1357. /* ignore workers if we are just listing */
  1358. if (env.get_test_cnt || env.list_test_names)
  1359. env.workers = 0;
  1360. /* launch workers if requested */
  1361. env.worker_id = -1; /* main process */
  1362. if (env.workers) {
  1363. env.worker_pids = calloc(sizeof(__pid_t), env.workers);
  1364. env.worker_socks = calloc(sizeof(int), env.workers);
  1365. if (env.debug)
  1366. fprintf(stdout, "Launching %d workers.\n", env.workers);
  1367. for (i = 0; i < env.workers; i++) {
  1368. int sv[2];
  1369. pid_t pid;
  1370. if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
  1371. perror("Fail to create worker socket");
  1372. return -1;
  1373. }
  1374. pid = fork();
  1375. if (pid < 0) {
  1376. perror("Failed to fork worker");
  1377. return -1;
  1378. } else if (pid != 0) { /* main process */
  1379. close(sv[1]);
  1380. env.worker_pids[i] = pid;
  1381. env.worker_socks[i] = sv[0];
  1382. } else { /* inside each worker process */
  1383. close(sv[0]);
  1384. env.worker_id = i;
  1385. return worker_main(sv[1]);
  1386. }
  1387. }
  1388. if (env.worker_id == -1) {
  1389. server_main();
  1390. goto out;
  1391. }
  1392. }
  1393. /* The rest of the main process */
  1394. /* on single mode */
  1395. save_netns();
  1396. for (i = 0; i < prog_test_cnt; i++) {
  1397. struct prog_test_def *test = &prog_test_defs[i];
  1398. if (!test->should_run)
  1399. continue;
  1400. if (env.get_test_cnt) {
  1401. env.succ_cnt++;
  1402. continue;
  1403. }
  1404. if (env.list_test_names) {
  1405. fprintf(env.stdout, "%s\n", test->test_name);
  1406. env.succ_cnt++;
  1407. continue;
  1408. }
  1409. run_one_test(i);
  1410. }
  1411. if (env.get_test_cnt) {
  1412. printf("%d\n", env.succ_cnt);
  1413. goto out;
  1414. }
  1415. if (env.list_test_names)
  1416. goto out;
  1417. calculate_summary_and_print_errors(&env);
  1418. close(env.saved_netns_fd);
  1419. out:
  1420. if (!env.list_test_names && env.has_testmod)
  1421. unload_bpf_testmod();
  1422. free_test_selector(&env.test_selector);
  1423. free_test_selector(&env.subtest_selector);
  1424. free_test_states();
  1425. if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
  1426. return EXIT_NO_TEST;
  1427. return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
  1428. }