// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/limits.h>
#include <sys/sysinfo.h>
#include <sys/wait.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#include "../kselftest.h"
#include "cgroup_util.h"

enum hog_clock_type {
	/* Count elapsed time using the CLOCK_PROCESS_CPUTIME_ID clock. */
	CPU_HOG_CLOCK_PROCESS,

	/* Count elapsed time using system wallclock time. */
	CPU_HOG_CLOCK_WALL,
};
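
/* Per-hogger bookkeeping: the child cgroup path, the hogger's pid, and its usage_usec reading. */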
struct cpu_hogger {
	char *cgroup;
	pid_t pid;
	long usage;
};
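
/* Arguments passed to hog_cpus_timed(): thread count, run time, and the clock used to measure it. */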
struct cpu_hog_func_param {
	int nprocs;
	struct timespec ts;
	enum hog_clock_type clock_type;
};

/*
 * This test creates two nested cgroups with and without enabling
 * the cpu controller.
 */
static int test_cpucg_subtree_control(const char *root)
{
	char *parent = NULL, *child = NULL, *parent2 = NULL, *child2 = NULL;
	int ret = KSFT_FAIL;

	/* Create two nested cgroups with the cpu controller enabled. */
	parent = cg_name(root, "cpucg_test_0");
	if (!parent)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	child = cg_name(parent, "cpucg_test_child");
	if (!child)
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_read_strstr(child, "cgroup.controllers", "cpu"))
		goto cleanup;

	/* Create two nested cgroups without enabling the cpu controller. */
	parent2 = cg_name(root, "cpucg_test_1");
	if (!parent2)
		goto cleanup;

	if (cg_create(parent2))
		goto cleanup;

	child2 = cg_name(parent2, "cpucg_test_child");
	if (!child2)
		goto cleanup;

	if (cg_create(child2))
		goto cleanup;

	if (!cg_read_strstr(child2, "cgroup.controllers", "cpu"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(child);
	free(child);
	cg_destroy(child2);
	free(child2);
	cg_destroy(parent);
	free(parent);
	cg_destroy(parent2);
	free(parent2);

	return ret;
}
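
/* Body of each CPU-hogging thread: spin forever until the process exits. */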
static void *hog_cpu_thread_func(void *arg)
{
	while (1)
		;

	return NULL;
}
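
/* Return lhs - rhs as a timespec, clamped to zero if rhs is larger than lhs. */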
static struct timespec
timespec_sub(const struct timespec *lhs, const struct timespec *rhs)
{
	struct timespec zero = {
		.tv_sec = 0,
		.tv_nsec = 0,
	};
	struct timespec ret;

	if (lhs->tv_sec < rhs->tv_sec)
		return zero;

	ret.tv_sec = lhs->tv_sec - rhs->tv_sec;

	if (lhs->tv_nsec < rhs->tv_nsec) {
		if (ret.tv_sec == 0)
			return zero;

		ret.tv_sec--;
		ret.tv_nsec = NSEC_PER_SEC - rhs->tv_nsec + lhs->tv_nsec;
	} else
		ret.tv_nsec = lhs->tv_nsec - rhs->tv_nsec;

	return ret;
}
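
/*
 * Spawn param->nprocs busy-loop threads, then sleep until the requested
 * amount of time has elapsed. Depending on param->clock_type, elapsed time
 * is measured either as CPU time consumed by the process or as wall time.
 */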
static int hog_cpus_timed(const char *cgroup, void *arg)
{
	const struct cpu_hog_func_param *param =
		(struct cpu_hog_func_param *)arg;
	struct timespec ts_run = param->ts;
	struct timespec ts_remaining = ts_run;
	struct timespec ts_start;
	int i, ret;

	ret = clock_gettime(CLOCK_MONOTONIC, &ts_start);
	if (ret != 0)
		return ret;

	for (i = 0; i < param->nprocs; i++) {
		pthread_t tid;

		ret = pthread_create(&tid, NULL, &hog_cpu_thread_func, NULL);
		if (ret != 0)
			return ret;
	}

	while (ts_remaining.tv_sec > 0 || ts_remaining.tv_nsec > 0) {
		struct timespec ts_total;

		ret = nanosleep(&ts_remaining, NULL);
		if (ret && errno != EINTR)
			return ret;

		if (param->clock_type == CPU_HOG_CLOCK_PROCESS) {
			ret = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts_total);
			if (ret != 0)
				return ret;
		} else {
			struct timespec ts_current;

			ret = clock_gettime(CLOCK_MONOTONIC, &ts_current);
			if (ret != 0)
				return ret;

			ts_total = timespec_sub(&ts_current, &ts_start);
		}

		ts_remaining = timespec_sub(&ts_run, &ts_total);
	}

	return 0;
}

/*
 * Creates a cpu cgroup, burns a CPU for a few quanta, and verifies that
 * cpu.stat shows the expected output.
 */
static int test_cpucg_stats(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec, system_usec;
	long usage_seconds = 2;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *cpucg;

	cpucg = cg_name(root, "cpucg_test");
	if (!cpucg)
		goto cleanup;

	if (cg_create(cpucg))
		goto cleanup;

	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	system_usec = cg_read_key_long(cpucg, "cpu.stat", "system_usec");
	if (usage_usec != 0 || user_usec != 0 || system_usec != 0)
		goto cleanup;

	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_PROCESS,
	};
	if (cg_run(cpucg, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (!values_close(usage_usec, expected_usage_usec, 1))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(cpucg);
	free(cpucg);

	return ret;
}
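
/*
 * Shared body for the weight tests: creates a parent cgroup with the cpu
 * controller enabled and three child cgroups whose cpu.weight is 50, 100,
 * and 150. spawn_child() starts one hogger per child; once they have all
 * exited, each child's usage_usec is collected and handed to validate().
 */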
static int
run_cpucg_weight_test(
		const char *root,
		pid_t (*spawn_child)(const struct cpu_hogger *child),
		int (*validate)(const struct cpu_hogger *children, int num_children))
{
	int ret = KSFT_FAIL, i;
	char *parent = NULL;
	struct cpu_hogger children[3] = {NULL};

	parent = cg_name(root, "cpucg_test_0");
	if (!parent)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		children[i].cgroup = cg_name_indexed(parent, "cpucg_child", i);
		if (!children[i].cgroup)
			goto cleanup;

		if (cg_create(children[i].cgroup))
			goto cleanup;

		if (cg_write_numeric(children[i].cgroup, "cpu.weight",
					50 * (i + 1)))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		pid_t pid = spawn_child(&children[i]);

		if (pid <= 0)
			goto cleanup;
		children[i].pid = pid;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		int retcode;

		waitpid(children[i].pid, &retcode, 0);
		if (!WIFEXITED(retcode))
			goto cleanup;
		if (WEXITSTATUS(retcode))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++)
		children[i].usage = cg_read_key_long(children[i].cgroup,
				"cpu.stat", "usage_usec");

	if (validate(children, ARRAY_SIZE(children)))
		goto cleanup;

	ret = KSFT_PASS;
cleanup:
	for (i = 0; i < ARRAY_SIZE(children); i++) {
		cg_destroy(children[i].cgroup);
		free(children[i].cgroup);
	}
	cg_destroy(parent);
	free(parent);

	return ret;
}
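
/*
 * Spawn a hogger in the child's cgroup that runs ncpus busy-loop threads
 * for 10 seconds of wall-clock time.
 */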
static pid_t weight_hog_ncpus(const struct cpu_hogger *child, int ncpus)
{
	long usage_seconds = 10;
	struct cpu_hog_func_param param = {
		.nprocs = ncpus,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	return cg_run_nowait(child->cgroup, hog_cpus_timed, (void *)&param);
}

static pid_t weight_hog_all_cpus(const struct cpu_hogger *child)
{
	return weight_hog_ncpus(child, get_nprocs());
}
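
/*
 * With every CPU oversubscribed, usage should scale with cpu.weight: each
 * child should have used roughly children[0].usage more than the previous
 * one, within a 35% tolerance.
 */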
static int
overprovision_validate(const struct cpu_hogger *children, int num_children)
{
	int ret = KSFT_FAIL, i;

	for (i = 0; i < num_children - 1; i++) {
		long delta;

		if (children[i + 1].usage <= children[i].usage)
			goto cleanup;

		delta = children[i + 1].usage - children[i].usage;
		if (!values_close(delta, children[0].usage, 35))
			goto cleanup;
	}

	ret = KSFT_PASS;
cleanup:
	return ret;
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B     cpu.weight = 50
 * A/C     cpu.weight = 100
 * A/D     cpu.weight = 150
 *
 * A separate process is then created for each child cgroup which spawns as
 * many threads as there are cores, and hogs each CPU as much as possible
 * for some time interval.
 *
 * Once all of the children have exited, we verify that each child cgroup
 * was given proportional runtime as informed by their cpu.weight.
 */
static int test_cpucg_weight_overprovisioned(const char *root)
{
	return run_cpucg_weight_test(root, weight_hog_all_cpus,
			overprovision_validate);
}

static pid_t weight_hog_one_cpu(const struct cpu_hogger *child)
{
	return weight_hog_ncpus(child, 1);
}
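
/*
 * With one hogger thread per child and spare CPUs available, weight should
 * not matter: every child's usage should be within 15% of children[0].usage.
 */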
static int
underprovision_validate(const struct cpu_hogger *children, int num_children)
{
	int ret = KSFT_FAIL, i;

	for (i = 0; i < num_children - 1; i++) {
		if (!values_close(children[i + 1].usage, children[0].usage, 15))
			goto cleanup;
	}

	ret = KSFT_PASS;
cleanup:
	return ret;
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B     cpu.weight = 50
 * A/C     cpu.weight = 100
 * A/D     cpu.weight = 150
 *
 * A separate process is then created for each child cgroup which spawns a
 * single thread that hogs a CPU. The testcase is only run on systems that
 * have at least one core per-thread in the child processes.
 *
 * Once all of the children have exited, we verify that each child cgroup
 * had roughly the same runtime despite having different cpu.weight.
 */
static int test_cpucg_weight_underprovisioned(const char *root)
{
	/*
	 * Only run the test if there are enough cores to avoid overprovisioning
	 * the system.
	 */
	if (get_nprocs() < 4)
		return KSFT_SKIP;

	return run_cpucg_weight_test(root, weight_hog_one_cpu,
			underprovision_validate);
}
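
/*
 * Shared body for the nested-weight tests: builds the A, A/B, A/C, A/C/D,
 * A/C/E hierarchy described below, runs a wall-clock hogger in each leaf,
 * and verifies that usage_usec is split according to the weights at each
 * level of the tree.
 */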
static int
run_cpucg_nested_weight_test(const char *root, bool overprovisioned)
{
	int ret = KSFT_FAIL, i;
	char *parent = NULL, *child = NULL;
	struct cpu_hogger leaf[3] = {NULL};
	long nested_leaf_usage, child_usage;
	int nprocs = get_nprocs();

	if (!overprovisioned) {
		if (nprocs < 4)
			/*
			 * Only run the test if there are enough cores to avoid
			 * overprovisioning the system.
			 */
			return KSFT_SKIP;

		nprocs /= 4;
	}

	parent = cg_name(root, "cpucg_test");
	child = cg_name(parent, "cpucg_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;
	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;
	if (cg_write(child, "cgroup.subtree_control", "+cpu"))
		goto cleanup;
	if (cg_write(child, "cpu.weight", "1000"))
		goto cleanup;

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		const char *ancestor;
		long weight;

		if (i == 0) {
			ancestor = parent;
			weight = 1000;
		} else {
			ancestor = child;
			weight = 5000;
		}
		leaf[i].cgroup = cg_name_indexed(ancestor, "cpucg_leaf", i);
		if (!leaf[i].cgroup)
			goto cleanup;

		if (cg_create(leaf[i].cgroup))
			goto cleanup;

		if (cg_write_numeric(leaf[i].cgroup, "cpu.weight", weight))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		pid_t pid;
		struct cpu_hog_func_param param = {
			.nprocs = nprocs,
			.ts = {
				.tv_sec = 10,
				.tv_nsec = 0,
			},
			.clock_type = CPU_HOG_CLOCK_WALL,
		};

		pid = cg_run_nowait(leaf[i].cgroup, hog_cpus_timed,
				(void *)&param);
		if (pid <= 0)
			goto cleanup;
		leaf[i].pid = pid;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		int retcode;

		waitpid(leaf[i].pid, &retcode, 0);
		if (!WIFEXITED(retcode))
			goto cleanup;
		if (WEXITSTATUS(retcode))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		leaf[i].usage = cg_read_key_long(leaf[i].cgroup,
				"cpu.stat", "usage_usec");
		if (leaf[i].usage <= 0)
			goto cleanup;
	}

	nested_leaf_usage = leaf[1].usage + leaf[2].usage;
	if (overprovisioned) {
		if (!values_close(leaf[0].usage, nested_leaf_usage, 15))
			goto cleanup;
	} else if (!values_close(leaf[0].usage * 2, nested_leaf_usage, 15))
		goto cleanup;

	child_usage = cg_read_key_long(child, "cpu.stat", "usage_usec");
	if (child_usage <= 0)
		goto cleanup;
	if (!values_close(child_usage, nested_leaf_usage, 1))
		goto cleanup;

	ret = KSFT_PASS;
cleanup:
	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		cg_destroy(leaf[i].cgroup);
		free(leaf[i].cgroup);
	}
	cg_destroy(child);
	free(child);
	cg_destroy(parent);
	free(parent);

	return ret;
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B     cpu.weight = 1000
 * A/C     cpu.weight = 1000
 * A/C/D   cpu.weight = 5000
 * A/C/E   cpu.weight = 5000
 *
 * A separate process is then created for each leaf, which spawns nproc
 * threads that burn a CPU for a few seconds.
 *
 * Once all of those processes have exited, we verify that each of the leaf
 * cgroups have roughly the same usage from cpu.stat.
 */
static int
test_cpucg_nested_weight_overprovisioned(const char *root)
{
	return run_cpucg_nested_weight_test(root, true);
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B     cpu.weight = 1000
 * A/C     cpu.weight = 1000
 * A/C/D   cpu.weight = 5000
 * A/C/E   cpu.weight = 5000
 *
 * A separate process is then created for each leaf, which spawns nproc / 4
 * threads that burn a CPU for a few seconds.
 *
 * Once all of those processes have exited, we verify that each of the leaf
 * cgroups have roughly the same usage from cpu.stat.
 */
static int
test_cpucg_nested_weight_underprovisioned(const char *root)
{
	return run_cpucg_nested_weight_test(root, false);
}

/*
 * This test creates a cgroup with some maximum value within a period, and
 * verifies that a process in the cgroup is not overscheduled.
 */
static int test_cpucg_max(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec;
	long usage_seconds = 1;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *cpucg;

	cpucg = cg_name(root, "cpucg_test");
	if (!cpucg)
		goto cleanup;

	if (cg_create(cpucg))
		goto cleanup;

	if (cg_write(cpucg, "cpu.max", "1000"))
		goto cleanup;

	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	if (cg_run(cpucg, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (user_usec >= expected_usage_usec)
		goto cleanup;

	if (values_close(usage_usec, expected_usage_usec, 95))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(cpucg);
	free(cpucg);

	return ret;
}

/*
 * This test verifies that a process inside of a nested cgroup whose parent
 * cgroup has a cpu.max value set is properly throttled.
 */
static int test_cpucg_max_nested(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec;
	long usage_seconds = 1;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *parent, *child;

	parent = cg_name(root, "cpucg_parent");
	child = cg_name(parent, "cpucg_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cpu.max", "1000"))
		goto cleanup;

	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	if (cg_run(child, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(child, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(child, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (user_usec >= expected_usage_usec)
		goto cleanup;

	if (values_close(usage_usec, expected_usage_usec, 95))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(child);
	free(child);
	cg_destroy(parent);
	free(parent);

	return ret;
}

#define T(x) { x, #x }
struct cpucg_test {
	int (*fn)(const char *root);
	const char *name;
} tests[] = {
	T(test_cpucg_subtree_control),
	T(test_cpucg_stats),
	T(test_cpucg_weight_overprovisioned),
	T(test_cpucg_weight_underprovisioned),
	T(test_cpucg_nested_weight_overprovisioned),
	T(test_cpucg_nested_weight_underprovisioned),
	T(test_cpucg_max),
	T(test_cpucg_max_nested),
};
#undef T
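
/*
 * Find the cgroup v2 mount point, enable the cpu controller on the root if
 * it is not already enabled, then run every test in the table above.
 */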
int main(int argc, char *argv[])
{
	char root[PATH_MAX];
	int i, ret = EXIT_SUCCESS;

	if (cg_find_unified_root(root, sizeof(root)))
		ksft_exit_skip("cgroup v2 isn't mounted\n");

	if (cg_read_strstr(root, "cgroup.subtree_control", "cpu"))
		if (cg_write(root, "cgroup.subtree_control", "+cpu"))
			ksft_exit_skip("Failed to set cpu controller\n");

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		switch (tests[i].fn(root)) {
		case KSFT_PASS:
			ksft_test_result_pass("%s\n", tests[i].name);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("%s\n", tests[i].name);
			break;
		default:
			ret = EXIT_FAILURE;
			ksft_test_result_fail("%s\n", tests[i].name);
			break;
		}
	}

	return ret;
}