// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/limits.h>
#include <sys/sysinfo.h>
#include <sys/wait.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#include "../kselftest.h"
#include "cgroup_util.h"

enum hog_clock_type {
	/* Count elapsed time using the CLOCK_PROCESS_CPUTIME_ID clock. */
	CPU_HOG_CLOCK_PROCESS,
	/* Count elapsed time using system wallclock time. */
	CPU_HOG_CLOCK_WALL,
};

struct cpu_hogger {
	char *cgroup;
	pid_t pid;
	long usage;
};

struct cpu_hog_func_param {
	int nprocs;
	struct timespec ts;
	enum hog_clock_type clock_type;
};

/*
 * This test creates two nested cgroups with and without enabling
 * the cpu controller.
 */
static int test_cpucg_subtree_control(const char *root)
{
	char *parent = NULL, *child = NULL, *parent2 = NULL, *child2 = NULL;
	int ret = KSFT_FAIL;

	/* Create two nested cgroups with the cpu controller enabled. */
	parent = cg_name(root, "cpucg_test_0");
	if (!parent)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	child = cg_name(parent, "cpucg_test_child");
	if (!child)
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_read_strstr(child, "cgroup.controllers", "cpu"))
		goto cleanup;

	/* Create two nested cgroups without enabling the cpu controller. */
	parent2 = cg_name(root, "cpucg_test_1");
	if (!parent2)
		goto cleanup;

	if (cg_create(parent2))
		goto cleanup;

	child2 = cg_name(parent2, "cpucg_test_child");
	if (!child2)
		goto cleanup;

	if (cg_create(child2))
		goto cleanup;

	if (!cg_read_strstr(child2, "cgroup.controllers", "cpu"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(child);
	free(child);
	cg_destroy(child2);
	free(child2);
	cg_destroy(parent);
	free(parent);
	cg_destroy(parent2);
	free(parent2);

	return ret;
}
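
/*
 * Hog thread body: spin forever. The threads are never joined; they run
 * until the process that spawned them exits.
 */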
static void *hog_cpu_thread_func(void *arg)
{
	while (1)
		;

	return NULL;
}
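
/* Return *lhs - *rhs, clamped to zero if the result would be negative. */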
static struct timespec
timespec_sub(const struct timespec *lhs, const struct timespec *rhs)
{
	struct timespec zero = {
		.tv_sec = 0,
		.tv_nsec = 0,
	};
	struct timespec ret;

	if (lhs->tv_sec < rhs->tv_sec)
		return zero;

	ret.tv_sec = lhs->tv_sec - rhs->tv_sec;
	if (lhs->tv_nsec < rhs->tv_nsec) {
		if (ret.tv_sec == 0)
			return zero;

		ret.tv_sec--;
		ret.tv_nsec = NSEC_PER_SEC - rhs->tv_nsec + lhs->tv_nsec;
	} else
		ret.tv_nsec = lhs->tv_nsec - rhs->tv_nsec;

	return ret;
}
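
/*
 * Spawn param->nprocs busy-looping threads, then sleep until param->ts has
 * elapsed as measured on either the process CPU clock or the monotonic wall
 * clock, depending on param->clock_type.
 */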
static int hog_cpus_timed(const char *cgroup, void *arg)
{
	const struct cpu_hog_func_param *param =
		(struct cpu_hog_func_param *)arg;
	struct timespec ts_run = param->ts;
	struct timespec ts_remaining = ts_run;
	struct timespec ts_start;
	int i, ret;

	ret = clock_gettime(CLOCK_MONOTONIC, &ts_start);
	if (ret != 0)
		return ret;

	for (i = 0; i < param->nprocs; i++) {
		pthread_t tid;

		ret = pthread_create(&tid, NULL, &hog_cpu_thread_func, NULL);
		if (ret != 0)
			return ret;
	}

	while (ts_remaining.tv_sec > 0 || ts_remaining.tv_nsec > 0) {
		struct timespec ts_total;

		ret = nanosleep(&ts_remaining, NULL);
		if (ret && errno != EINTR)
			return ret;

		if (param->clock_type == CPU_HOG_CLOCK_PROCESS) {
			ret = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts_total);
			if (ret != 0)
				return ret;
		} else {
			struct timespec ts_current;

			ret = clock_gettime(CLOCK_MONOTONIC, &ts_current);
			if (ret != 0)
				return ret;

			ts_total = timespec_sub(&ts_current, &ts_start);
		}

		ts_remaining = timespec_sub(&ts_run, &ts_total);
	}

	return 0;
}

/*
 * Creates a cpu cgroup, burns a CPU for a few quanta, and verifies that
 * cpu.stat shows the expected output.
 */
static int test_cpucg_stats(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec, system_usec;
	long usage_seconds = 2;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *cpucg;

	cpucg = cg_name(root, "cpucg_test");
	if (!cpucg)
		goto cleanup;

	if (cg_create(cpucg))
		goto cleanup;
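
	/*
	 * cpu.stat is a flat keyed file; a freshly created cgroup should not
	 * have accumulated any time, i.e.:
	 *
	 *   usage_usec 0
	 *   user_usec 0
	 *   system_usec 0
	 */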
	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	system_usec = cg_read_key_long(cpucg, "cpu.stat", "system_usec");
	if (usage_usec != 0 || user_usec != 0 || system_usec != 0)
		goto cleanup;

	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_PROCESS,
	};
	if (cg_run(cpucg, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (!values_close(usage_usec, expected_usage_usec, 1))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(cpucg);
	free(cpucg);

	return ret;
}
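
/*
 * Shared driver for the cpu.weight tests below: creates a parent cgroup with
 * three children weighted 50/100/150, spawns one hogging process per child
 * via spawn_child(), waits for them all to exit, and hands the measured
 * usage_usec values to validate().
 */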
static int
run_cpucg_weight_test(
		const char *root,
		pid_t (*spawn_child)(const struct cpu_hogger *child),
		int (*validate)(const struct cpu_hogger *children, int num_children))
{
	int ret = KSFT_FAIL, i;
	char *parent = NULL;
	struct cpu_hogger children[3] = {NULL};

	parent = cg_name(root, "cpucg_test_0");
	if (!parent)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		children[i].cgroup = cg_name_indexed(parent, "cpucg_child", i);
		if (!children[i].cgroup)
			goto cleanup;

		if (cg_create(children[i].cgroup))
			goto cleanup;

		if (cg_write_numeric(children[i].cgroup, "cpu.weight",
					50 * (i + 1)))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		pid_t pid = spawn_child(&children[i]);

		if (pid <= 0)
			goto cleanup;
		children[i].pid = pid;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		int retcode;

		waitpid(children[i].pid, &retcode, 0);
		if (!WIFEXITED(retcode))
			goto cleanup;
		if (WEXITSTATUS(retcode))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++)
		children[i].usage = cg_read_key_long(children[i].cgroup,
				"cpu.stat", "usage_usec");

	if (validate(children, ARRAY_SIZE(children)))
		goto cleanup;

	ret = KSFT_PASS;
cleanup:
	for (i = 0; i < ARRAY_SIZE(children); i++) {
		cg_destroy(children[i].cgroup);
		free(children[i].cgroup);
	}
	cg_destroy(parent);
	free(parent);

	return ret;
}
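
/* Hog ncpus CPUs in the child's cgroup for 10 seconds of wall time. */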
static pid_t weight_hog_ncpus(const struct cpu_hogger *child, int ncpus)
{
	long usage_seconds = 10;
	struct cpu_hog_func_param param = {
		.nprocs = ncpus,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	return cg_run_nowait(child->cgroup, hog_cpus_timed, (void *)&param);
}

static pid_t weight_hog_all_cpus(const struct cpu_hogger *child)
{
	return weight_hog_ncpus(child, get_nprocs());
}
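
/*
 * With weights 50/100/150 and every CPU contended, usage should scale
 * linearly with weight: each child should exceed its predecessor by roughly
 * the usage of the weight-50 child.
 */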
static int
overprovision_validate(const struct cpu_hogger *children, int num_children)
{
	int ret = KSFT_FAIL, i;

	for (i = 0; i < num_children - 1; i++) {
		long delta;

		if (children[i + 1].usage <= children[i].usage)
			goto cleanup;

		delta = children[i + 1].usage - children[i].usage;
		if (!values_close(delta, children[0].usage, 35))
			goto cleanup;
	}

	ret = KSFT_PASS;
cleanup:
	return ret;
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B     cpu.weight = 50
 * A/C     cpu.weight = 100
 * A/D     cpu.weight = 150
 *
 * A separate process is then created for each child cgroup which spawns as
 * many threads as there are cores, and hogs each CPU as much as possible
 * for some time interval.
 *
 * Once all of the children have exited, we verify that each child cgroup
 * was given proportional runtime as informed by their cpu.weight.
 */
static int test_cpucg_weight_overprovisioned(const char *root)
{
	return run_cpucg_weight_test(root, weight_hog_all_cpus,
			overprovision_validate);
}

static pid_t weight_hog_one_cpu(const struct cpu_hogger *child)
{
	return weight_hog_ncpus(child, 1);
}
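
/*
 * With a single hog per child and enough CPUs for all of them, weight should
 * not matter: every child gets a full CPU, so usages should be roughly equal.
 */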
static int
underprovision_validate(const struct cpu_hogger *children, int num_children)
{
	int ret = KSFT_FAIL, i;

	for (i = 0; i < num_children - 1; i++) {
		if (!values_close(children[i + 1].usage, children[0].usage, 15))
			goto cleanup;
	}

	ret = KSFT_PASS;
cleanup:
	return ret;
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B     cpu.weight = 50
 * A/C     cpu.weight = 100
 * A/D     cpu.weight = 150
 *
 * A separate process is then created for each child cgroup which spawns a
 * single thread that hogs a CPU. The testcase is only run on systems that
 * have at least one core per-thread in the child processes.
 *
 * Once all of the children have exited, we verify that each child cgroup
 * had roughly the same runtime despite having different cpu.weight.
 */
static int test_cpucg_weight_underprovisioned(const char *root)
{
	/*
	 * Only run the test if there are enough cores to avoid overprovisioning
	 * the system.
	 */
	if (get_nprocs() < 4)
		return KSFT_SKIP;

	return run_cpucg_weight_test(root, weight_hog_one_cpu,
			underprovision_validate);
}
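
/*
 * Shared driver for the nested weight tests below: builds the hierarchy
 * described in their comments (leaf[0] is A/B; leaf[1] and leaf[2] are A/C/D
 * and A/C/E), hogs CPUs from every leaf for 10 seconds of wall time, and
 * checks how usage was split between A/B and the subtree under A/C.
 */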
static int
run_cpucg_nested_weight_test(const char *root, bool overprovisioned)
{
	int ret = KSFT_FAIL, i;
	char *parent = NULL, *child = NULL;
	struct cpu_hogger leaf[3] = {NULL};
	long nested_leaf_usage, child_usage;
	int nprocs = get_nprocs();

	if (!overprovisioned) {
		if (nprocs < 4)
			/*
			 * Only run the test if there are enough cores to avoid overprovisioning
			 * the system.
			 */
			return KSFT_SKIP;

		nprocs /= 4;
	}

	parent = cg_name(root, "cpucg_test");
	child = cg_name(parent, "cpucg_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;
	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;
	if (cg_write(child, "cgroup.subtree_control", "+cpu"))
		goto cleanup;
	if (cg_write(child, "cpu.weight", "1000"))
		goto cleanup;

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		const char *ancestor;
		long weight;

		if (i == 0) {
			ancestor = parent;
			weight = 1000;
		} else {
			ancestor = child;
			weight = 5000;
		}
		leaf[i].cgroup = cg_name_indexed(ancestor, "cpucg_leaf", i);
		if (!leaf[i].cgroup)
			goto cleanup;

		if (cg_create(leaf[i].cgroup))
			goto cleanup;

		if (cg_write_numeric(leaf[i].cgroup, "cpu.weight", weight))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		pid_t pid;
		struct cpu_hog_func_param param = {
			.nprocs = nprocs,
			.ts = {
				.tv_sec = 10,
				.tv_nsec = 0,
			},
			.clock_type = CPU_HOG_CLOCK_WALL,
		};

		pid = cg_run_nowait(leaf[i].cgroup, hog_cpus_timed,
				(void *)&param);
		if (pid <= 0)
			goto cleanup;
		leaf[i].pid = pid;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		int retcode;

		waitpid(leaf[i].pid, &retcode, 0);
		if (!WIFEXITED(retcode))
			goto cleanup;
		if (WEXITSTATUS(retcode))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		leaf[i].usage = cg_read_key_long(leaf[i].cgroup,
				"cpu.stat", "usage_usec");
		if (leaf[i].usage <= 0)
			goto cleanup;
	}

	nested_leaf_usage = leaf[1].usage + leaf[2].usage;
	if (overprovisioned) {
		if (!values_close(leaf[0].usage, nested_leaf_usage, 15))
			goto cleanup;
	} else if (!values_close(leaf[0].usage * 2, nested_leaf_usage, 15))
		goto cleanup;

	child_usage = cg_read_key_long(child, "cpu.stat", "usage_usec");
	if (child_usage <= 0)
		goto cleanup;
	if (!values_close(child_usage, nested_leaf_usage, 1))
		goto cleanup;

	ret = KSFT_PASS;
cleanup:
	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		cg_destroy(leaf[i].cgroup);
		free(leaf[i].cgroup);
	}
	cg_destroy(child);
	free(child);
	cg_destroy(parent);
	free(parent);

	return ret;
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B     cpu.weight = 1000
 * A/C     cpu.weight = 1000
 * A/C/D   cpu.weight = 5000
 * A/C/E   cpu.weight = 5000
 *
 * A separate process is then created for each leaf, which spawns nproc
 * threads that burn a CPU for a few seconds.
 *
 * Once all of those processes have exited, we verify that each of the leaf
 * cgroups has roughly the same usage from cpu.stat.
 */
static int
test_cpucg_nested_weight_overprovisioned(const char *root)
{
	return run_cpucg_nested_weight_test(root, true);
}

/*
 * First, this test creates the following hierarchy:
 * A
 * A/B     cpu.weight = 1000
 * A/C     cpu.weight = 1000
 * A/C/D   cpu.weight = 5000
 * A/C/E   cpu.weight = 5000
 *
 * A separate process is then created for each leaf, which spawns nproc / 4
 * threads that burn a CPU for a few seconds.
 *
 * Once all of those processes have exited, we verify that each of the leaf
 * cgroups has roughly the same usage from cpu.stat.
 */
static int
test_cpucg_nested_weight_underprovisioned(const char *root)
{
	return run_cpucg_nested_weight_test(root, false);
}

/*
 * This test creates a cgroup with some maximum value within a period, and
 * verifies that a process in the cgroup is not overscheduled.
 */
static int test_cpucg_max(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec;
	long usage_seconds = 1;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *cpucg;

	cpucg = cg_name(root, "cpucg_test");
	if (!cpucg)
		goto cleanup;

	if (cg_create(cpucg))
		goto cleanup;
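
	/*
	 * cpu.max takes "$MAX $PERIOD"; writing a lone "1000" updates $MAX and
	 * keeps the default 100ms period, i.e. at most 1ms of CPU time every
	 * 100ms (1% of one CPU).
	 */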
	if (cg_write(cpucg, "cpu.max", "1000"))
		goto cleanup;

	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	if (cg_run(cpucg, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (user_usec >= expected_usage_usec)
		goto cleanup;

	if (values_close(usage_usec, expected_usage_usec, 95))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(cpucg);
	free(cpucg);

	return ret;
}

/*
 * This test verifies that a process inside of a nested cgroup whose parent
 * group has a cpu.max value set, is properly throttled.
 */
static int test_cpucg_max_nested(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec;
	long usage_seconds = 1;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *parent, *child;

	parent = cg_name(root, "cpucg_parent");
	child = cg_name(parent, "cpucg_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cpu.max", "1000"))
		goto cleanup;

	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	if (cg_run(child, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(child, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(child, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (user_usec >= expected_usage_usec)
		goto cleanup;

	if (values_close(usage_usec, expected_usage_usec, 95))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(child);
	free(child);
	cg_destroy(parent);
	free(parent);

	return ret;
}
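
/* Pair each test function with its stringified name for reporting. */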
#define T(x) { x, #x }
struct cpucg_test {
	int (*fn)(const char *root);
	const char *name;
} tests[] = {
	T(test_cpucg_subtree_control),
	T(test_cpucg_stats),
	T(test_cpucg_weight_overprovisioned),
	T(test_cpucg_weight_underprovisioned),
	T(test_cpucg_nested_weight_overprovisioned),
	T(test_cpucg_nested_weight_underprovisioned),
	T(test_cpucg_max),
	T(test_cpucg_max_nested),
};
#undef T

int main(int argc, char *argv[])
{
	char root[PATH_MAX];
	int i, ret = EXIT_SUCCESS;

	if (cg_find_unified_root(root, sizeof(root)))
		ksft_exit_skip("cgroup v2 isn't mounted\n");
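
	/* Enable the cpu controller for the root's children if needed. */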
	if (cg_read_strstr(root, "cgroup.subtree_control", "cpu"))
		if (cg_write(root, "cgroup.subtree_control", "+cpu"))
			ksft_exit_skip("Failed to set cpu controller\n");

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		switch (tests[i].fn(root)) {
		case KSFT_PASS:
			ksft_test_result_pass("%s\n", tests[i].name);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("%s\n", tests[i].name);
			break;
		default:
			ret = EXIT_FAILURE;
			ksft_test_result_fail("%s\n", tests[i].name);
			break;
		}
	}

	return ret;
}