Merge branch 'timers/vdso' into timers/core

so the Hyper-V clocksource update can be applied.
Thomas Gleixner
2019-07-03 10:50:21 +02:00
5513 changed files with 12165 additions and 28108 deletions

View File

@@ -21,8 +21,8 @@ LDLIBS += -lcap -lelf -lrt -lpthread
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \
test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \
test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage test_select_reuseport test_section_names \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl
BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
@@ -63,7 +63,8 @@ TEST_PROGS_EXTENDED := with_addr.sh \
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
test_lirc_mode2_user
include ../lib.mk

View File

@@ -3,6 +3,7 @@
#include <error.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <sys/uio.h>
#define CHECK_FLOW_KEYS(desc, got, expected) \
CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \

View File

@@ -573,13 +573,13 @@ static void test_lpm_get_next_key(void)
/* add one more element (total two) */
key_p->prefixlen = 24;
inet_pton(AF_INET, "192.168.0.0", key_p->data);
inet_pton(AF_INET, "192.168.128.0", key_p->data);
assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
memset(key_p, 0, key_size);
assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
key_p->data[1] == 168 && key_p->data[2] == 0);
key_p->data[1] == 168 && key_p->data[2] == 128);
memset(next_key_p, 0, key_size);
assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
@@ -592,7 +592,7 @@ static void test_lpm_get_next_key(void)
/* Add one more element (total three) */
key_p->prefixlen = 24;
inet_pton(AF_INET, "192.168.128.0", key_p->data);
inet_pton(AF_INET, "192.168.0.0", key_p->data);
assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
memset(key_p, 0, key_size);
@@ -643,6 +643,41 @@ static void test_lpm_get_next_key(void)
assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
errno == ENOENT);
/* Add one more element (total five) */
key_p->prefixlen = 28;
inet_pton(AF_INET, "192.168.1.128", key_p->data);
assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
memset(key_p, 0, key_size);
assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
key_p->data[1] == 168 && key_p->data[2] == 0);
memset(next_key_p, 0, key_size);
assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
assert(next_key_p->prefixlen == 28 && next_key_p->data[0] == 192 &&
next_key_p->data[1] == 168 && next_key_p->data[2] == 1 &&
next_key_p->data[3] == 128);
memcpy(key_p, next_key_p, key_size);
assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
next_key_p->data[1] == 168 && next_key_p->data[2] == 1);
memcpy(key_p, next_key_p, key_size);
assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
memcpy(key_p, next_key_p, key_size);
assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
errno == ENOENT);
/* when no exactly matching key exists, get_next_key should return the first key in postorder */
key_p->prefixlen = 22;
inet_pton(AF_INET, "192.168.1.0", key_p->data);
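The assertions above rely on the LPM trie's iteration order: bpf_map_get_next_key() walks BPF_MAP_TYPE_LPM_TRIE keys in postorder, so more-specific prefixes come back before the broader prefixes covering them, and a NULL key (or a key with no exact match) restarts from the first postorder entry. A minimal userspace sketch of that pattern, assuming an already-populated IPv4 LPM map; map_fd and key_size are placeholders:
#include <arpa/inet.h>
#include <errno.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <bpf/bpf.h>

/* Print every prefix in the map, in the postorder the test asserts. */
static void dump_lpm_keys(int map_fd, size_t key_size)
{
	struct bpf_lpm_trie_key *key = malloc(key_size);
	struct bpf_lpm_trie_key *next = malloc(key_size);
	char buf[INET_ADDRSTRLEN];

	/* A NULL key yields the first postorder entry. */
	if (bpf_map_get_next_key(map_fd, NULL, key) == 0) {
		for (;;) {
			inet_ntop(AF_INET, key->data, buf, sizeof(buf));
			printf("%s/%u\n", buf, key->prefixlen);
			if (bpf_map_get_next_key(map_fd, key, next)) {
				if (errno != ENOENT) /* ENOENT ends the walk */
					perror("bpf_map_get_next_key");
				break;
			}
			memcpy(key, next, key_size);
		}
	}
	free(key);
	free(next);
}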

View File

@@ -119,6 +119,16 @@ static struct sec_name_test tests[] = {
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG},
{0, BPF_CGROUP_UDP6_SENDMSG},
},
{
"cgroup/recvmsg4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG},
{0, BPF_CGROUP_UDP4_RECVMSG},
},
{
"cgroup/recvmsg6",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG},
{0, BPF_CGROUP_UDP6_RECVMSG},
},
{
"cgroup/sysctl",
{0, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL},
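These entries assert that libbpf resolves the new "cgroup/recvmsg4" and "cgroup/recvmsg6" ELF section prefixes to BPF_PROG_TYPE_CGROUP_SOCK_ADDR with the corresponding UDP recvmsg attach types. A hedged sketch of a program selecting one of them (the trivial body is illustrative, not from the commit):
#include <linux/bpf.h>
#include "bpf_helpers.h"

/* Resolved to BPF_CGROUP_UDP4_RECVMSG per the table above. */
SEC("cgroup/recvmsg4")
int allow_recvmsg4(struct bpf_sock_addr *ctx)
{
	return 1; /* non-zero verdict lets recvmsg() proceed */
}

char _license[] SEC("license") = "GPL";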

View File

@@ -76,6 +76,7 @@ struct sock_addr_test {
enum {
LOAD_REJECT,
ATTACH_REJECT,
ATTACH_OKAY,
SYSCALL_EPERM,
SYSCALL_ENOTSUPP,
SUCCESS,
@@ -88,9 +89,13 @@ static int connect4_prog_load(const struct sock_addr_test *test);
static int connect6_prog_load(const struct sock_addr_test *test);
static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
static int recvmsg_allow_prog_load(const struct sock_addr_test *test);
static int recvmsg_deny_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
static int recvmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
static int recvmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
@@ -507,6 +512,92 @@ static struct sock_addr_test tests[] = {
SRC6_REWRITE_IP,
SYSCALL_EPERM,
},
/* recvmsg */
{
"recvmsg4: return code ok",
recvmsg_allow_prog_load,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP4_RECVMSG,
AF_INET,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
ATTACH_OKAY,
},
{
"recvmsg4: return code !ok",
recvmsg_deny_prog_load,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP4_RECVMSG,
AF_INET,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
LOAD_REJECT,
},
{
"recvmsg6: return code ok",
recvmsg_allow_prog_load,
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
AF_INET6,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
ATTACH_OKAY,
},
{
"recvmsg6: return code !ok",
recvmsg_deny_prog_load,
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
AF_INET6,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
LOAD_REJECT,
},
{
"recvmsg4: rewrite IP & port (asm)",
recvmsg4_rw_asm_prog_load,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP4_RECVMSG,
AF_INET,
SOCK_DGRAM,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
SERV4_IP,
SUCCESS,
},
{
"recvmsg6: rewrite IP & port (asm)",
recvmsg6_rw_asm_prog_load,
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
AF_INET6,
SOCK_DGRAM,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SERV6_IP,
SUCCESS,
},
};
static int mk_sockaddr(int domain, const char *ip, unsigned short port,
@@ -765,8 +856,8 @@ static int connect6_prog_load(const struct sock_addr_test *test)
return load_path(test, CONNECT6_PROG_PATH);
}
static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
int32_t rc)
static int xmsg_ret_only_prog_load(const struct sock_addr_test *test,
int32_t rc)
{
struct bpf_insn insns[] = {
/* return rc */
@@ -778,12 +869,22 @@ static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
{
return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
return xmsg_ret_only_prog_load(test, /*rc*/ 1);
}
static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
{
return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
return xmsg_ret_only_prog_load(test, /*rc*/ 0);
}
static int recvmsg_allow_prog_load(const struct sock_addr_test *test)
{
return xmsg_ret_only_prog_load(test, /*rc*/ 1);
}
static int recvmsg_deny_prog_load(const struct sock_addr_test *test)
{
return xmsg_ret_only_prog_load(test, /*rc*/ 0);
}
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
@@ -838,6 +939,47 @@ static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
}
static int recvmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
{
struct sockaddr_in src4_rw_addr;
if (mk_sockaddr(AF_INET, SERV4_IP, SERV4_PORT,
(struct sockaddr *)&src4_rw_addr,
sizeof(src4_rw_addr)) == -1)
return -1;
struct bpf_insn insns[] = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* if (sk.family == AF_INET && */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock_addr, family)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 6),
/* sk.type == SOCK_DGRAM) { */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock_addr, type)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_DGRAM, 4),
/* user_ip4 = src4_rw_addr.sin_addr */
BPF_MOV32_IMM(BPF_REG_7, src4_rw_addr.sin_addr.s_addr),
BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
offsetof(struct bpf_sock_addr, user_ip4)),
/* user_port = src4_rw_addr.sin_port */
BPF_MOV32_IMM(BPF_REG_7, src4_rw_addr.sin_port),
BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
offsetof(struct bpf_sock_addr, user_port)),
/* } */
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
};
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
}
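Read as restricted C, the instruction sequence above checks the socket, then overwrites the peer address that recvmsg() will report to userspace. A sketch of the equivalent C program (not from the commit; the literals stand in for the network-byte-order immediates the asm embeds from src4_rw_addr):
#include <linux/bpf.h>
#include <sys/socket.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"

SEC("cgroup/recvmsg4")
int recvmsg4_rewrite(struct bpf_sock_addr *ctx)
{
	if (ctx->family == AF_INET && ctx->type == SOCK_DGRAM) {
		ctx->user_ip4 = bpf_htonl(0x7f000001);	/* placeholder: 127.0.0.1 */
		ctx->user_port = bpf_htons(4040);	/* placeholder port */
	}
	return 1;	/* let the recvmsg() complete */
}

char _license[] SEC("license") = "GPL";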
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test)
{
return load_path(test, SENDMSG4_PROG_PATH);
@@ -901,6 +1043,39 @@ static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test)
return sendmsg6_rw_dst_asm_prog_load(test, SERV6_REWRITE_IP);
}
static int recvmsg6_rw_asm_prog_load(const struct sock_addr_test *test)
{
struct sockaddr_in6 src6_rw_addr;
if (mk_sockaddr(AF_INET6, SERV6_IP, SERV6_PORT,
(struct sockaddr *)&src6_rw_addr,
sizeof(src6_rw_addr)) == -1)
return -1;
struct bpf_insn insns[] = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* if (sk.family == AF_INET6) { */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock_addr, family)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET6, 10),
STORE_IPV6(user_ip6, src6_rw_addr.sin6_addr.s6_addr32),
/* user_port = dst6_rw_addr.sin6_port */
BPF_MOV32_IMM(BPF_REG_7, src6_rw_addr.sin6_port),
BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
offsetof(struct bpf_sock_addr, user_port)),
/* } */
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
};
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
}
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
{
return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
@@ -1282,13 +1457,13 @@ out:
return err;
}
static int run_sendmsg_test_case(const struct sock_addr_test *test)
static int run_xmsg_test_case(const struct sock_addr_test *test, int max_cmsg)
{
socklen_t addr_len = sizeof(struct sockaddr_storage);
struct sockaddr_storage expected_src_addr;
struct sockaddr_storage requested_addr;
struct sockaddr_storage expected_addr;
struct sockaddr_storage real_src_addr;
struct sockaddr_storage server_addr;
struct sockaddr_storage sendmsg_addr;
struct sockaddr_storage recvmsg_addr;
int clientfd = -1;
int servfd = -1;
int set_cmsg;
@@ -1297,20 +1472,19 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test)
if (test->type != SOCK_DGRAM)
goto err;
if (init_addrs(test, &requested_addr, &expected_addr,
&expected_src_addr))
if (init_addrs(test, &sendmsg_addr, &server_addr, &expected_addr))
goto err;
/* Prepare server to sendmsg to */
servfd = start_server(test->type, &expected_addr, addr_len);
servfd = start_server(test->type, &server_addr, addr_len);
if (servfd == -1)
goto err;
for (set_cmsg = 0; set_cmsg <= 1; ++set_cmsg) {
for (set_cmsg = 0; set_cmsg <= max_cmsg; ++set_cmsg) {
if (clientfd >= 0)
close(clientfd);
clientfd = sendmsg_to_server(test->type, &requested_addr,
clientfd = sendmsg_to_server(test->type, &sendmsg_addr,
addr_len, set_cmsg, /*flags*/0,
&err);
if (err)
@@ -1330,10 +1504,10 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test)
* specific packet may differ from the one used by default and
* returned by getsockname(2).
*/
if (recvmsg_from_client(servfd, &real_src_addr) == -1)
if (recvmsg_from_client(servfd, &recvmsg_addr) == -1)
goto err;
if (cmp_addr(&real_src_addr, &expected_src_addr, /*cmp_port*/0))
if (cmp_addr(&recvmsg_addr, &expected_addr, /*cmp_port*/0))
goto err;
}
@@ -1366,6 +1540,9 @@ static int run_test_case(int cgfd, const struct sock_addr_test *test)
goto out;
} else if (test->expected_result == ATTACH_REJECT || err) {
goto err;
} else if (test->expected_result == ATTACH_OKAY) {
err = 0;
goto out;
}
switch (test->attach_type) {
@@ -1379,7 +1556,11 @@ static int run_test_case(int cgfd, const struct sock_addr_test *test)
break;
case BPF_CGROUP_UDP4_SENDMSG:
case BPF_CGROUP_UDP6_SENDMSG:
err = run_sendmsg_test_case(test);
err = run_xmsg_test_case(test, 1);
break;
case BPF_CGROUP_UDP4_RECVMSG:
case BPF_CGROUP_UDP6_RECVMSG:
err = run_xmsg_test_case(test, 0);
break;
default:
goto err;

View File

@@ -29,8 +29,11 @@
"DIV64 overflow, check 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, -1),
BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 1),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
@@ -40,8 +43,11 @@
{
"DIV64 overflow, check 2",
.insns = {
BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
BPF_LD_IMM64(BPF_REG_1, LLONG_MIN),
BPF_ALU64_IMM(BPF_DIV, BPF_REG_1, -1),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
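Routing the quotient through a scratch register and returning only a pass/fail flag fixes a blind spot: bpf_prog_test_run() reports retval as a 32-bit value, so a buggy signed division yielding LLONG_MIN would truncate to 0 and pass the old check. eBPF division is unsigned, which makes the correct quotient here well defined: 0x8000000000000000 / 0xffffffffffffffff is simply 0. A userspace model of the arithmetic being checked (sketch):
#include <assert.h>
#include <limits.h>
#include <stdint.h>

int main(void)
{
	uint64_t dividend = (uint64_t)LLONG_MIN;	/* 0x8000000000000000 */
	uint64_t divisor = (uint64_t)-1;		/* 0xffffffffffffffff */

	/* Unsigned division: the divisor is numerically larger than the
	 * dividend, so the quotient is 0 -- no signed-overflow trap.
	 */
	assert(dividend / divisor == 0);
	return 0;
}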

View File

@@ -0,0 +1,533 @@
/* This file contains sub-register zero extension checks for insns defining
* sub-registers, meaning:
* - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width
* forms (BPF_END) could define sub-registers.
* - Narrow direct loads, BPF_B/H/W | BPF_LDX.
* - BPF_LD is not exposed to JIT back-ends, so no need for testing.
*
* "get_prandom_u32" is used to initialize the low 32 bits of some registers,
* preventing potential optimizations by the verifier or JIT back-ends that
* could turn a register back into a constant when range info shows it holds
* a constant value.
*/
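Every check in this file follows the same shape: pollute the upper half of a register, run one 32-bit (or narrow) operation, then shift right by 32 and return the result, which must be 0 if the JIT zero-extends sub-register writes correctly. A C model of the pattern (sketch; rand() stands in for get_prandom_u32):
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	uint64_t r0 = (uint32_t)rand();	/* unknown low 32 bits */

	r0 |= 0x1000000000ULL;		/* pollute bits 32..63 */
	r0 = (uint32_t)(r0 + 0);	/* a BPF_ALU32 op must zero-extend */
	assert(r0 >> 32 == 0);		/* hence .retval = 0 in every test */
	return 0;
}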
{
"add32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"add32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
/* An insn could have no effect on the low 32 bits, for example:
* a = a + 0
* a = a | 0
* a = a & -1
* But it should still zero the high 32 bits.
*/
BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, -2),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"sub32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"sub32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mul32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mul32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, -1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"div32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, -1),
BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"div32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 2),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"or32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
BPF_ALU32_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"or32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"and32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
BPF_ALU32_REG(BPF_AND, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"and32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -2),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"lsh32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU32_REG(BPF_LSH, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"lsh32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"rsh32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU32_REG(BPF_RSH, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"rsh32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"neg32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_NEG, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mod32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, -1),
BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mod32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 2),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"xor32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"xor32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_XOR, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mov32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
BPF_MOV32_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mov32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"arsh32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"arsh32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"end16 (to_le) reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 16),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"end32 (to_le) reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"end16 (to_be) reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"end32 (to_be) reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"ldx_b zero extend check",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"ldx_h zero extend check",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"ldx_w zero extend check",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},

View File

@@ -28,6 +28,7 @@ ALL_TESTS="
vlan_interface_uppers_test
bridge_extern_learn_test
neigh_offload_test
nexthop_offload_test
devlink_reload_test
"
NUM_NETIFS=2
@@ -607,6 +608,52 @@ neigh_offload_test()
ip -4 address del 192.0.2.1/24 dev $swp1
}
nexthop_offload_test()
{
# Test that IPv4 and IPv6 nexthops are marked as offloaded
RET=0
sysctl_set net.ipv6.conf.$swp2.keep_addr_on_down 1
simple_if_init $swp1 192.0.2.1/24 2001:db8:1::1/64
simple_if_init $swp2 192.0.2.2/24 2001:db8:1::2/64
setup_wait
ip -4 route add 198.51.100.0/24 vrf v$swp1 \
nexthop via 192.0.2.2 dev $swp1
ip -6 route add 2001:db8:2::/64 vrf v$swp1 \
nexthop via 2001:db8:1::2 dev $swp1
ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload
check_err $? "ipv4 nexthop not marked as offloaded when should"
ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload
check_err $? "ipv6 nexthop not marked as offloaded when should"
ip link set dev $swp2 down
sleep 1
ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload
check_fail $? "ipv4 nexthop marked as offloaded when should not"
ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload
check_fail $? "ipv6 nexthop marked as offloaded when should not"
ip link set dev $swp2 up
setup_wait
ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload
check_err $? "ipv4 nexthop not marked as offloaded after neigh add"
ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload
check_err $? "ipv6 nexthop not marked as offloaded after neigh add"
log_test "nexthop offload indication"
ip -6 route del 2001:db8:2::/64 vrf v$swp1
ip -4 route del 198.51.100.0/24 vrf v$swp1
simple_if_fini $swp2 192.0.2.2/24 2001:db8:1::2/64
simple_if_fini $swp1 192.0.2.1/24 2001:db8:1::1/64
sysctl_restore net.ipv6.conf.$swp2.keep_addr_on_down
}
devlink_reload_test()
{
# Test that after executing all the above configuration tests, a

View File

@@ -1,11 +1,11 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
# ftracetest - Ftrace test shell scripts
#
# Copyright (C) Hitachi Ltd., 2014
# Written by Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
#
# Released under the terms of the GPL v2.
usage() { # errno [message]
[ ! -z "$2" ] && echo $2

View File

@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* GPIO chardev test helper
*
* Copyright (C) 2016 Bamvor Jian Zhang
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#define _GNU_SOURCE

View File

@@ -1,12 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Exercise /dev/mem mmap cases that have been troublesome in the past
*
* (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <stdlib.h>

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by the GPLv2 license.
*
* kselftest_harness.h: simple C unit test helper.
*

View File

@@ -2,6 +2,7 @@
/x86_64/evmcs_test
/x86_64/hyperv_cpuid
/x86_64/kvm_create_max_vcpus
/x86_64/mmio_warning_test
/x86_64/platform_info_test
/x86_64/set_sregs_test
/x86_64/smm_test

View File

@@ -11,23 +11,24 @@ LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebi
LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c
LIBKVM_aarch64 = lib/aarch64/processor.c
TEST_GEN_PROGS_x86_64 = x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += x86_64/kvm_create_max_vcpus
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))

View File

@@ -1,10 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/kvm_util.h
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
*/
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H
@@ -139,6 +137,8 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
void *guest_code);
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
bool vm_is_unrestricted_guest(struct kvm_vm *vm);
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
uint64_t end);

View File

@@ -1,11 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/sparsebit.h
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
*
* Header file that describes the API to the sparsebit library.
* This library provides a memory efficient means of storing
* the settings of bits indexed via a uint64_t. Memory usage

View File

@@ -1,10 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/test_util.h
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
*/
#ifndef SELFTEST_KVM_TEST_UTIL_H

View File

@@ -1,10 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/x86_64/processor.h
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
*/
#ifndef SELFTEST_KVM_PROCESSOR_H
@@ -303,6 +301,8 @@ static inline unsigned long get_xmm(int n)
return 0;
}
bool is_intel_cpu(void);
struct kvm_x86_state;
struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,

View File

@@ -1,10 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/x86_64/vmx.h
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
*/
#ifndef SELFTEST_KVM_VMX_H

View File

@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/assert.c
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*/
#define _GNU_SOURCE /* for getline(3) and strchrnul(3)*/

View File

@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/elf.c
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*/
#include "test_util.h"

View File

@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/io.c
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*/
#include "test_util.h"

View File

@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/kvm_util.c
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*/
#include "test_util.h"
@@ -1583,3 +1582,39 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
/*
* Is Unrestricted Guest
*
* Input Args:
* vm - Virtual Machine
*
* Output Args: None
*
* Return: true if the unrestricted_guest module parameter is set to 'Y', otherwise false.
*
* Check if the unrestricted guest flag is enabled.
*/
bool vm_is_unrestricted_guest(struct kvm_vm *vm)
{
char val = 'N';
size_t count;
FILE *f;
if (vm == NULL) {
/* Ensure that the KVM vendor-specific module is loaded. */
f = fopen(KVM_DEV_PATH, "r");
TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
errno);
fclose(f);
}
f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
if (f) {
count = fread(&val, sizeof(char), 1, f);
TEST_ASSERT(count == 1, "Unable to read from param file.");
fclose(f);
}
return val == 'Y';
}

View File

@@ -1,9 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/lib/kvm_util_internal.h
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*/
#ifndef SELFTEST_KVM_UTIL_INTERNAL_H

View File

@@ -1,11 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Sparse bit array
*
* Copyright (C) 2018, Google LLC.
* Copyright (C) 2018, Red Hat, Inc. (code style cleanup and fuzzing driver)
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* This library provides functions to support a memory efficient bit array,
* with an index size of 2^64. A sparsebit array is allocated through
* the use of sparsebit_alloc() and free'd via sparsebit_free(),

View File

@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/x86_64/processor.c
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*/
#define _GNU_SOURCE /* for program_invocation_name */
@@ -1137,3 +1136,19 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
r);
}
}
bool is_intel_cpu(void)
{
int eax, ebx, ecx, edx;
const uint32_t *chunk;
const int leaf = 0;
__asm__ __volatile__(
"cpuid"
: /* output */ "=a"(eax), "=b"(ebx),
"=c"(ecx), "=d"(edx)
: /* input */ "0"(leaf), "2"(0));
chunk = (const uint32_t *)("GenuineIntel");
return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}
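The comparison works because CPUID leaf 0 returns the vendor string "GenuineIntel" split across EBX, EDX and ECX, in that order, four little-endian bytes per register; casting the string literal to uint32_t chunks reproduces those register images. A standalone illustration (sketch, no inline asm needed):
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *vendor = "GenuineIntel";
	uint32_t ebx, edx, ecx;

	memcpy(&ebx, vendor + 0, 4);	/* "Genu" */
	memcpy(&edx, vendor + 4, 4);	/* "ineI" */
	memcpy(&ecx, vendor + 8, 4);	/* "ntel" */
	printf("ebx=%08x edx=%08x ecx=%08x\n", ebx, edx, ecx);
	return 0;
}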

View File

@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/x86_64/vmx.c
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*/
#define _GNU_SOURCE /* for program_invocation_name */

View File

@@ -1,10 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* kvm_create_max_vcpus
*
* Copyright (C) 2019, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Test for KVM_CAP_MAX_VCPUS and KVM_CAP_MAX_VCPU_ID.
*/

View File

@@ -0,0 +1,126 @@
/*
* mmio_warning_test
*
* Copyright (C) 2019, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Test that we don't get a kernel warning when we call KVM_RUN after a
* triple fault occurs. To get the triple fault to occur we call KVM_RUN
* on a VCPU that hasn't been properly set up.
*
*/
#define _GNU_SOURCE
#include <fcntl.h>
#include <kvm_util.h>
#include <linux/kvm.h>
#include <processor.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <test_util.h>
#include <unistd.h>
#define NTHREAD 4
#define NPROCESS 5
struct thread_context {
int kvmcpu;
struct kvm_run *run;
};
void *thr(void *arg)
{
struct thread_context *tc = (struct thread_context *)arg;
int res;
int kvmcpu = tc->kvmcpu;
struct kvm_run *run = tc->run;
res = ioctl(kvmcpu, KVM_RUN, 0);
printf("ret1=%d exit_reason=%d suberror=%d\n",
res, run->exit_reason, run->internal.suberror);
return 0;
}
void test(void)
{
int i, kvm, kvmvm, kvmcpu;
pthread_t th[NTHREAD];
struct kvm_run *run;
struct thread_context tc;
kvm = open("/dev/kvm", O_RDWR);
TEST_ASSERT(kvm != -1, "failed to open /dev/kvm");
kvmvm = ioctl(kvm, KVM_CREATE_VM, 0);
TEST_ASSERT(kvmvm != -1, "KVM_CREATE_VM failed");
kvmcpu = ioctl(kvmvm, KVM_CREATE_VCPU, 0);
TEST_ASSERT(kvmcpu != -1, "KVM_CREATE_VCPU failed");
run = (struct kvm_run *)mmap(0, 4096, PROT_READ|PROT_WRITE, MAP_SHARED,
kvmcpu, 0);
tc.kvmcpu = kvmcpu;
tc.run = run;
srand(getpid());
for (i = 0; i < NTHREAD; i++) {
pthread_create(&th[i], NULL, thr, (void *)(uintptr_t)&tc);
usleep(rand() % 10000);
}
for (i = 0; i < NTHREAD; i++)
pthread_join(th[i], NULL);
}
int get_warnings_count(void)
{
int warnings;
FILE *f;
f = popen("dmesg | grep \"WARNING:\" | wc -l", "r");
fscanf(f, "%d", &warnings);
fclose(f);
return warnings;
}
int main(void)
{
int warnings_before, warnings_after;
if (!is_intel_cpu()) {
printf("Must be run on an Intel CPU, skipping test\n");
exit(KSFT_SKIP);
}
if (vm_is_unrestricted_guest(NULL)) {
printf("Unrestricted guest must be disabled, skipping test\n");
exit(KSFT_SKIP);
}
warnings_before = get_warnings_count();
for (int i = 0; i < NPROCESS; ++i) {
int status;
int pid = fork();
if (pid < 0)
exit(1);
if (pid == 0) {
test();
exit(0);
}
while (waitpid(pid, &status, __WALL) != pid)
;
}
warnings_after = get_warnings_count();
TEST_ASSERT(warnings_before == warnings_after,
"Warnings found in kernel. Run 'dmesg' to inspect them.");
return 0;
}

View File

@@ -1,16 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* KVM_SET_SREGS tests
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* This is a regression test for the bug fixed by the following commit:
* d3802286fa0f ("kvm: x86: Disallow illegal IA32_APIC_BASE MSR values")
*
* That bug allowed a user-mode program that called the KVM_SET_SREGS
* ioctl to put a VCPU's local APIC into an invalid state.
*
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>

View File

@@ -1,10 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* KVM_GET/SET_* tests
*
* Copyright (C) 2018, Red Hat, Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Tests for vCPU state save/restore, including nested guest state.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */

View File

@@ -1,10 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test for x86 KVM_CAP_SYNC_REGS
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Verifies expected behavior of x86 KVM_CAP_SYNC_REGS functionality,
* including requesting an invalid register set, updates to/from values
* in kvm_run.s.regs when kvm_valid_regs and kvm_dirty_regs are toggled.

View File

@@ -1,10 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* vmx_close_while_nested
*
* Copyright (C) 2019, Red Hat, Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Verify that nothing bad happens if a KVM user exits with open
* file descriptors while executing a nested guest.
*/

View File

@@ -1,10 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* vmx_set_nested_state_test
*
* Copyright (C) 2019, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* This test verifies the integrity of calling the ioctl KVM_SET_NESTED_STATE.
*/
@@ -75,7 +74,7 @@ void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
u32 vmcs12_revision)
{
/* Set revision_id in vmcs12 to vmcs12_revision. */
memcpy(state->data, &vmcs12_revision, sizeof(u32));
memcpy(&state->data, &vmcs12_revision, sizeof(u32));
}
void set_default_state(struct kvm_nested_state *state)
@@ -95,9 +94,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size)
KVM_STATE_NESTED_EVMCS;
state->format = 0;
state->size = size;
state->vmx.vmxon_pa = 0x1000;
state->vmx.vmcs_pa = 0x2000;
state->vmx.smm.flags = 0;
state->hdr.vmx.vmxon_pa = 0x1000;
state->hdr.vmx.vmcs12_pa = 0x2000;
state->hdr.vmx.smm.flags = 0;
set_revision_id_for_vmcs12(state, VMCS12_REVISION);
}
@@ -123,39 +122,47 @@ void test_vmx_nested_state(struct kvm_vm *vm)
/*
* We cannot virtualize anything if the guest does not have VMX
* enabled. We expect KVM_SET_NESTED_STATE to return 0 if vmxon_pa
* is set to -1ull.
* is set to -1ull, but the flags must be zero.
*/
set_default_vmx_state(state, state_sz);
state->vmx.vmxon_pa = -1ull;
state->hdr.vmx.vmxon_pa = -1ull;
test_nested_state_expect_einval(vm, state);
state->hdr.vmx.vmcs12_pa = -1ull;
state->flags = KVM_STATE_NESTED_EVMCS;
test_nested_state_expect_einval(vm, state);
state->flags = 0;
test_nested_state(vm, state);
/* Enable VMX in the guest CPUID. */
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
/*
* Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
* setting the nested state, but flags other than eVMCS must be clear.
*/
set_default_vmx_state(state, state_sz);
state->vmx.vmxon_pa = -1ull;
state->vmx.smm.flags = 1;
state->hdr.vmx.vmxon_pa = -1ull;
state->hdr.vmx.vmcs12_pa = -1ull;
test_nested_state_expect_einval(vm, state);
state->flags = KVM_STATE_NESTED_EVMCS;
test_nested_state(vm, state);
/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
state->hdr.vmx.smm.flags = 1;
test_nested_state_expect_einval(vm, state);
/* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
set_default_vmx_state(state, state_sz);
state->vmx.vmxon_pa = -1ull;
state->vmx.vmcs_pa = 0;
state->hdr.vmx.vmxon_pa = -1ull;
state->flags = 0;
test_nested_state_expect_einval(vm, state);
/*
* Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
* setting the nested state.
*/
set_default_vmx_state(state, state_sz);
state->vmx.vmxon_pa = -1ull;
state->vmx.vmcs_pa = -1ull;
test_nested_state(vm, state);
/* It is invalid to have vmxon_pa set to a non-page aligned address. */
set_default_vmx_state(state, state_sz);
state->vmx.vmxon_pa = 1;
state->hdr.vmx.vmxon_pa = 1;
test_nested_state_expect_einval(vm, state);
/*
@@ -165,7 +172,7 @@ void test_vmx_nested_state(struct kvm_vm *vm)
set_default_vmx_state(state, state_sz);
state->flags = KVM_STATE_NESTED_GUEST_MODE |
KVM_STATE_NESTED_RUN_PENDING;
state->vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
test_nested_state_expect_einval(vm, state);
/*
@@ -174,14 +181,14 @@ void test_vmx_nested_state(struct kvm_vm *vm)
* KVM_STATE_NESTED_SMM_VMXON
*/
set_default_vmx_state(state, state_sz);
state->vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
state->hdr.vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
KVM_STATE_NESTED_SMM_VMXON);
test_nested_state_expect_einval(vm, state);
/* Outside SMM, SMM flags must be zero. */
set_default_vmx_state(state, state_sz);
state->flags = 0;
state->vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
test_nested_state_expect_einval(vm, state);
/* Size must be large enough to fit kvm_nested_state and vmcs12. */
@@ -191,8 +198,8 @@ void test_vmx_nested_state(struct kvm_vm *vm)
/* vmxon_pa cannot be the same address as vmcs_pa. */
set_default_vmx_state(state, state_sz);
state->vmx.vmxon_pa = 0;
state->vmx.vmcs_pa = 0;
state->hdr.vmx.vmxon_pa = 0;
state->hdr.vmx.vmcs12_pa = 0;
test_nested_state_expect_einval(vm, state);
/* The revision id for vmcs12 must be VMCS12_REVISION. */
@@ -205,16 +212,16 @@ void test_vmx_nested_state(struct kvm_vm *vm)
* it again.
*/
set_default_vmx_state(state, state_sz);
state->vmx.vmxon_pa = -1ull;
state->vmx.vmcs_pa = -1ull;
state->hdr.vmx.vmxon_pa = -1ull;
state->hdr.vmx.vmcs12_pa = -1ull;
state->flags = 0;
test_nested_state(vm, state);
vcpu_nested_state_get(vm, VCPU_ID, state);
TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
"Size must be between %d and %d. The size returned was %d.",
sizeof(*state), state_sz, state->size);
TEST_ASSERT(state->vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
TEST_ASSERT(state->vmx.vmcs_pa == -1ull, "vmcs_pa must be -1ull.");
TEST_ASSERT(state->hdr.vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
TEST_ASSERT(state->hdr.vmx.vmcs12_pa == -1ull, "vmcs_pa must be -1ull.");
free(state);
}
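The mechanical vmx.* to hdr.vmx.* churn (and vmcs_pa becoming vmcs12_pa) tracks a UAPI layout change: struct kvm_nested_state grew a hdr union, and its trailing payload became a union rather than a bare byte array, which is also why set_revision_id_for_vmcs12() now takes &state->data. An abridged sketch of the new layout, reconstructed from the field accesses in this test; see the kernel's linux/kvm.h for the authoritative definition (padding sizes and SVM fields here are assumptions):
#include <linux/types.h>

struct kvm_vmx_nested_state_hdr {
	__u64 vmxon_pa;
	__u64 vmcs12_pa;	/* formerly state->vmx.vmcs_pa */
	struct {
		__u16 flags;	/* KVM_STATE_NESTED_SMM_* */
	} smm;
};

struct kvm_nested_state {
	__u16 flags;		/* KVM_STATE_NESTED_* */
	__u16 format;
	__u32 size;
	union {
		struct kvm_vmx_nested_state_hdr vmx;
		__u8 pad[120];	/* room for other formats (assumption) */
	} hdr;
	union {
		__u8 vmx[0];	/* vmcs12 (+ shadow vmcs12) bytes */
	} data;
};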

View File

@@ -1,11 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* vmx_tsc_adjust_test
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
*
* IA32_TSC_ADJUST test
*
* According to the SDM, "if an execution of WRMSR to the

View File

@@ -2,7 +2,8 @@
# SPDX-License-Identifier: GPL-2.0
ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test"
match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test \
match_ip_tos_test"
NUM_NETIFS=2
source tc_common.sh
source lib.sh
@@ -276,6 +277,39 @@ match_vlan_test()
log_test "VLAN match ($tcflags)"
}
match_ip_tos_test()
{
RET=0
tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
$tcflags dst_ip 192.0.2.2 ip_tos 0x20 action drop
tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
$tcflags dst_ip 192.0.2.2 ip_tos 0x18 action drop
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
-t ip tos=18 -q
tc_check_packets "dev $h2 ingress" 101 1
check_fail $? "Matched on a wrong filter (0x18)"
tc_check_packets "dev $h2 ingress" 102 1
check_err $? "Did not match on correct filter (0x18)"
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
-t ip tos=20 -q
tc_check_packets "dev $h2 ingress" 102 2
check_fail $? "Matched on a wrong filter (0x20)"
tc_check_packets "dev $h2 ingress" 101 1
check_err $? "Did not match on correct filter (0x20)"
tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
log_test "ip_tos match ($tcflags)"
}
setup_prepare()
{
h1=${NETIFS[p1]}

View File

@@ -1,3 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* POWER Data Stream Control Register (DSCR)
*
@@ -6,10 +7,6 @@
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#ifndef _SELFTESTS_POWERPC_DSCR_DSCR_H
#define _SELFTESTS_POWERPC_DSCR_DSCR_H

View File

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) default test
*
@@ -7,10 +8,6 @@
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "dscr.h"

View File

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) explicit test
*
@@ -13,10 +14,6 @@
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "dscr.h"

View File

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) fork exec test
*
@@ -12,10 +13,6 @@
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "dscr.h"

View File

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) fork test
*
@@ -13,10 +14,6 @@
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "dscr.h"

View File

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) sysfs interface test
*
@@ -6,10 +7,6 @@
* well verified from their sysfs interfaces.
*
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "dscr.h"

View File

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) sysfs thread test
*
@@ -7,10 +8,6 @@
* executing on individual CPUs on the system.
*
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#define _GNU_SOURCE
#include "dscr.h"

View File

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) SPR test
*
@@ -14,10 +15,6 @@
*
* Copyright 2013, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "dscr.h"

View File

@@ -1,11 +1,11 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
# common_tests - Shell script commonly used by pstore test scripts
#
# Copyright (C) Hitachi Ltd., 2015
# Written by Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
#
# Released under the terms of the GPL v2.
# Utilities
errexit() { # message

View File

@@ -1,11 +1,11 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
# pstore_crash_test - Pstore test shell script which causes crash and reboot
#
# Copyright (C) Hitachi Ltd., 2015
# Written by Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
#
# Released under the terms of the GPL v2.
# exit if pstore backend is not registered
. ./common_tests

View File

@@ -1,11 +1,11 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
# pstore_post_reboot_tests - Check pstore's behavior after crash/reboot
#
# Copyright (C) Hitachi Ltd., 2015
# Written by Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
#
# Released under the terms of the GPL v2.
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4

View File

@@ -1,11 +1,11 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
# pstore_tests - Check pstore's behavior before crash/reboot
#
# Copyright (C) Hitachi Ltd., 2015
# Written by Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
#
# Released under the terms of the GPL v2.
. ./common_tests

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by the GPLv2 license.
*
* Test code for seccomp bpf.
*/

View File

@@ -1,11 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Stress userfaultfd syscall.
*
* Copyright (C) 2015 Red Hat, Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* This test allocates two virtual areas and bounces the physical
* memory across the two virtual areas (from area_src to area_dst)
* using userfaultfd.

View File

@@ -1,16 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Authors: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
* Authors: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
*/
#include <stdio.h>