Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Minor comment merge conflict in mlx5.

The staging driver has a fixup due to the skb->xmit_more changes in 'net-next', but that driver was removed in 'net'.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -39,6 +39,58 @@ static struct bpf_flow_keys pkt_v6_flow_keys = {
	.n_proto = __bpf_constant_htons(ETH_P_IPV6),
};

#define VLAN_HLEN 4

static struct {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed pkt_vlan_v4 = {
	.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
	.vlan_proto = __bpf_constant_htons(ETH_P_IP),
	.iph.ihl = 5,
	.iph.protocol = IPPROTO_TCP,
	.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
	.tcp.doff = 5,
};

static struct bpf_flow_keys pkt_vlan_v4_flow_keys = {
	.nhoff = VLAN_HLEN,
	.thoff = VLAN_HLEN + sizeof(struct iphdr),
	.addr_proto = ETH_P_IP,
	.ip_proto = IPPROTO_TCP,
	.n_proto = __bpf_constant_htons(ETH_P_IP),
};

static struct {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	__u16 vlan_tci2;
	__u16 vlan_proto2;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed pkt_vlan_v6 = {
	.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
	.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
	.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
	.iph.nexthdr = IPPROTO_TCP,
	.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
	.tcp.doff = 5,
};

static struct bpf_flow_keys pkt_vlan_v6_flow_keys = {
	.nhoff = VLAN_HLEN * 2,
	.thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr),
	.addr_proto = ETH_P_IPV6,
	.ip_proto = IPPROTO_TCP,
	.n_proto = __bpf_constant_htons(ETH_P_IPV6),
};

void test_flow_dissector(void)
{
	struct bpf_flow_keys flow_keys;
@@ -68,5 +120,21 @@ void test_flow_dissector(void)
	      err, errno, retval, duration, size, sizeof(flow_keys));
	CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);

	err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v4, sizeof(pkt_vlan_v4),
				&flow_keys, &size, &retval, &duration);
	CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv4",
	      "err %d errno %d retval %d duration %d size %u/%lu\n",
	      err, errno, retval, duration, size, sizeof(flow_keys));
	CHECK_FLOW_KEYS("vlan_ipv4_flow_keys", flow_keys,
			pkt_vlan_v4_flow_keys);

	err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v6, sizeof(pkt_vlan_v6),
				&flow_keys, &size, &retval, &duration);
	CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv6",
	      "err %d errno %d retval %d duration %d size %u/%lu\n",
	      err, errno, retval, duration, size, sizeof(flow_keys));
	CHECK_FLOW_KEYS("vlan_ipv6_flow_keys", flow_keys,
			pkt_vlan_v6_flow_keys);

	bpf_object__close(obj);
}
@@ -92,7 +92,6 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
|
||||
{
|
||||
struct bpf_flow_keys *keys = skb->flow_keys;
|
||||
|
||||
keys->n_proto = proto;
|
||||
switch (proto) {
|
||||
case bpf_htons(ETH_P_IP):
|
||||
bpf_tail_call(skb, &jmp_table, IP);
|
||||
@@ -119,10 +118,9 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
|
||||
SEC("flow_dissector")
|
||||
int _dissect(struct __sk_buff *skb)
|
||||
{
|
||||
if (!skb->vlan_present)
|
||||
return parse_eth_proto(skb, skb->protocol);
|
||||
else
|
||||
return parse_eth_proto(skb, skb->vlan_proto);
|
||||
struct bpf_flow_keys *keys = skb->flow_keys;
|
||||
|
||||
return parse_eth_proto(skb, keys->n_proto);
|
||||
}
|
||||
|
||||
/* Parses on IPPROTO_* */
|
||||
@@ -336,15 +334,9 @@ PROG(VLAN)(struct __sk_buff *skb)
|
||||
{
|
||||
struct bpf_flow_keys *keys = skb->flow_keys;
|
||||
struct vlan_hdr *vlan, _vlan;
|
||||
__be16 proto;
|
||||
|
||||
/* Peek back to see if single or double-tagging */
|
||||
if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
|
||||
sizeof(proto)))
|
||||
return BPF_DROP;
|
||||
|
||||
/* Account for double-tagging */
|
||||
if (proto == bpf_htons(ETH_P_8021AD)) {
|
||||
if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
|
||||
vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
|
||||
if (!vlan)
|
||||
return BPF_DROP;
|
||||
@@ -352,6 +344,7 @@ PROG(VLAN)(struct __sk_buff *skb)
|
||||
if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
|
||||
return BPF_DROP;
|
||||
|
||||
keys->nhoff += sizeof(*vlan);
|
||||
keys->thoff += sizeof(*vlan);
|
||||
}
|
||||
|
||||
@@ -359,12 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
|
||||
if (!vlan)
|
||||
return BPF_DROP;
|
||||
|
||||
keys->nhoff += sizeof(*vlan);
|
||||
keys->thoff += sizeof(*vlan);
|
||||
/* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
|
||||
if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
|
||||
vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
|
||||
return BPF_DROP;
|
||||
|
||||
keys->n_proto = vlan->h_vlan_encapsulated_proto;
|
||||
return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
|
||||
}
|
||||
|
||||
|
@@ -5776,6 +5776,53 @@ const struct btf_dedup_test dedup_tests[] = {
		.dedup_table_size = 1, /* force hash collisions */
	},
},
{
	.descr = "dedup: void equiv check",
	/*
	 * // CU 1:
	 * struct s {
	 *	struct {} *x;
	 * };
	 * // CU 2:
	 * struct s {
	 *	int *x;
	 * };
	 */
	.input = {
		.raw_types = {
			/* CU 1 */
			BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
			BTF_PTR_ENC(1), /* [2] ptr -> [1] */
			BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
				BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
			/* CU 2 */
			BTF_PTR_ENC(0), /* [4] ptr -> void */
			BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
				BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
			BTF_END_RAW,
		},
		BTF_STR_SEC("\0s\0x"),
	},
	.expect = {
		.raw_types = {
			/* CU 1 */
			BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
			BTF_PTR_ENC(1), /* [2] ptr -> [1] */
			BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
				BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
			/* CU 2 */
			BTF_PTR_ENC(0), /* [4] ptr -> void */
			BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
				BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
			BTF_END_RAW,
		},
		BTF_STR_SEC("\0s\0x"),
	},
	.opts = {
		.dont_resolve_fwds = false,
		.dedup_table_size = 1, /* force hash collisions */
	},
},
{
	.descr = "dedup: all possible kinds (no duplicates)",
	.input = {
@@ -907,6 +907,44 @@
	.errstr = "call stack",
	.result = REJECT,
},
{
	"calls: stack depth check in dead code",
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	"calls: spill into caller stack frame",
	.insns = {
@@ -29,8 +29,8 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
|
||||
INSTALL_HDR_PATH = $(top_srcdir)/usr
|
||||
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
|
||||
LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
|
||||
CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
|
||||
LDFLAGS += -pthread
|
||||
CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
|
||||
LDFLAGS += -pthread -no-pie
|
||||
|
||||
# After inclusion, $(OUTPUT) is defined and
|
||||
# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
|
||||
|
@@ -102,6 +102,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
|
||||
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
|
||||
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
|
||||
int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
|
||||
void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
|
||||
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
|
||||
struct kvm_mp_state *mp_state);
|
||||
void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
|
||||
|
@@ -1121,6 +1121,22 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
|
||||
return rc;
|
||||
}
|
||||
|
||||
void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
|
||||
{
|
||||
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
|
||||
int ret;
|
||||
|
||||
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
|
||||
|
||||
vcpu->state->immediate_exit = 1;
|
||||
ret = ioctl(vcpu->fd, KVM_RUN, NULL);
|
||||
vcpu->state->immediate_exit = 0;
|
||||
|
||||
TEST_ASSERT(ret == -1 && errno == EINTR,
|
||||
"KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
|
||||
ret, errno);
|
||||
}
|
||||
|
||||
/*
|
||||
* VM VCPU Set MP State
|
||||
*
|
||||
|
@@ -87,22 +87,25 @@ int main(int argc, char *argv[])
|
||||
while (1) {
|
||||
rc = _vcpu_run(vm, VCPU_ID);
|
||||
|
||||
if (run->exit_reason == KVM_EXIT_IO) {
|
||||
switch (get_ucall(vm, VCPU_ID, &uc)) {
|
||||
case UCALL_SYNC:
|
||||
/* emulate hypervisor clearing CR4.OSXSAVE */
|
||||
vcpu_sregs_get(vm, VCPU_ID, &sregs);
|
||||
sregs.cr4 &= ~X86_CR4_OSXSAVE;
|
||||
vcpu_sregs_set(vm, VCPU_ID, &sregs);
|
||||
break;
|
||||
case UCALL_ABORT:
|
||||
TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
|
||||
break;
|
||||
case UCALL_DONE:
|
||||
goto done;
|
||||
default:
|
||||
TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
|
||||
}
|
||||
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
|
||||
"Unexpected exit reason: %u (%s),\n",
|
||||
run->exit_reason,
|
||||
exit_reason_str(run->exit_reason));
|
||||
|
||||
switch (get_ucall(vm, VCPU_ID, &uc)) {
|
||||
case UCALL_SYNC:
|
||||
/* emulate hypervisor clearing CR4.OSXSAVE */
|
||||
vcpu_sregs_get(vm, VCPU_ID, &sregs);
|
||||
sregs.cr4 &= ~X86_CR4_OSXSAVE;
|
||||
vcpu_sregs_set(vm, VCPU_ID, &sregs);
|
||||
break;
|
||||
case UCALL_ABORT:
|
||||
TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
|
||||
break;
|
||||
case UCALL_DONE:
|
||||
goto done;
|
||||
default:
|
||||
TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -134,6 +134,11 @@ int main(int argc, char *argv[])
|
||||
|
||||
struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
|
||||
|
||||
if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
|
||||
fprintf(stderr, "immediate_exit not available, skipping test\n");
|
||||
exit(KSFT_SKIP);
|
||||
}
|
||||
|
||||
/* Create VM */
|
||||
vm = vm_create_default(VCPU_ID, 0, guest_code);
|
||||
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
|
||||
@@ -156,8 +161,6 @@ int main(int argc, char *argv[])
|
||||
stage, run->exit_reason,
|
||||
exit_reason_str(run->exit_reason));
|
||||
|
||||
memset(®s1, 0, sizeof(regs1));
|
||||
vcpu_regs_get(vm, VCPU_ID, ®s1);
|
||||
switch (get_ucall(vm, VCPU_ID, &uc)) {
|
||||
case UCALL_ABORT:
|
||||
TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
|
||||
@@ -176,6 +179,17 @@ int main(int argc, char *argv[])
|
||||
uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
|
||||
stage, (ulong)uc.args[1]);
|
||||
|
||||
/*
|
||||
* When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
|
||||
* guest state is consistent only after userspace re-enters the
|
||||
* kernel with KVM_RUN. Complete IO prior to migrating state
|
||||
* to a new VM.
|
||||
*/
|
||||
vcpu_run_complete_io(vm, VCPU_ID);
|
||||
|
||||
memset(®s1, 0, sizeof(regs1));
|
||||
vcpu_regs_get(vm, VCPU_ID, ®s1);
|
||||
|
||||
state = vcpu_save_state(vm, VCPU_ID);
|
||||
kvm_vm_release(vm);
|
||||
|
||||
|
@@ -143,6 +143,30 @@
|
||||
"$TC actions flush action sample"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "7571",
|
||||
"name": "Add sample action with invalid rate",
|
||||
"category": [
|
||||
"actions",
|
||||
"sample"
|
||||
],
|
||||
"setup": [
|
||||
[
|
||||
"$TC actions flush action sample",
|
||||
0,
|
||||
1,
|
||||
255
|
||||
]
|
||||
],
|
||||
"cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2",
|
||||
"expExitCode": "255",
|
||||
"verifyCmd": "$TC actions get action sample index 2",
|
||||
"matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref",
|
||||
"matchCount": "0",
|
||||
"teardown": [
|
||||
"$TC actions flush action sample"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "b6d4",
|
||||
"name": "Add sample action with mandatory arguments and invalid control action",
|
||||
|