Merge remote-tracking branch 'torvalds/master' into perf/core
Minor conflict in tools/perf/arch/arm/util/auxtrace.c, as one fix there was cherry-picked for the last perf/urgent pull request to Linus and so was already present.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
@@ -408,14 +408,15 @@ struct kvm_vmx_nested_state_data {
};

struct kvm_vmx_nested_state_hdr {
	__u32 flags;
	__u64 vmxon_pa;
	__u64 vmcs12_pa;
	__u64 preemption_timer_deadline;

	struct {
		__u16 flags;
	} smm;

	__u32 flags;
	__u64 preemption_timer_deadline;
};

struct kvm_svm_nested_state_data {
@@ -18,8 +18,7 @@
 * position @h. For example
 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
 */
#if !defined(__ASSEMBLY__) && \
	(!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000)
#if !defined(__ASSEMBLY__)
#include <linux/build_bug.h>
#define GENMASK_INPUT_CHECK(h, l) \
	(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
@@ -3171,13 +3171,12 @@ union bpf_attr {
 * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
 * 	Description
 * 		Copy *size* bytes from *data* into a ring buffer *ringbuf*.
 * 		If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
 * 		new data availability is sent.
 * 		IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
 * 		new data availability is sent unconditionally.
 * 		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
 * 		of new data availability is sent.
 * 		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
 * 		of new data availability is sent unconditionally.
 * 	Return
 * 		0, on success;
 * 		< 0, on error.
 * 		0 on success, or a negative error in case of failure.
 *
 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
 * 	Description
@@ -3189,20 +3188,20 @@ union bpf_attr {
 * void bpf_ringbuf_submit(void *data, u64 flags)
 * 	Description
 * 		Submit reserved ring buffer sample, pointed to by *data*.
 * 		If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
 * 		new data availability is sent.
 * 		IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
 * 		new data availability is sent unconditionally.
 * 		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
 * 		of new data availability is sent.
 * 		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
 * 		of new data availability is sent unconditionally.
 * 	Return
 * 		Nothing. Always succeeds.
 *
 * void bpf_ringbuf_discard(void *data, u64 flags)
 * 	Description
 * 		Discard reserved ring buffer sample, pointed to by *data*.
 * 		If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
 * 		new data availability is sent.
 * 		IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
 * 		new data availability is sent unconditionally.
 * 		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
 * 		of new data availability is sent.
 * 		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
 * 		of new data availability is sent unconditionally.
 * 	Return
 * 		Nothing. Always succeeds.
 *
@@ -3210,16 +3209,18 @@ union bpf_attr {
 * 	Description
 * 		Query various characteristics of provided ring buffer. What
 * 		exactly is queries is determined by *flags*:
 * 		- BPF_RB_AVAIL_DATA - amount of data not yet consumed;
 * 		- BPF_RB_RING_SIZE - the size of ring buffer;
 * 		- BPF_RB_CONS_POS - consumer position (can wrap around);
 * 		- BPF_RB_PROD_POS - producer(s) position (can wrap around);
 * 		Data returned is just a momentary snapshots of actual values
 *
 * 		* **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
 * 		* **BPF_RB_RING_SIZE**: The size of ring buffer.
 * 		* **BPF_RB_CONS_POS**: Consumer position (can wrap around).
 * 		* **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
 *
 * 		Data returned is just a momentary snapshot of actual values
 * 		and could be inaccurate, so this facility should be used to
 * 		power heuristics and for reporting, not to make 100% correct
 * 		calculation.
 * 	Return
 * 		Requested value, or 0, if flags are not recognized.
 * 		Requested value, or 0, if *flags* are not recognized.
 *
 * int bpf_csum_level(struct sk_buff *skb, u64 level)
 * 	Description
@@ -233,6 +233,8 @@ LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf,
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
				 __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
				 __u64 *probe_offset, __u64 *probe_addr);

enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);

#ifdef __cplusplus
@@ -11,14 +11,18 @@
#include <stdbool.h>
#include <stddef.h>
#include <limits.h>
#ifndef __WORDSIZE
#define __WORDSIZE (__SIZEOF_LONG__ * 8)
#endif

static inline size_t hash_bits(size_t h, int bits)
{
	/* shuffle bits and return requested number of upper bits */
	return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
	/* LP64 case */
	return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
#elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__)
	return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits);
#else
# error "Unsupported size_t size"
#endif
}

typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
@@ -4818,7 +4818,13 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
			err = -EINVAL;
			goto out;
		}
		prog = bpf_object__find_program_by_title(obj, sec_name);
		prog = NULL;
		for (i = 0; i < obj->nr_programs; i++) {
			if (!strcmp(obj->programs[i].section_name, sec_name)) {
				prog = &obj->programs[i];
				break;
			}
		}
		if (!prog) {
			pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
				sec_name);
@@ -6653,7 +6659,7 @@ static const struct bpf_sec_def section_defs[] = {
		.expected_attach_type = BPF_TRACE_ITER,
		.is_attach_btf = true,
		.attach_fn = attach_iter),
	BPF_EAPROG_SEC("xdp_devmap", BPF_PROG_TYPE_XDP,
	BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
		BPF_XDP_DEVMAP),
	BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
@@ -237,6 +237,9 @@ static int get_value(struct parse_opt_ctx_t *p,
		return err;

	case OPTION_CALLBACK:
		if (opt->set)
			*(bool *)opt->set = true;

		if (unset)
			return (*opt->callback)(opt, NULL, 1) ? (-1) : 0;
		if (opt->flags & PARSE_OPT_NOARG)
@@ -2861,6 +2861,7 @@ process_dynamic_array_len(struct tep_event *event, struct tep_print_arg *arg,
	if (read_expected(TEP_EVENT_DELIM, ")") < 0)
		goto out_err;

	free_token(token);
	type = read_token(&token);
	*tok = token;
@@ -199,7 +199,7 @@ define do_generate_dynamic_list_file
	xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
	if [ "$$symbol_type" = "U W" ];then \
		(echo '{'; \
		$(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
		$(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\
		echo '};'; \
		) > $2; \
	else \
@@ -380,7 +380,7 @@
  {
    "Unit": "CPU-M-CF",
    "EventCode": "265",
    "EventName": "DFLT_CCERROR",
    "EventName": "DFLT_CCFINISH",
    "BriefDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2",
    "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2"
  },
@@ -12,7 +12,8 @@ skip_if_no_z_record() {

collect_z_record() {
	echo "Collecting compressed record file:"
	$perf_tool record -o $trace_file -g -z -F 5000 -- \
	[[ "$(uname -m)" != s390x ]] && gflag='-g'
	$perf_tool record -o $trace_file $gflag -z -F 5000 -- \
		dd count=500 if=/dev/urandom of=/dev/null
}
@@ -11,14 +11,18 @@
#include <stdbool.h>
#include <stddef.h>
#include <limits.h>
#ifndef __WORDSIZE
#define __WORDSIZE (__SIZEOF_LONG__ * 8)
#endif

static inline size_t hash_bits(size_t h, int bits)
{
	/* shuffle bits and return requested number of upper bits */
	return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
	/* LP64 case */
	return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
#elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__)
	return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits);
#else
# error "Unsupported size_t size"
#endif
}

typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
@@ -5,10 +5,60 @@
|
||||
|
||||
#include "test_btf_map_in_map.skel.h"
|
||||
|
||||
static int duration;
|
||||
|
||||
static __u32 bpf_map_id(struct bpf_map *map)
|
||||
{
|
||||
struct bpf_map_info info;
|
||||
__u32 info_len = sizeof(info);
|
||||
int err;
|
||||
|
||||
memset(&info, 0, info_len);
|
||||
err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
|
||||
if (err)
|
||||
return 0;
|
||||
return info.id;
|
||||
}
|
||||
|
||||
/*
|
||||
* Trigger synchronize_rcu() in kernel.
|
||||
*
|
||||
* ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger synchronize_rcu()
|
||||
* if looking up an existing non-NULL element or updating the map with a valid
|
||||
* inner map FD. Use this fact to trigger synchronize_rcu(): create map-in-map,
|
||||
* create a trivial ARRAY map, update map-in-map with ARRAY inner map. Then
|
||||
* cleanup. At the end, at least one synchronize_rcu() would be called.
|
||||
*/
|
||||
static int kern_sync_rcu(void)
|
||||
{
|
||||
int inner_map_fd, outer_map_fd, err, zero = 0;
|
||||
|
||||
inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
|
||||
if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno))
|
||||
return -1;
|
||||
|
||||
outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
|
||||
sizeof(int), inner_map_fd, 1, 0);
|
||||
if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) {
|
||||
close(inner_map_fd);
|
||||
return -1;
|
||||
}
|
||||
|
||||
err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0);
|
||||
if (err)
|
||||
err = -errno;
|
||||
CHECK(err, "outer_map_update", "failed %d\n", err);
|
||||
close(inner_map_fd);
|
||||
close(outer_map_fd);
|
||||
return err;
|
||||
}
|
||||
|
||||
void test_btf_map_in_map(void)
|
||||
{
|
||||
int duration = 0, err, key = 0, val;
|
||||
struct test_btf_map_in_map* skel;
|
||||
int err, key = 0, val, i;
|
||||
struct test_btf_map_in_map *skel;
|
||||
int outer_arr_fd, outer_hash_fd;
|
||||
int fd, map1_fd, map2_fd, map1_id, map2_id;
|
||||
|
||||
skel = test_btf_map_in_map__open_and_load();
|
||||
if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
|
||||
@@ -18,32 +68,78 @@ void test_btf_map_in_map(void)
|
||||
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
|
||||
goto cleanup;
|
||||
|
||||
map1_fd = bpf_map__fd(skel->maps.inner_map1);
|
||||
map2_fd = bpf_map__fd(skel->maps.inner_map2);
|
||||
outer_arr_fd = bpf_map__fd(skel->maps.outer_arr);
|
||||
outer_hash_fd = bpf_map__fd(skel->maps.outer_hash);
|
||||
|
||||
/* inner1 = input, inner2 = input + 1 */
|
||||
val = bpf_map__fd(skel->maps.inner_map1);
|
||||
bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
|
||||
val = bpf_map__fd(skel->maps.inner_map2);
|
||||
bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
|
||||
map1_fd = bpf_map__fd(skel->maps.inner_map1);
|
||||
bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0);
|
||||
map2_fd = bpf_map__fd(skel->maps.inner_map2);
|
||||
bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0);
|
||||
skel->bss->input = 1;
|
||||
usleep(1);
|
||||
|
||||
bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
|
||||
bpf_map_lookup_elem(map1_fd, &key, &val);
|
||||
CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1);
|
||||
bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
|
||||
bpf_map_lookup_elem(map2_fd, &key, &val);
|
||||
CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2);
|
||||
|
||||
/* inner1 = input + 1, inner2 = input */
|
||||
val = bpf_map__fd(skel->maps.inner_map2);
|
||||
bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
|
||||
val = bpf_map__fd(skel->maps.inner_map1);
|
||||
bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
|
||||
bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0);
|
||||
bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0);
|
||||
skel->bss->input = 3;
|
||||
usleep(1);
|
||||
|
||||
bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
|
||||
bpf_map_lookup_elem(map1_fd, &key, &val);
|
||||
CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4);
|
||||
bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
|
||||
bpf_map_lookup_elem(map2_fd, &key, &val);
|
||||
CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3);
|
||||
|
||||
for (i = 0; i < 5; i++) {
|
||||
val = i % 2 ? map1_fd : map2_fd;
|
||||
err = bpf_map_update_elem(outer_hash_fd, &key, &val, 0);
|
||||
if (CHECK_FAIL(err)) {
|
||||
printf("failed to update hash_of_maps on iter #%d\n", i);
|
||||
goto cleanup;
|
||||
}
|
||||
err = bpf_map_update_elem(outer_arr_fd, &key, &val, 0);
|
||||
if (CHECK_FAIL(err)) {
|
||||
printf("failed to update hash_of_maps on iter #%d\n", i);
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
|
||||
map1_id = bpf_map_id(skel->maps.inner_map1);
|
||||
map2_id = bpf_map_id(skel->maps.inner_map2);
|
||||
CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
|
||||
CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
|
||||
|
||||
test_btf_map_in_map__destroy(skel);
|
||||
skel = NULL;
|
||||
|
||||
/* we need to either wait for or force synchronize_rcu(), before
|
||||
* checking for "still exists" condition, otherwise map could still be
|
||||
* resolvable by ID, causing false positives.
|
||||
*
|
||||
* Older kernels (5.8 and earlier) freed map only after two
|
||||
* synchronize_rcu()s, so trigger two, to be entirely sure.
|
||||
*/
|
||||
CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
|
||||
CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
|
||||
|
||||
fd = bpf_map_get_fd_by_id(map1_id);
|
||||
if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
|
||||
close(fd);
|
||||
goto cleanup;
|
||||
}
|
||||
fd = bpf_map_get_fd_by_id(map2_id);
|
||||
if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
|
||||
close(fd);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
cleanup:
|
||||
test_btf_map_in_map__destroy(skel);
|
||||
}
|
||||
|
@@ -36,7 +36,7 @@ void test_fentry_fexit(void)
|
||||
fentry_res = (__u64 *)fentry_skel->bss;
|
||||
fexit_res = (__u64 *)fexit_skel->bss;
|
||||
printf("%lld\n", fentry_skel->bss->test1_result);
|
||||
for (i = 0; i < 6; i++) {
|
||||
for (i = 0; i < 8; i++) {
|
||||
CHECK(fentry_res[i] != 1, "result",
|
||||
"fentry_test%d failed err %lld\n", i + 1, fentry_res[i]);
|
||||
CHECK(fexit_res[i] != 1, "result",
|
||||
|
@@ -527,8 +527,8 @@ static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
|
||||
|
||||
run_tests_skb_less(tap_fd, skel->maps.last_dissection);
|
||||
|
||||
err = bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
|
||||
CHECK(err, "bpf_prog_detach", "err %d errno %d\n", err, errno);
|
||||
err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
|
||||
CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
|
||||
}
|
||||
|
||||
static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
|
||||
|
@@ -1,9 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Test that the flow_dissector program can be updated with a single
|
||||
* syscall by attaching a new program that replaces the existing one.
|
||||
*
|
||||
* Corner case - the same program cannot be attached twice.
|
||||
* Tests for attaching, detaching, and replacing flow_dissector BPF program.
|
||||
*/
|
||||
|
||||
#define _GNU_SOURCE
|
||||
@@ -116,7 +113,7 @@ static void test_prog_attach_prog_attach(int netns, int prog1, int prog2)
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
|
||||
|
||||
out_detach:
|
||||
err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
|
||||
err = bpf_prog_detach2(prog2, 0, BPF_FLOW_DISSECTOR);
|
||||
if (CHECK_FAIL(err))
|
||||
perror("bpf_prog_detach");
|
||||
CHECK_FAIL(prog_is_attached(netns));
|
||||
@@ -152,7 +149,7 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2)
|
||||
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
|
||||
int err, link;
|
||||
|
||||
err = bpf_prog_attach(prog1, -1, BPF_FLOW_DISSECTOR, 0);
|
||||
err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0);
|
||||
if (CHECK_FAIL(err)) {
|
||||
perror("bpf_prog_attach(prog1)");
|
||||
return;
|
||||
@@ -168,7 +165,7 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2)
|
||||
close(link);
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
|
||||
|
||||
err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR);
|
||||
err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
|
||||
if (CHECK_FAIL(err))
|
||||
perror("bpf_prog_detach");
|
||||
CHECK_FAIL(prog_is_attached(netns));
|
||||
@@ -188,7 +185,7 @@ static void test_link_create_prog_attach(int netns, int prog1, int prog2)
|
||||
|
||||
/* Expect failure attaching prog when link exists */
|
||||
errno = 0;
|
||||
err = bpf_prog_attach(prog2, -1, BPF_FLOW_DISSECTOR, 0);
|
||||
err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0);
|
||||
if (CHECK_FAIL(!err || errno != EEXIST))
|
||||
perror("bpf_prog_attach(prog2) expected EEXIST");
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
|
||||
@@ -211,7 +208,7 @@ static void test_link_create_prog_detach(int netns, int prog1, int prog2)
|
||||
|
||||
/* Expect failure detaching prog when link exists */
|
||||
errno = 0;
|
||||
err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR);
|
||||
err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
|
||||
if (CHECK_FAIL(!err || errno != EINVAL))
|
||||
perror("bpf_prog_detach expected EINVAL");
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
|
||||
@@ -231,7 +228,7 @@ static void test_prog_attach_detach_query(int netns, int prog1, int prog2)
|
||||
}
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
|
||||
|
||||
err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
|
||||
err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
|
||||
if (CHECK_FAIL(err)) {
|
||||
perror("bpf_prog_detach");
|
||||
return;
|
||||
@@ -308,6 +305,31 @@ static void test_link_update_replace_old_prog(int netns, int prog1, int prog2)
|
||||
CHECK_FAIL(prog_is_attached(netns));
|
||||
}
|
||||
|
||||
static void test_link_update_same_prog(int netns, int prog1, int prog2)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
|
||||
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
|
||||
int err, link;
|
||||
|
||||
link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
|
||||
if (CHECK_FAIL(link < 0)) {
|
||||
perror("bpf_link_create(prog1)");
|
||||
return;
|
||||
}
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
|
||||
|
||||
/* Expect success updating the prog with the same one */
|
||||
update_opts.flags = 0;
|
||||
update_opts.old_prog_fd = 0;
|
||||
err = bpf_link_update(link, prog1, &update_opts);
|
||||
if (CHECK_FAIL(err))
|
||||
perror("bpf_link_update");
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
|
||||
|
||||
close(link);
|
||||
CHECK_FAIL(prog_is_attached(netns));
|
||||
}
|
||||
|
||||
static void test_link_update_invalid_opts(int netns, int prog1, int prog2)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
|
||||
@@ -571,6 +593,8 @@ static void run_tests(int netns)
|
||||
test_link_update_no_old_prog },
|
||||
{ "link update with replace old prog",
|
||||
test_link_update_replace_old_prog },
|
||||
{ "link update with same prog",
|
||||
test_link_update_same_prog },
|
||||
{ "link update invalid opts",
|
||||
test_link_update_invalid_opts },
|
||||
{ "link update invalid prog",
|
||||
|
@@ -25,7 +25,7 @@ struct bpf_iter__netlink {
|
||||
struct netlink_sock *sk;
|
||||
} __attribute__((preserve_access_index));
|
||||
|
||||
static inline struct inode *SOCK_INODE(struct socket *socket)
|
||||
static __attribute__((noinline)) struct inode *SOCK_INODE(struct socket *socket)
|
||||
{
|
||||
return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
|
||||
}
|
||||
|
@@ -55,3 +55,25 @@ int BPF_PROG(test6, __u64 a, void *b, short c, int d, void * e, __u64 f)
|
||||
e == (void *)20 && f == 21;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct bpf_fentry_test_t {
|
||||
struct bpf_fentry_test_t *a;
|
||||
};
|
||||
|
||||
__u64 test7_result = 0;
|
||||
SEC("fentry/bpf_fentry_test7")
|
||||
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
|
||||
{
|
||||
if (arg == 0)
|
||||
test7_result = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
__u64 test8_result = 0;
|
||||
SEC("fentry/bpf_fentry_test8")
|
||||
int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
|
||||
{
|
||||
if (arg->a == 0)
|
||||
test8_result = 1;
|
||||
return 0;
|
||||
}
|
||||
|
@@ -56,3 +56,25 @@ int BPF_PROG(test6, __u64 a, void *b, short c, int d, void *e, __u64 f, int ret)
|
||||
e == (void *)20 && f == 21 && ret == 111;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct bpf_fentry_test_t {
|
||||
struct bpf_fentry_test *a;
|
||||
};
|
||||
|
||||
__u64 test7_result = 0;
|
||||
SEC("fexit/bpf_fentry_test7")
|
||||
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
|
||||
{
|
||||
if (arg == 0)
|
||||
test7_result = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
__u64 test8_result = 0;
|
||||
SEC("fexit/bpf_fentry_test8")
|
||||
int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
|
||||
{
|
||||
if (arg->a == 0)
|
||||
test8_result = 1;
|
||||
return 0;
|
||||
}
|
||||
|
@@ -79,7 +79,7 @@ struct {
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__uint(max_entries, 2);
|
||||
__uint(max_entries, 3);
|
||||
__type(key, int);
|
||||
__type(value, int);
|
||||
} sock_skb_opts SEC(".maps");
|
||||
@@ -94,6 +94,12 @@ struct {
|
||||
SEC("sk_skb1")
|
||||
int bpf_prog1(struct __sk_buff *skb)
|
||||
{
|
||||
int *f, two = 2;
|
||||
|
||||
f = bpf_map_lookup_elem(&sock_skb_opts, &two);
|
||||
if (f && *f) {
|
||||
return *f;
|
||||
}
|
||||
return skb->len;
|
||||
}
|
||||
|
||||
|
@@ -27,7 +27,7 @@ int xdp_dummy_prog(struct xdp_md *ctx)
|
||||
/* valid program on DEVMAP entry via SEC name;
|
||||
* has access to egress and ingress ifindex
|
||||
*/
|
||||
SEC("xdp_devmap")
|
||||
SEC("xdp_devmap/map_prog")
|
||||
int xdp_dummy_dm(struct xdp_md *ctx)
|
||||
{
|
||||
char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
|
||||
|
@@ -789,19 +789,19 @@ static void test_sockmap(unsigned int tasks, void *data)
|
||||
}
|
||||
|
||||
err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER);
|
||||
if (err) {
|
||||
if (!err) {
|
||||
printf("Failed empty parser prog detach\n");
|
||||
goto out_sockmap;
|
||||
}
|
||||
|
||||
err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT);
|
||||
if (err) {
|
||||
if (!err) {
|
||||
printf("Failed empty verdict prog detach\n");
|
||||
goto out_sockmap;
|
||||
}
|
||||
|
||||
err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT);
|
||||
if (err) {
|
||||
if (!err) {
|
||||
printf("Failed empty msg verdict prog detach\n");
|
||||
goto out_sockmap;
|
||||
}
|
||||
@@ -1090,19 +1090,19 @@ static void test_sockmap(unsigned int tasks, void *data)
|
||||
assert(status == 0);
|
||||
}
|
||||
|
||||
err = bpf_prog_detach(map_fd_rx, __MAX_BPF_ATTACH_TYPE);
|
||||
err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE);
|
||||
if (!err) {
|
||||
printf("Detached an invalid prog type.\n");
|
||||
goto out_sockmap;
|
||||
}
|
||||
|
||||
err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
|
||||
err = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
|
||||
if (err) {
|
||||
printf("Failed parser prog detach\n");
|
||||
goto out_sockmap;
|
||||
}
|
||||
|
||||
err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
|
||||
err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
|
||||
if (err) {
|
||||
printf("Failed parser prog detach\n");
|
||||
goto out_sockmap;
|
||||
|
@@ -318,6 +318,9 @@ class DebugfsDir:
|
||||
continue
|
||||
|
||||
if os.path.isfile(p):
|
||||
# We need to init trap_flow_action_cookie before read it
|
||||
if f == "trap_flow_action_cookie":
|
||||
cmd('echo deadbeef > %s/%s' % (path, f))
|
||||
_, out = cmd('cat %s/%s' % (path, f))
|
||||
dfs[f] = out.strip()
|
||||
elif os.path.isdir(p):
|
||||
|
@@ -85,6 +85,7 @@ int txmsg_ktls_skb_drop;
|
||||
int txmsg_ktls_skb_redir;
|
||||
int ktls;
|
||||
int peek_flag;
|
||||
int skb_use_parser;
|
||||
|
||||
static const struct option long_options[] = {
|
||||
{"help", no_argument, NULL, 'h' },
|
||||
@@ -174,6 +175,7 @@ static void test_reset(void)
|
||||
txmsg_apply = txmsg_cork = 0;
|
||||
txmsg_ingress = txmsg_redir_skb = 0;
|
||||
txmsg_ktls_skb = txmsg_ktls_skb_drop = txmsg_ktls_skb_redir = 0;
|
||||
skb_use_parser = 0;
|
||||
}
|
||||
|
||||
static int test_start_subtest(const struct _test *t, struct sockmap_options *o)
|
||||
@@ -1211,6 +1213,11 @@ run:
|
||||
}
|
||||
}
|
||||
|
||||
if (skb_use_parser) {
|
||||
i = 2;
|
||||
err = bpf_map_update_elem(map_fd[7], &i, &skb_use_parser, BPF_ANY);
|
||||
}
|
||||
|
||||
if (txmsg_drop)
|
||||
options->drop_expected = true;
|
||||
|
||||
@@ -1650,6 +1657,16 @@ static void test_txmsg_cork(int cgrp, struct sockmap_options *opt)
|
||||
test_send(opt, cgrp);
|
||||
}
|
||||
|
||||
static void test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt)
|
||||
{
|
||||
txmsg_pass = 1;
|
||||
skb_use_parser = 512;
|
||||
opt->iov_length = 256;
|
||||
opt->iov_count = 1;
|
||||
opt->rate = 2;
|
||||
test_exec(cgrp, opt);
|
||||
}
|
||||
|
||||
char *map_names[] = {
|
||||
"sock_map",
|
||||
"sock_map_txmsg",
|
||||
@@ -1748,6 +1765,7 @@ struct _test test[] = {
|
||||
{"txmsg test pull-data", test_txmsg_pull},
|
||||
{"txmsg test pop-data", test_txmsg_pop},
|
||||
{"txmsg test push/pop data", test_txmsg_push_pop},
|
||||
{"txmsg text ingress parser", test_txmsg_ingress_parser},
|
||||
};
|
||||
|
||||
static int check_whitelist(struct _test *t, struct sockmap_options *opt)
|
||||
|
@@ -112,6 +112,7 @@
|
||||
"perfevent for cgroup sockopt",
|
||||
.insns = { __PERF_EVENT_INSNS__ },
|
||||
.prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT,
|
||||
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
|
||||
.fixup_map_event_output = { 4 },
|
||||
.result = ACCEPT,
|
||||
.retval = 1,
|
||||
|
@@ -63,6 +63,8 @@ ALL_TESTS="$ALL_TESTS 0008:150:1"
|
||||
ALL_TESTS="$ALL_TESTS 0009:150:1"
|
||||
ALL_TESTS="$ALL_TESTS 0010:1:1"
|
||||
ALL_TESTS="$ALL_TESTS 0011:1:1"
|
||||
ALL_TESTS="$ALL_TESTS 0012:1:1"
|
||||
ALL_TESTS="$ALL_TESTS 0013:1:1"
|
||||
|
||||
# Kselftest framework requirement - SKIP code is 4.
|
||||
ksft_skip=4
|
||||
@@ -470,6 +472,38 @@ kmod_test_0011()
|
||||
echo "$MODPROBE" > /proc/sys/kernel/modprobe
|
||||
}
|
||||
|
||||
kmod_check_visibility()
|
||||
{
|
||||
local name="$1"
|
||||
local cmd="$2"
|
||||
|
||||
modprobe $DEFAULT_KMOD_DRIVER
|
||||
|
||||
local priv=$(eval $cmd)
|
||||
local unpriv=$(capsh --drop=CAP_SYSLOG -- -c "$cmd")
|
||||
|
||||
if [ "$priv" = "$unpriv" ] || \
|
||||
[ "${priv:0:3}" = "0x0" ] || \
|
||||
[ "${unpriv:0:3}" != "0x0" ] ; then
|
||||
echo "${FUNCNAME[0]}: FAIL, $name visible to unpriv: '$priv' vs '$unpriv'" >&2
|
||||
exit 1
|
||||
else
|
||||
echo "${FUNCNAME[0]}: OK!"
|
||||
fi
|
||||
}
|
||||
|
||||
kmod_test_0012()
|
||||
{
|
||||
kmod_check_visibility /proc/modules \
|
||||
"grep '^${DEFAULT_KMOD_DRIVER}\b' /proc/modules | awk '{print \$NF}'"
|
||||
}
|
||||
|
||||
kmod_test_0013()
|
||||
{
|
||||
kmod_check_visibility '/sys/module/*/sections/*' \
|
||||
"cat /sys/module/${DEFAULT_KMOD_DRIVER}/sections/.*text | head -n1"
|
||||
}
|
||||
|
||||
list_tests()
|
||||
{
|
||||
echo "Test ID list:"
|
||||
@@ -489,6 +523,8 @@ list_tests()
|
||||
echo "0009 x $(get_test_count 0009) - multithreaded - push kmod_concurrent over max_modprobes for get_fs_type()"
|
||||
echo "0010 x $(get_test_count 0010) - test nonexistent modprobe path"
|
||||
echo "0011 x $(get_test_count 0011) - test completely disabling module autoloading"
|
||||
echo "0012 x $(get_test_count 0012) - test /proc/modules address visibility under CAP_SYSLOG"
|
||||
echo "0013 x $(get_test_count 0013) - test /sys/module/*/sections/* visibility under CAP_SYSLOG"
|
||||
}
|
||||
|
||||
usage()
|
||||
|
@@ -36,7 +36,7 @@ struct ksft_count {
|
||||
static struct ksft_count ksft_cnt;
|
||||
static unsigned int ksft_plan;
|
||||
|
||||
static inline int ksft_test_num(void)
|
||||
static inline unsigned int ksft_test_num(void)
|
||||
{
|
||||
return ksft_cnt.ksft_pass + ksft_cnt.ksft_fail +
|
||||
ksft_cnt.ksft_xfail + ksft_cnt.ksft_xpass +
|
||||
|
@@ -76,10 +76,8 @@ void set_default_state(struct kvm_nested_state *state)
|
||||
void set_default_vmx_state(struct kvm_nested_state *state, int size)
|
||||
{
|
||||
memset(state, 0, size);
|
||||
state->flags = KVM_STATE_NESTED_GUEST_MODE |
|
||||
KVM_STATE_NESTED_RUN_PENDING;
|
||||
if (have_evmcs)
|
||||
state->flags |= KVM_STATE_NESTED_EVMCS;
|
||||
state->flags = KVM_STATE_NESTED_EVMCS;
|
||||
state->format = 0;
|
||||
state->size = size;
|
||||
state->hdr.vmx.vmxon_pa = 0x1000;
|
||||
@@ -148,6 +146,11 @@ void test_vmx_nested_state(struct kvm_vm *vm)
|
||||
state->hdr.vmx.smm.flags = 1;
|
||||
test_nested_state_expect_einval(vm, state);
|
||||
|
||||
/* Invalid flags are rejected. */
|
||||
set_default_vmx_state(state, state_sz);
|
||||
state->hdr.vmx.flags = ~0;
|
||||
test_nested_state_expect_einval(vm, state);
|
||||
|
||||
/* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
|
||||
set_default_vmx_state(state, state_sz);
|
||||
state->hdr.vmx.vmxon_pa = -1ull;
|
||||
@@ -185,22 +188,43 @@ void test_vmx_nested_state(struct kvm_vm *vm)
|
||||
state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
|
||||
test_nested_state_expect_einval(vm, state);
|
||||
|
||||
/* Size must be large enough to fit kvm_nested_state and vmcs12. */
|
||||
/*
|
||||
* Size must be large enough to fit kvm_nested_state and vmcs12
|
||||
* if VMCS12 physical address is set
|
||||
*/
|
||||
set_default_vmx_state(state, state_sz);
|
||||
state->size = sizeof(*state);
|
||||
state->flags = 0;
|
||||
test_nested_state_expect_einval(vm, state);
|
||||
|
||||
set_default_vmx_state(state, state_sz);
|
||||
state->size = sizeof(*state);
|
||||
state->flags = 0;
|
||||
state->hdr.vmx.vmcs12_pa = -1;
|
||||
test_nested_state(vm, state);
|
||||
|
||||
/*
|
||||
* KVM_SET_NESTED_STATE succeeds with invalid VMCS
|
||||
* contents but L2 not running.
|
||||
*/
|
||||
set_default_vmx_state(state, state_sz);
|
||||
state->flags = 0;
|
||||
test_nested_state(vm, state);
|
||||
|
||||
/* Invalid flags are rejected, even if no VMCS loaded. */
|
||||
set_default_vmx_state(state, state_sz);
|
||||
state->size = sizeof(*state);
|
||||
state->flags = 0;
|
||||
state->hdr.vmx.vmcs12_pa = -1;
|
||||
state->hdr.vmx.flags = ~0;
|
||||
test_nested_state_expect_einval(vm, state);
|
||||
|
||||
/* vmxon_pa cannot be the same address as vmcs_pa. */
|
||||
set_default_vmx_state(state, state_sz);
|
||||
state->hdr.vmx.vmxon_pa = 0;
|
||||
state->hdr.vmx.vmcs12_pa = 0;
|
||||
test_nested_state_expect_einval(vm, state);
|
||||
|
||||
/* The revision id for vmcs12 must be VMCS12_REVISION. */
|
||||
set_default_vmx_state(state, state_sz);
|
||||
set_revision_id_for_vmcs12(state, 0);
|
||||
test_nested_state_expect_einval(vm, state);
|
||||
|
||||
/*
|
||||
* Test that if we leave nesting the state reflects that when we get
|
||||
* it again.
|
||||
|
@@ -144,7 +144,7 @@ setup()
|
||||
|
||||
cleanup()
|
||||
{
|
||||
for n in h1 r1 h2 h3 h4
|
||||
for n in h0 r1 h1 h2 h3
|
||||
do
|
||||
ip netns del ${n} 2>/dev/null
|
||||
done
|
||||
|
@@ -747,6 +747,19 @@ ipv6_fcnal_runtime()
|
||||
run_cmd "$IP nexthop add id 86 via 2001:db8:91::2 dev veth1"
|
||||
run_cmd "$IP ro add 2001:db8:101::1/128 nhid 81"
|
||||
|
||||
# rpfilter and default route
|
||||
$IP nexthop flush >/dev/null 2>&1
|
||||
run_cmd "ip netns exec me ip6tables -t mangle -I PREROUTING 1 -m rpfilter --invert -j DROP"
|
||||
run_cmd "$IP nexthop add id 91 via 2001:db8:91::2 dev veth1"
|
||||
run_cmd "$IP nexthop add id 92 via 2001:db8:92::2 dev veth3"
|
||||
run_cmd "$IP nexthop add id 93 group 91/92"
|
||||
run_cmd "$IP -6 ro add default nhid 91"
|
||||
run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
|
||||
log_test $? 0 "Nexthop with default route and rpfilter"
|
||||
run_cmd "$IP -6 ro replace default nhid 93"
|
||||
run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
|
||||
log_test $? 0 "Nexthop with multipath default route and rpfilter"
|
||||
|
||||
# TO-DO:
|
||||
# existing route with old nexthop; append route with new nexthop
|
||||
# existing route with old nexthop; replace route with new
|
||||
|
@@ -252,8 +252,6 @@ check_highest_speed_is_chosen()
|
||||
fi
|
||||
|
||||
local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1))
|
||||
# Remove the first speed, h1 does not advertise this speed.
|
||||
unset speeds_arr[0]
|
||||
|
||||
max_speed=${speeds_arr[0]}
|
||||
for current in ${speeds_arr[@]}; do
|
||||
|
@@ -6,6 +6,8 @@
|
||||
set +x
|
||||
set -e
|
||||
|
||||
modprobe -q nf_defrag_ipv6
|
||||
|
||||
readonly NETNS="ns-$(mktemp -u XXXXXX)"
|
||||
|
||||
setup() {
|
||||
|
@@ -350,7 +350,8 @@ static int test_datapath(uint16_t typeflags, int port_off,
|
||||
int fds[2], fds_udp[2][2], ret;
|
||||
|
||||
fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n",
|
||||
typeflags, PORT_BASE, PORT_BASE + port_off);
|
||||
typeflags, (uint16_t)PORT_BASE,
|
||||
(uint16_t)(PORT_BASE + port_off));
|
||||
|
||||
fds[0] = sock_fanout_open(typeflags, 0);
|
||||
fds[1] = sock_fanout_open(typeflags, 0);
|
||||
|
@@ -329,8 +329,7 @@ int main(int argc, char **argv)
|
||||
bool all_tests = true;
|
||||
int arg_index = 0;
|
||||
int failures = 0;
|
||||
int s, t;
|
||||
char opt;
|
||||
int s, t, opt;
|
||||
|
||||
while ((opt = getopt_long(argc, argv, "", long_options,
|
||||
&arg_index)) != -1) {
|
||||
|
@@ -121,7 +121,7 @@ static bool do_recv_one(int fdr, struct timed_send *ts)
|
||||
if (rbuf[0] != ts->data)
|
||||
error(1, 0, "payload mismatch. expected %c", ts->data);
|
||||
|
||||
if (labs(tstop - texpect) > cfg_variance_us)
|
||||
if (llabs(tstop - texpect) > cfg_variance_us)
|
||||
error(1, 0, "exceeds variance (%d us)", cfg_variance_us);
|
||||
|
||||
return false;
|
||||
|
@@ -344,7 +344,7 @@ int main(int argc, char *argv[])
|
||||
{
|
||||
struct sockaddr_storage listenaddr, addr;
|
||||
unsigned int max_pacing_rate = 0;
|
||||
size_t total = 0;
|
||||
uint64_t total = 0;
|
||||
char *host = NULL;
|
||||
int fd, c, on = 1;
|
||||
char *buffer;
|
||||
@@ -473,12 +473,12 @@ int main(int argc, char *argv[])
|
||||
zflg = 0;
|
||||
}
|
||||
while (total < FILE_SZ) {
|
||||
ssize_t wr = FILE_SZ - total;
|
||||
int64_t wr = FILE_SZ - total;
|
||||
|
||||
if (wr > chunk_size)
|
||||
wr = chunk_size;
|
||||
/* Note : we just want to fill the pipe with 0 bytes */
|
||||
wr = send(fd, buffer, wr, zflg ? MSG_ZEROCOPY : 0);
|
||||
wr = send(fd, buffer, (size_t)wr, zflg ? MSG_ZEROCOPY : 0);
|
||||
if (wr <= 0)
|
||||
break;
|
||||
total += wr;
|
||||
|
@@ -75,7 +75,7 @@ main() {
|
||||
fi
|
||||
}
|
||||
|
||||
if [[ "$(ip netns identify)" == "root" ]]; then
|
||||
if [[ -z "$(ip netns identify)" ]]; then
|
||||
./in_netns.sh $0 $@
|
||||
else
|
||||
main $@
|
||||
|
@@ -698,13 +698,13 @@ restart_nx:
|
||||
|
||||
switch (cc) {
|
||||
|
||||
case ERR_NX_TRANSLATION:
|
||||
case ERR_NX_AT_FAULT:
|
||||
|
||||
/* We touched the pages ahead of time. In the most common case
|
||||
* we shouldn't be here. But may be some pages were paged out.
|
||||
* Kernel should have placed the faulting address to fsaddr.
|
||||
*/
|
||||
NXPRT(fprintf(stderr, "ERR_NX_TRANSLATION %p\n",
|
||||
NXPRT(fprintf(stderr, "ERR_NX_AT_FAULT %p\n",
|
||||
(void *)cmdp->crb.csb.fsaddr));
|
||||
|
||||
if (pgfault_retries == NX_MAX_FAULTS) {
|
||||
|
@@ -306,13 +306,13 @@ int compress_file(int argc, char **argv, void *handle)
|
||||
lzcounts, cmdp, handle);
|
||||
|
||||
if (cc != ERR_NX_OK && cc != ERR_NX_TPBC_GT_SPBC &&
|
||||
cc != ERR_NX_TRANSLATION) {
|
||||
cc != ERR_NX_AT_FAULT) {
|
||||
fprintf(stderr, "nx error: cc= %d\n", cc);
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
/* Page faults are handled by the user code */
|
||||
if (cc == ERR_NX_TRANSLATION) {
|
||||
if (cc == ERR_NX_AT_FAULT) {
|
||||
NXPRT(fprintf(stderr, "page fault: cc= %d, ", cc));
|
||||
NXPRT(fprintf(stderr, "try= %d, fsa= %08llx\n",
|
||||
fault_tries,
|
||||
|
@@ -6,5 +6,5 @@ ksft_skip=4
|
||||
|
||||
[ -e /dev/tpm0 ] || exit $ksft_skip
|
||||
|
||||
python -m unittest -v tpm2_tests.SmokeTest
|
||||
python -m unittest -v tpm2_tests.AsyncTest
|
||||
python3 -m unittest -v tpm2_tests.SmokeTest
|
||||
python3 -m unittest -v tpm2_tests.AsyncTest
|
||||
|
@@ -6,4 +6,4 @@ ksft_skip=4
|
||||
|
||||
[ -e /dev/tpmrm0 ] || exit $ksft_skip
|
||||
|
||||
python -m unittest -v tpm2_tests.SpaceTest
|
||||
python3 -m unittest -v tpm2_tests.SpaceTest
|
||||
|
@@ -247,14 +247,14 @@ class ProtocolError(Exception):
|
||||
class AuthCommand(object):
|
||||
"""TPMS_AUTH_COMMAND"""
|
||||
|
||||
def __init__(self, session_handle=TPM2_RS_PW, nonce='', session_attributes=0,
|
||||
hmac=''):
|
||||
def __init__(self, session_handle=TPM2_RS_PW, nonce=bytes(),
|
||||
session_attributes=0, hmac=bytes()):
|
||||
self.session_handle = session_handle
|
||||
self.nonce = nonce
|
||||
self.session_attributes = session_attributes
|
||||
self.hmac = hmac
|
||||
|
||||
def __str__(self):
|
||||
def __bytes__(self):
|
||||
fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac))
|
||||
return struct.pack(fmt, self.session_handle, len(self.nonce),
|
||||
self.nonce, self.session_attributes, len(self.hmac),
|
||||
@@ -268,11 +268,11 @@ class AuthCommand(object):
|
||||
class SensitiveCreate(object):
|
||||
"""TPMS_SENSITIVE_CREATE"""
|
||||
|
||||
def __init__(self, user_auth='', data=''):
|
||||
def __init__(self, user_auth=bytes(), data=bytes()):
|
||||
self.user_auth = user_auth
|
||||
self.data = data
|
||||
|
||||
def __str__(self):
|
||||
def __bytes__(self):
|
||||
fmt = '>H%us H%us' % (len(self.user_auth), len(self.data))
|
||||
return struct.pack(fmt, len(self.user_auth), self.user_auth,
|
||||
len(self.data), self.data)
|
||||
@@ -296,8 +296,9 @@ class Public(object):
|
||||
return '>HHIH%us%usH%us' % \
|
||||
(len(self.auth_policy), len(self.parameters), len(self.unique))
|
||||
|
||||
def __init__(self, object_type, name_alg, object_attributes, auth_policy='',
|
||||
parameters='', unique=''):
|
||||
def __init__(self, object_type, name_alg, object_attributes,
|
||||
auth_policy=bytes(), parameters=bytes(),
|
||||
unique=bytes()):
|
||||
self.object_type = object_type
|
||||
self.name_alg = name_alg
|
||||
self.object_attributes = object_attributes
|
||||
@@ -305,7 +306,7 @@ class Public(object):
|
||||
self.parameters = parameters
|
||||
self.unique = unique
|
||||
|
||||
def __str__(self):
|
||||
def __bytes__(self):
|
||||
return struct.pack(self.__fmt(),
|
||||
self.object_type,
|
||||
self.name_alg,
|
||||
@@ -343,7 +344,7 @@ def get_algorithm(name):
|
||||
|
||||
def hex_dump(d):
|
||||
d = [format(ord(x), '02x') for x in d]
|
||||
d = [d[i: i + 16] for i in xrange(0, len(d), 16)]
|
||||
d = [d[i: i + 16] for i in range(0, len(d), 16)]
|
||||
d = [' '.join(x) for x in d]
|
||||
d = os.linesep.join(d)
|
||||
|
||||
@@ -401,7 +402,7 @@ class Client:
|
||||
pcrsel_len = max((i >> 3) + 1, 3)
|
||||
pcrsel = [0] * pcrsel_len
|
||||
pcrsel[i >> 3] = 1 << (i & 7)
|
||||
pcrsel = ''.join(map(chr, pcrsel))
|
||||
pcrsel = ''.join(map(chr, pcrsel)).encode()
|
||||
|
||||
fmt = '>HII IHB%us' % (pcrsel_len)
|
||||
cmd = struct.pack(fmt,
|
||||
@@ -443,7 +444,7 @@ class Client:
|
||||
TPM2_CC_PCR_EXTEND,
|
||||
i,
|
||||
len(auth_cmd),
|
||||
str(auth_cmd),
|
||||
bytes(auth_cmd),
|
||||
1, bank_alg, dig)
|
||||
|
||||
self.send_cmd(cmd)
|
||||
@@ -457,7 +458,7 @@ class Client:
|
||||
TPM2_RH_NULL,
|
||||
TPM2_RH_NULL,
|
||||
16,
|
||||
'\0' * 16,
|
||||
('\0' * 16).encode(),
|
||||
0,
|
||||
session_type,
|
||||
TPM2_ALG_NULL,
|
||||
@@ -472,7 +473,7 @@ class Client:
|
||||
|
||||
for i in pcrs:
|
||||
pcr = self.read_pcr(i, bank_alg)
|
||||
if pcr == None:
|
||||
if pcr is None:
|
||||
return None
|
||||
x += pcr
|
||||
|
||||
@@ -489,7 +490,7 @@ class Client:
|
||||
pcrsel = [0] * pcrsel_len
|
||||
for i in pcrs:
|
||||
pcrsel[i >> 3] |= 1 << (i & 7)
|
||||
pcrsel = ''.join(map(chr, pcrsel))
|
||||
pcrsel = ''.join(map(chr, pcrsel)).encode()
|
||||
|
||||
fmt = '>HII IH%usIHB3s' % ds
|
||||
cmd = struct.pack(fmt,
|
||||
@@ -497,7 +498,8 @@ class Client:
|
||||
struct.calcsize(fmt),
|
||||
TPM2_CC_POLICY_PCR,
|
||||
handle,
|
||||
len(dig), str(dig),
|
||||
len(dig),
|
||||
bytes(dig),
|
||||
1,
|
||||
bank_alg,
|
||||
pcrsel_len, pcrsel)
|
||||
@@ -534,7 +536,7 @@ class Client:
|
||||
|
||||
self.send_cmd(cmd)
|
||||
|
||||
def create_root_key(self, auth_value = ''):
|
||||
def create_root_key(self, auth_value = bytes()):
|
||||
attributes = \
|
||||
Public.FIXED_TPM | \
|
||||
Public.FIXED_PARENT | \
|
||||
@@ -570,11 +572,11 @@ class Client:
|
||||
TPM2_CC_CREATE_PRIMARY,
|
||||
TPM2_RH_OWNER,
|
||||
len(auth_cmd),
|
||||
str(auth_cmd),
|
||||
bytes(auth_cmd),
|
||||
len(sensitive),
|
||||
str(sensitive),
|
||||
bytes(sensitive),
|
||||
len(public),
|
||||
str(public),
|
||||
bytes(public),
|
||||
0, 0)
|
||||
|
||||
return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
|
||||
@@ -587,7 +589,7 @@ class Client:
|
||||
attributes = 0
|
||||
if not policy_dig:
|
||||
attributes |= Public.USER_WITH_AUTH
|
||||
policy_dig = ''
|
||||
policy_dig = bytes()
|
||||
|
||||
auth_cmd = AuthCommand()
|
||||
sensitive = SensitiveCreate(user_auth=auth_value, data=data)
|
||||
@@ -608,11 +610,11 @@ class Client:
|
||||
TPM2_CC_CREATE,
|
||||
parent_key,
|
||||
len(auth_cmd),
|
||||
str(auth_cmd),
|
||||
bytes(auth_cmd),
|
||||
len(sensitive),
|
||||
str(sensitive),
|
||||
bytes(sensitive),
|
||||
len(public),
|
||||
str(public),
|
||||
bytes(public),
|
||||
0, 0)
|
||||
|
||||
rsp = self.send_cmd(cmd)
|
||||
@@ -635,7 +637,7 @@ class Client:
|
||||
TPM2_CC_LOAD,
|
||||
parent_key,
|
||||
len(auth_cmd),
|
||||
str(auth_cmd),
|
||||
bytes(auth_cmd),
|
||||
blob)
|
||||
|
||||
data_handle = struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
|
||||
@@ -653,7 +655,7 @@ class Client:
|
||||
TPM2_CC_UNSEAL,
|
||||
data_handle,
|
||||
len(auth_cmd),
|
||||
str(auth_cmd))
|
||||
bytes(auth_cmd))
|
||||
|
||||
try:
|
||||
rsp = self.send_cmd(cmd)
|
||||
@@ -675,7 +677,7 @@ class Client:
|
||||
TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET,
|
||||
TPM2_RH_LOCKOUT,
|
||||
len(auth_cmd),
|
||||
str(auth_cmd))
|
||||
bytes(auth_cmd))
|
||||
|
||||
self.send_cmd(cmd)
|
||||
|
||||
@@ -693,7 +695,7 @@ class Client:
|
||||
more_data, cap, cnt = struct.unpack('>BII', rsp[:9])
|
||||
rsp = rsp[9:]
|
||||
|
||||
for i in xrange(0, cnt):
|
||||
for i in range(0, cnt):
|
||||
handle = struct.unpack('>I', rsp[:4])[0]
|
||||
handles.append(handle)
|
||||
rsp = rsp[4:]
|
||||
|
@@ -20,8 +20,8 @@ class SmokeTest(unittest.TestCase):
|
||||
self.client.close()
|
||||
|
||||
def test_seal_with_auth(self):
|
||||
data = 'X' * 64
|
||||
auth = 'A' * 15
|
||||
data = ('X' * 64).encode()
|
||||
auth = ('A' * 15).encode()
|
||||
|
||||
blob = self.client.seal(self.root_key, data, auth, None)
|
||||
result = self.client.unseal(self.root_key, blob, auth, None)
|
||||
@@ -30,8 +30,8 @@ class SmokeTest(unittest.TestCase):
|
||||
def test_seal_with_policy(self):
|
||||
handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
|
||||
|
||||
data = 'X' * 64
|
||||
auth = 'A' * 15
|
||||
data = ('X' * 64).encode()
|
||||
auth = ('A' * 15).encode()
|
||||
pcrs = [16]
|
||||
|
||||
try:
|
||||
@@ -58,14 +58,15 @@ class SmokeTest(unittest.TestCase):
|
||||
self.assertEqual(data, result)
|
||||
|
||||
def test_unseal_with_wrong_auth(self):
|
||||
data = 'X' * 64
|
||||
auth = 'A' * 20
|
||||
data = ('X' * 64).encode()
|
||||
auth = ('A' * 20).encode()
|
||||
rc = 0
|
||||
|
||||
blob = self.client.seal(self.root_key, data, auth, None)
|
||||
try:
|
||||
result = self.client.unseal(self.root_key, blob, auth[:-1] + 'B', None)
|
||||
except ProtocolError, e:
|
||||
result = self.client.unseal(self.root_key, blob,
|
||||
auth[:-1] + 'B'.encode(), None)
|
||||
except ProtocolError as e:
|
||||
rc = e.rc
|
||||
|
||||
self.assertEqual(rc, tpm2.TPM2_RC_AUTH_FAIL)
|
||||
@@ -73,8 +74,8 @@ class SmokeTest(unittest.TestCase):
|
||||
def test_unseal_with_wrong_policy(self):
|
||||
handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
|
||||
|
||||
data = 'X' * 64
|
||||
auth = 'A' * 17
|
||||
data = ('X' * 64).encode()
|
||||
auth = ('A' * 17).encode()
|
||||
pcrs = [16]
|
||||
|
||||
try:
|
||||
@@ -91,7 +92,7 @@ class SmokeTest(unittest.TestCase):
|
||||
# This should succeed.
|
||||
|
||||
ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
|
||||
self.client.extend_pcr(1, 'X' * ds)
|
||||
self.client.extend_pcr(1, ('X' * ds).encode())
|
||||
|
||||
handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
|
||||
|
||||
@@ -108,7 +109,7 @@ class SmokeTest(unittest.TestCase):
|
||||
|
||||
# Then, extend a PCR that is part of the policy and try to unseal.
|
||||
# This should fail.
|
||||
self.client.extend_pcr(16, 'X' * ds)
|
||||
self.client.extend_pcr(16, ('X' * ds).encode())
|
||||
|
||||
handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
|
||||
|
||||
@@ -119,7 +120,7 @@ class SmokeTest(unittest.TestCase):
|
||||
self.client.policy_password(handle)
|
||||
|
||||
result = self.client.unseal(self.root_key, blob, auth, handle)
|
||||
except ProtocolError, e:
|
||||
except ProtocolError as e:
|
||||
rc = e.rc
|
||||
self.client.flush_context(handle)
|
||||
except:
|
||||
@@ -130,13 +131,13 @@ class SmokeTest(unittest.TestCase):
|
||||
|
||||
def test_seal_with_too_long_auth(self):
|
||||
ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
|
||||
data = 'X' * 64
|
||||
auth = 'A' * (ds + 1)
|
||||
data = ('X' * 64).encode()
|
||||
auth = ('A' * (ds + 1)).encode()
|
||||
|
||||
rc = 0
|
||||
try:
|
||||
blob = self.client.seal(self.root_key, data, auth, None)
|
||||
except ProtocolError, e:
|
||||
except ProtocolError as e:
|
||||
rc = e.rc
|
||||
|
||||
self.assertEqual(rc, tpm2.TPM2_RC_SIZE)
|
||||
@@ -152,7 +153,7 @@ class SmokeTest(unittest.TestCase):
|
||||
0xDEADBEEF)
|
||||
|
||||
self.client.send_cmd(cmd)
|
||||
except IOError, e:
|
||||
except IOError as e:
|
||||
rejected = True
|
||||
except:
|
||||
pass
|
||||
@@ -212,7 +213,7 @@ class SmokeTest(unittest.TestCase):
|
||||
self.client.tpm.write(cmd)
|
||||
rsp = self.client.tpm.read()
|
||||
|
||||
except IOError, e:
|
||||
except IOError as e:
|
||||
# read the response
|
||||
rsp = self.client.tpm.read()
|
||||
rejected = True
|
||||
@@ -283,7 +284,7 @@ class SpaceTest(unittest.TestCase):
|
||||
rc = 0
|
||||
try:
|
||||
space1.send_cmd(cmd)
|
||||
except ProtocolError, e:
|
||||
except ProtocolError as e:
|
||||
rc = e.rc
|
||||
|
||||
self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE |
|
||||
|