Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:
 "Another batch of fixes:

  1) Remove the nft_compat counter flush optimization; it generates warnings
     from the refcount infrastructure. From Florian Westphal.

  2) Fix BPF to search for build id more robustly, from Jiri Olsa.

  3) Handle bogus getopt lengths in ebtables, from Florian Westphal.

  4) Infoleak and other fixes to j1939 CAN driver, from Eric Dumazet and
     Oleksij Rempel.

  5) Reset iter properly on mptcp sendmsg() error, from Florian
     Westphal.

  6) Show a saner speed in bonding broadcast mode, from Jarod Wilson.

  7) Various kerneldoc fixes in bonding and elsewhere, from Lee Jones.

  8) Fix double unregister in bonding during namespace tear down, from
     Cong Wang.

  9) Disable RP filter during icmp_redirect selftest, from David Ahern"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (75 commits)
  otx2_common: Use devm_kcalloc() in otx2_config_npa()
  net: qrtr: fix usage of idr in port assignment to socket
  selftests: disable rp_filter for icmp_redirect.sh
  Revert "net: xdp: pull ethernet header off packet after computing skb->protocol"
  phylink: <linux/phylink.h>: fix function prototype kernel-doc warning
  mptcp: sendmsg: reset iter on error redux
  net: devlink: Remove overzealous WARN_ON with snapshots
  tipc: not enable tipc when ipv6 works as a module
  tipc: fix uninit skb->data in tipc_nl_compat_dumpit()
  net: Fix potential wrong skb->protocol in skb_vlan_untag()
  net: xdp: pull ethernet header off packet after computing skb->protocol
  ipvlan: fix device features
  bonding: fix a potential double-unregister
  can: j1939: add rxtimer for multipacket broadcast session
  can: j1939: abort multipacket broadcast session when timeout occurs
  can: j1939: cancel rxtimer on multipacket broadcast session complete
  can: j1939: fix support for multipacket broadcast message
  net: fddi: skfp: cfm: Remove seemingly unused variable 'ID_sccs'
  net: fddi: skfp: cfm: Remove set but unused variable 'oldstate'
  net: fddi: skfp: smt: Remove seemingly unused variable 'ID_sccs'
  ...
Linus Torvalds
2020-08-17 17:09:50 -07:00
commit 4cf7562190
76 changed files with 770 additions and 351 deletions


@@ -67,7 +67,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
if (!info->btf_id || !info->nr_func_info ||
btf__get_from_id(info->btf_id, &prog_btf))
goto print;
finfo = (struct bpf_func_info *)info->func_info;
finfo = u64_to_ptr(info->func_info);
func_type = btf__type_by_id(prog_btf, finfo->type_id);
if (!func_type || !btf_is_func(func_type))
goto print;


@@ -143,6 +143,20 @@ static int codegen_datasec_def(struct bpf_object *obj,
var_name, align);
return -EINVAL;
}
/* Assume 32-bit architectures when generating data section
* struct memory layout. Given bpftool can't know which target
* host architecture it's emitting skeleton for, we need to be
* conservative and assume 32-bit one to ensure enough padding
* bytes are generated for pointer and long types. This will
* still work correctly for 64-bit architectures, because in
* the worst case we'll generate unnecessary padding field,
* which on 64-bit architectures is not strictly necessary and
* would be handled by natural 8-byte alignment. But it still
* will be a correct memory layout, based on recorded offsets
* in BTF.
*/
if (align > 4)
align = 4;
align_off = (off + align - 1) / align * align;
if (align_off != need_off) {
@@ -397,7 +411,7 @@ static int do_skeleton(int argc, char **argv)
{ \n\
struct %1$s *obj; \n\
\n\
obj = (typeof(obj))calloc(1, sizeof(*obj)); \n\
obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
if (!obj) \n\
return NULL; \n\
if (%1$s__create_skeleton(obj)) \n\
@@ -461,7 +475,7 @@ static int do_skeleton(int argc, char **argv)
{ \n\
struct bpf_object_skeleton *s; \n\
\n\
s = (typeof(s))calloc(1, sizeof(*s)); \n\
s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
if (!s) \n\
return -1; \n\
obj->skeleton = s; \n\
@@ -479,7 +493,7 @@ static int do_skeleton(int argc, char **argv)
/* maps */ \n\
s->map_cnt = %zu; \n\
s->map_skel_sz = sizeof(*s->maps); \n\
s->maps = (typeof(s->maps))calloc(s->map_cnt, s->map_skel_sz);\n\
s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
if (!s->maps) \n\
goto err; \n\
",
@@ -515,7 +529,7 @@ static int do_skeleton(int argc, char **argv)
/* programs */ \n\
s->prog_cnt = %zu; \n\
s->prog_skel_sz = sizeof(*s->progs); \n\
s->progs = (typeof(s->progs))calloc(s->prog_cnt, s->prog_skel_sz);\n\
s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
if (!s->progs) \n\
goto err; \n\
",

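The comment in the first hunk above carries the key reasoning of this change: bpftool caps the assumed alignment at 4 bytes because it cannot know whether the generated skeleton will be compiled on a 32-bit or 64-bit host. A minimal sketch of why the conservative layout stays correct (hypothetical structs, not emitted by bpftool; the 32-bit behaviour described is that of an i386-style ABI where 8-byte scalars are only 4-byte aligned):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Natural layout: a 64-bit compiler pads 'a' out to 8 bytes by itself,
     * but an i386-style 32-bit compiler would place 'b' at offset 4.
     */
    struct skel_data_natural {
            int a;
            uint64_t b;
    };

    /* Layout with explicit padding, as the skeleton generator emits when it
     * assumes 4-byte alignment: 'b' lands at offset 8 on every host,
     * matching the offsets recorded in BTF.
     */
    struct skel_data_padded {
            int a;
            char __pad0[4];
            uint64_t b;
    };

    int main(void)
    {
            printf("natural: offsetof(b) = %zu\n",
                   offsetof(struct skel_data_natural, b));
            printf("padded:  offsetof(b) = %zu\n",
                   offsetof(struct skel_data_padded, b));
            return 0;
    }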

@@ -106,7 +106,7 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
switch (info->type) {
case BPF_LINK_TYPE_RAW_TRACEPOINT:
jsonw_string_field(json_wtr, "tp_name",
(const char *)info->raw_tracepoint.tp_name);
u64_to_ptr(info->raw_tracepoint.tp_name));
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);
@@ -185,7 +185,7 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
switch (info->type) {
case BPF_LINK_TYPE_RAW_TRACEPOINT:
printf("\n\ttp '%s' ",
(const char *)info->raw_tracepoint.tp_name);
(const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);


@@ -21,7 +21,15 @@
/* Make sure we do not use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64)(unsigned long)ptr;
}
static inline void *u64_to_ptr(__u64 ptr)
{
return (void *)(unsigned long)ptr;
}
#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })

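The helpers above exist because kernel bpf_*_info structures carry userspace pointers as __u64 fields, so one UAPI layout serves both 32- and 64-bit userspace; turning the old ptr_to_u64() macro into an inline function and adding u64_to_ptr() gives a single type-checked place for both directions of the cast. A minimal standalone sketch of the round trip (struct fake_info is hypothetical, standing in for fields such as bpf_link_info.raw_tracepoint.tp_name):

    #include <stdio.h>
    #include <linux/types.h>

    static inline __u64 ptr_to_u64(const void *ptr)
    {
            return (__u64)(unsigned long)ptr;
    }

    static inline void *u64_to_ptr(__u64 val)
    {
            return (void *)(unsigned long)val;
    }

    /* Hypothetical stand-in for a kernel info struct that stores a
     * pointer-to-string as a __u64 field.
     */
    struct fake_info {
            __u64 tp_name;
    };

    int main(void)
    {
            char name[] = "sys_enter";
            struct fake_info info = { .tp_name = ptr_to_u64(name) };

            printf("%s\n", (const char *)u64_to_ptr(info.tp_name));
            return 0;
    }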

@@ -428,14 +428,14 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
p_info("no instructions returned");
return -1;
}
buf = (unsigned char *)(info->jited_prog_insns);
buf = u64_to_ptr(info->jited_prog_insns);
member_len = info->jited_prog_len;
} else { /* DUMP_XLATED */
if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
p_err("error retrieving insn dump: kernel.kptr_restrict set?");
return -1;
}
buf = (unsigned char *)info->xlated_prog_insns;
buf = u64_to_ptr(info->xlated_prog_insns);
member_len = info->xlated_prog_len;
}
@@ -444,7 +444,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
return -1;
}
func_info = (void *)info->func_info;
func_info = u64_to_ptr(info->func_info);
if (info->nr_line_info) {
prog_linfo = bpf_prog_linfo__new(info);
@@ -462,7 +462,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
n = write(fd, buf, member_len);
close(fd);
if (n != member_len) {
if (n != (ssize_t)member_len) {
p_err("error writing output file: %s",
n < 0 ? strerror(errno) : "short write");
return -1;
@@ -492,13 +492,13 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
__u32 i;
if (info->nr_jited_ksyms) {
kernel_syms_load(&dd);
ksyms = (__u64 *) info->jited_ksyms;
ksyms = u64_to_ptr(info->jited_ksyms);
}
if (json_output)
jsonw_start_array(json_wtr);
lens = (__u32 *) info->jited_func_lens;
lens = u64_to_ptr(info->jited_func_lens);
for (i = 0; i < info->nr_jited_func_lens; i++) {
if (ksyms) {
sym = kernel_syms_search(&dd, ksyms[i]);
@@ -559,7 +559,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
} else {
kernel_syms_load(&dd);
dd.nr_jited_ksyms = info->nr_jited_ksyms;
dd.jited_ksyms = (__u64 *) info->jited_ksyms;
dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
dd.btf = btf;
dd.func_info = func_info;
dd.finfo_rec_size = info->func_info_rec_size;
@@ -1681,7 +1681,7 @@ static char *profile_target_name(int tgt_fd)
goto out;
}
func_info = (struct bpf_func_info *)(info_linear->info.func_info);
func_info = u64_to_ptr(info_linear->info.func_info);
t = btf__type_by_id(btf, func_info[0].type_id);
if (!t) {
p_err("btf %d doesn't have type %d",


@@ -40,7 +40,7 @@
* Helper macro to manipulate data structures
*/
#ifndef offsetof
#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
#endif
#ifndef container_of
#define container_of(ptr, type, member) \

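The hunk above cuts off before the body of container_of(); the sketch below uses the conventional definition built on offsetof() to show what the macro is for: recovering a pointer to the enclosing structure from a pointer to one of its embedded members (the struct names here are made up for illustration):

    #include <stdio.h>
    #include <stddef.h>

    /* Conventional definition, assumed to match what the truncated hunk expands to. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct node {
            struct node *next;
    };

    struct connection {
            int fd;
            struct node link;   /* embedded list node */
    };

    int main(void)
    {
            struct connection c = { .fd = 3 };
            struct node *n = &c.link;

            /* Walk back from the embedded member to the containing object. */
            struct connection *back = container_of(n, struct connection, link);

            printf("fd = %d\n", back->fd);
            return 0;
    }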

@@ -41,6 +41,7 @@ struct btf {
__u32 types_size;
__u32 data_size;
int fd;
int ptr_sz;
};
static inline __u64 ptr_to_u64(const void *ptr)
@@ -221,6 +222,70 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
return btf->types[type_id];
}
static int determine_ptr_size(const struct btf *btf)
{
const struct btf_type *t;
const char *name;
int i;
for (i = 1; i <= btf->nr_types; i++) {
t = btf__type_by_id(btf, i);
if (!btf_is_int(t))
continue;
name = btf__name_by_offset(btf, t->name_off);
if (!name)
continue;
if (strcmp(name, "long int") == 0 ||
strcmp(name, "long unsigned int") == 0) {
if (t->size != 4 && t->size != 8)
continue;
return t->size;
}
}
return -1;
}
static size_t btf_ptr_sz(const struct btf *btf)
{
if (!btf->ptr_sz)
((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
}
/* Return pointer size this BTF instance assumes. The size is heuristically
* determined by looking for 'long' or 'unsigned long' integer type and
* recording its size in bytes. If BTF type information doesn't have any such
* type, this function returns 0. In the latter case, native architecture's
* pointer size is assumed, so will be either 4 or 8, depending on
* architecture that libbpf was compiled for. It's possible to override
* guessed value by using btf__set_pointer_size() API.
*/
size_t btf__pointer_size(const struct btf *btf)
{
if (!btf->ptr_sz)
((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
if (btf->ptr_sz < 0)
/* not enough BTF type info to guess */
return 0;
return btf->ptr_sz;
}
/* Override or set pointer size in bytes. Only values of 4 and 8 are
* supported.
*/
int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
{
if (ptr_sz != 4 && ptr_sz != 8)
return -EINVAL;
btf->ptr_sz = ptr_sz;
return 0;
}
static bool btf_type_is_void(const struct btf_type *t)
{
return t == &btf_void || btf_is_fwd(t);
@@ -253,7 +318,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
size = t->size;
goto done;
case BTF_KIND_PTR:
size = sizeof(void *);
size = btf_ptr_sz(btf);
goto done;
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
@@ -293,9 +358,9 @@ int btf__align_of(const struct btf *btf, __u32 id)
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
return min(sizeof(void *), (size_t)t->size);
return min(btf_ptr_sz(btf), (size_t)t->size);
case BTF_KIND_PTR:
return sizeof(void *);
return btf_ptr_sz(btf);
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
@@ -533,6 +598,18 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
if (IS_ERR(btf))
goto done;
switch (gelf_getclass(elf)) {
case ELFCLASS32:
btf__set_pointer_size(btf, 4);
break;
case ELFCLASS64:
btf__set_pointer_size(btf, 8);
break;
default:
pr_warn("failed to get ELF class (bitness) for %s\n", path);
break;
}
if (btf_ext && btf_ext_data) {
*btf_ext = btf_ext__new(btf_ext_data->d_buf,
btf_ext_data->d_size);

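The new btf__pointer_size()/btf__set_pointer_size() pair lets a caller ask what pointer size a BTF blob was built for (guessed from the size of 'long') and override it when the heuristic cannot decide. A sketch of how a consumer might use the API against an object file; "prog.bpf.o" is a placeholder path, and error handling follows this libbpf era's convention of checking returns with libbpf_get_error():

    #include <stdio.h>
    #include <bpf/btf.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
            struct btf *btf = btf__parse_elf("prog.bpf.o", NULL);

            if (libbpf_get_error(btf)) {
                    fprintf(stderr, "failed to parse BTF\n");
                    return 1;
            }

            /* 0 means "no long/unsigned long type found, could not guess". */
            if (btf__pointer_size(btf) == 0)
                    btf__set_pointer_size(btf, 8); /* BPF target is always 64-bit */

            printf("pointer size: %zu\n", btf__pointer_size(btf));
            btf__free(btf);
            return 0;
    }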

@@ -76,6 +76,8 @@ LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
LIBBPF_API size_t btf__pointer_size(const struct btf *btf);
LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz);
LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);


@@ -13,6 +13,7 @@
#include <errno.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <linux/kernel.h>
#include "btf.h"
#include "hashmap.h"
#include "libbpf.h"
@@ -60,6 +61,7 @@ struct btf_dump {
const struct btf_ext *btf_ext;
btf_dump_printf_fn_t printf_fn;
struct btf_dump_opts opts;
int ptr_sz;
bool strip_mods;
/* per-type auxiliary state */
@@ -138,6 +140,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
d->btf_ext = btf_ext;
d->printf_fn = printf_fn;
d->opts.ctx = opts ? opts->ctx : NULL;
d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
if (IS_ERR(d->type_names)) {
@@ -549,6 +552,9 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
}
}
static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
const struct btf_type *t);
static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t);
static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
@@ -671,6 +677,9 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
switch (kind) {
case BTF_KIND_INT:
/* Emit type alias definitions if necessary */
btf_dump_emit_missing_aliases(d, id, t);
tstate->emit_state = EMITTED;
break;
case BTF_KIND_ENUM:
@@ -797,7 +806,7 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
int align, int lvl)
{
int off_diff = m_off - cur_off;
int ptr_bits = sizeof(void *) * 8;
int ptr_bits = d->ptr_sz * 8;
if (off_diff <= 0)
/* no gap */
@@ -870,7 +879,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
btf_dump_printf(d, ": %d", m_sz);
off = m_off + m_sz;
} else {
m_sz = max(0, btf__resolve_size(d->btf, m->type));
m_sz = max(0LL, btf__resolve_size(d->btf, m->type));
off = m_off + m_sz * 8;
}
btf_dump_printf(d, ";");
@@ -890,6 +899,32 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
btf_dump_printf(d, " __attribute__((packed))");
}
static const char *missing_base_types[][2] = {
/*
* GCC emits typedefs to its internal __PolyX_t types when compiling Arm
* SIMD intrinsics. Alias them to standard base types.
*/
{ "__Poly8_t", "unsigned char" },
{ "__Poly16_t", "unsigned short" },
{ "__Poly64_t", "unsigned long long" },
{ "__Poly128_t", "unsigned __int128" },
};
static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
const struct btf_type *t)
{
const char *name = btf_dump_type_name(d, id);
int i;
for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) {
if (strcmp(name, missing_base_types[i][0]) == 0) {
btf_dump_printf(d, "typedef %s %s;\n\n",
missing_base_types[i][1], name);
break;
}
}
}
static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t)
{

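Per the table added above, GCC describes Arm SIMD intrinsics with its internal __PolyX_t types, which end up referenced from kernel BTF but are never defined in the dumped header; emitting a typedef alias to a standard integer type before the first user keeps the output compilable. A rough sketch of the shape of the resulting output (the struct is hypothetical, not actual dumper output):

    /* Alias emitted by btf_dump ahead of the type that needs it ... */
    typedef unsigned short __Poly16_t;

    /* ... so that a definition referencing the GCC-internal name still
     * compiles with toolchains that do not provide __Poly16_t built-in.
     */
    struct neon_state {
            __Poly16_t poly;
            unsigned int flags;
    };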

@@ -2434,6 +2434,8 @@ static int bpf_object__init_btf(struct bpf_object *obj,
BTF_ELF_SEC, err);
goto out;
}
/* enforce 8-byte pointers for BPF-targeted BTFs */
btf__set_pointer_size(obj->btf, 8);
err = 0;
}
if (btf_ext_data) {
@@ -2542,6 +2544,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
if (IS_ERR(kern_btf))
return PTR_ERR(kern_btf);
/* enforce 8-byte pointers for BPF-targeted BTFs */
btf__set_pointer_size(obj->btf, 8);
bpf_object__sanitize_btf(obj, kern_btf);
}
@@ -3478,10 +3482,11 @@ bpf_object__probe_global_data(struct bpf_object *obj)
map = bpf_create_map_xattr(&map_attr);
if (map < 0) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
ret = -errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
__func__, cp, errno);
return -errno;
__func__, cp, -ret);
return ret;
}
insns[0].imm = map;
@@ -5194,7 +5199,8 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
static int bpf_object__collect_map_relos(struct bpf_object *obj,
GElf_Shdr *shdr, Elf_Data *data)
{
int i, j, nrels, new_sz, ptr_sz = sizeof(void *);
const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
int i, j, nrels, new_sz;
const struct btf_var_secinfo *vi = NULL;
const struct btf_type *sec, *var, *def;
const struct btf_member *member;
@@ -5243,7 +5249,7 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
vi = btf_var_secinfos(sec) + map->btf_var_idx;
if (vi->offset <= rel.r_offset &&
rel.r_offset + sizeof(void *) <= vi->offset + vi->size)
rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
break;
}
if (j == obj->nr_maps) {
@@ -5279,17 +5285,20 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
return -EINVAL;
moff = rel.r_offset - vi->offset - moff;
if (moff % ptr_sz)
/* here we use BPF pointer size, which is always 64 bit, as we
* are parsing ELF that was built for BPF target
*/
if (moff % bpf_ptr_sz)
return -EINVAL;
moff /= ptr_sz;
moff /= bpf_ptr_sz;
if (moff >= map->init_slots_sz) {
new_sz = moff + 1;
tmp = realloc(map->init_slots, new_sz * ptr_sz);
tmp = realloc(map->init_slots, new_sz * host_ptr_sz);
if (!tmp)
return -ENOMEM;
map->init_slots = tmp;
memset(map->init_slots + map->init_slots_sz, 0,
(new_sz - map->init_slots_sz) * ptr_sz);
(new_sz - map->init_slots_sz) * host_ptr_sz);
map->init_slots_sz = new_sz;
}
map->init_slots[moff] = targ_map;
@@ -6012,9 +6021,10 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
}
if (bpf_obj_pin(prog->instances.fds[instance], path)) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warn("failed to pin program: %s\n", cp);
return -errno;
return err;
}
pr_debug("pinned program '%s'\n", path);

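Two of the hunks above rework error paths to capture errno before doing anything else, because the subsequent libbpf_strerror_r()/pr_warn() calls can themselves clobber errno, and the function would then return the wrong error code. A standalone sketch of the pattern; do_pin() is a made-up placeholder for a failing syscall wrapper:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Placeholder for a syscall wrapper that fails and sets errno. */
    static int do_pin(const char *path)
    {
            (void)path;
            errno = EACCES;
            return -1;
    }

    static int pin_object(const char *path)
    {
            if (do_pin(path)) {
                    int err = -errno;   /* capture first, before any other call */

                    fprintf(stderr, "failed to pin %s: %s\n",
                            path, strerror(-err));
                    return err;         /* the original error, not a clobbered one */
            }
            return 0;
    }

    int main(void)
    {
            return pin_object("/sys/fs/bpf/prog") ? 1 : 0;
    }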

@@ -295,5 +295,7 @@ LIBBPF_0.1.0 {
bpf_program__set_sk_lookup;
btf__parse;
btf__parse_raw;
btf__pointer_size;
btf__set_fd;
btf__set_pointer_size;
} LIBBPF_0.0.9;


@@ -159,15 +159,15 @@ void test_bpf_obj_id(void)
/* Check getting link info */
info_len = sizeof(struct bpf_link_info) * 2;
bzero(&link_infos[i], info_len);
link_infos[i].raw_tracepoint.tp_name = (__u64)&tp_name;
link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name);
link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]),
&link_infos[i], &info_len);
if (CHECK(err ||
link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
link_infos[i].prog_id != prog_infos[i].id ||
link_infos[i].raw_tracepoint.tp_name != (__u64)&tp_name ||
strcmp((char *)link_infos[i].raw_tracepoint.tp_name,
link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) ||
strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter") ||
info_len != sizeof(struct bpf_link_info),
"get-link-info(fd)",
@@ -178,7 +178,7 @@ void test_bpf_obj_id(void)
link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT,
link_infos[i].id,
link_infos[i].prog_id, prog_infos[i].id,
(char *)link_infos[i].raw_tracepoint.tp_name,
(const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter"))
goto done;


@@ -12,15 +12,16 @@ void btf_dump_printf(void *ctx, const char *fmt, va_list args)
static struct btf_dump_test_case {
const char *name;
const char *file;
bool known_ptr_sz;
struct btf_dump_opts opts;
} btf_dump_test_cases[] = {
{"btf_dump: syntax", "btf_dump_test_case_syntax", {}},
{"btf_dump: ordering", "btf_dump_test_case_ordering", {}},
{"btf_dump: padding", "btf_dump_test_case_padding", {}},
{"btf_dump: packing", "btf_dump_test_case_packing", {}},
{"btf_dump: bitfields", "btf_dump_test_case_bitfields", {}},
{"btf_dump: multidim", "btf_dump_test_case_multidim", {}},
{"btf_dump: namespacing", "btf_dump_test_case_namespacing", {}},
{"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}},
{"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}},
{"btf_dump: padding", "btf_dump_test_case_padding", true, {}},
{"btf_dump: packing", "btf_dump_test_case_packing", true, {}},
{"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}},
{"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}},
{"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}},
};
static int btf_dump_all_types(const struct btf *btf,
@@ -62,6 +63,18 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
goto done;
}
/* tests with t->known_ptr_sz have no "long" or "unsigned long" type,
* so it's impossible to determine correct pointer size; but if they
* do, it should be 8 regardless of host architecture, becaues BPF
* target is always 64-bit
*/
if (!t->known_ptr_sz) {
btf__set_pointer_size(btf, 8);
} else {
CHECK(btf__pointer_size(btf) != 8, "ptr_sz", "exp %d, got %zu\n",
8, btf__pointer_size(btf));
}
snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
fd = mkstemp(out_file);
if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) {


@@ -159,8 +159,8 @@ void test_core_extern(void)
exp = (uint64_t *)&t->data;
for (j = 0; j < n; j++) {
CHECK(got[j] != exp[j], "check_res",
"result #%d: expected %lx, but got %lx\n",
j, exp[j], got[j]);
"result #%d: expected %llx, but got %llx\n",
j, (__u64)exp[j], (__u64)got[j]);
}
cleanup:
test_core_extern__destroy(skel);

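This and several later selftest hunks fix printf format strings that only worked on 64-bit hosts: 'long' is 32-bit on 32-bit userspace, so %lx/%lu truncate or warn for 64-bit values and for size_t. A small sketch of the portable forms used throughout these fixes:

    #include <stdio.h>
    #include <linux/types.h>

    int main(void)
    {
            __u64 val = 0xFEDCBA9876543210ULL;
            size_t len = sizeof(val);

            /* %llx plus an explicit cast is correct for __u64 on both 32- and
             * 64-bit hosts; %zu is the dedicated length modifier for size_t.
             */
            printf("val=%llx len=%zu\n", (unsigned long long)val, len);
            return 0;
    }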

@@ -237,8 +237,8 @@
.union_sz = sizeof(((type *)0)->union_field), \
.arr_sz = sizeof(((type *)0)->arr_field), \
.arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \
.ptr_sz = sizeof(((type *)0)->ptr_field), \
.enum_sz = sizeof(((type *)0)->enum_field), \
.ptr_sz = 8, /* always 8-byte pointer for BPF */ \
.enum_sz = sizeof(((type *)0)->enum_field), \
}
#define SIZE_CASE(name) { \
@@ -432,20 +432,20 @@ static struct core_reloc_test_case test_cases[] = {
.sb4 = -1,
.sb20 = -0x17654321,
.u32 = 0xBEEF,
.s32 = -0x3FEDCBA987654321,
.s32 = -0x3FEDCBA987654321LL,
}),
BITFIELDS_CASE(bitfields___bitfield_vs_int, {
.ub1 = 0xFEDCBA9876543210,
.ub1 = 0xFEDCBA9876543210LL,
.ub2 = 0xA6,
.ub7 = -0x7EDCBA987654321,
.sb4 = -0x6123456789ABCDE,
.sb20 = 0xD00D,
.ub7 = -0x7EDCBA987654321LL,
.sb4 = -0x6123456789ABCDELL,
.sb20 = 0xD00DLL,
.u32 = -0x76543,
.s32 = 0x0ADEADBEEFBADB0B,
.s32 = 0x0ADEADBEEFBADB0BLL,
}),
BITFIELDS_CASE(bitfields___just_big_enough, {
.ub1 = 0xF,
.ub2 = 0x0812345678FEDCBA,
.ub1 = 0xFLL,
.ub2 = 0x0812345678FEDCBALL,
}),
BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),


@@ -16,7 +16,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
__u32 duration = 0, retval;
struct bpf_map *data_map;
const int zero = 0;
u64 *result = NULL;
__u64 *result = NULL;
err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
@@ -29,7 +29,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
link = calloc(sizeof(struct bpf_link *), prog_cnt);
prog = calloc(sizeof(struct bpf_program *), prog_cnt);
result = malloc((prog_cnt + 32 /* spare */) * sizeof(u64));
result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64));
if (CHECK(!link || !prog || !result, "alloc_memory",
"failed to alloc memory"))
goto close_prog;
@@ -72,7 +72,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
goto close_prog;
for (i = 0; i < prog_cnt; i++)
if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n",
if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %llu\n",
result[i]))
goto close_prog;


@@ -591,7 +591,7 @@ void test_flow_dissector(void)
CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
err || tattr.retval != 1,
tests[i].name,
"err %d errno %d retval %d duration %d size %u/%lu\n",
"err %d errno %d retval %d duration %d size %u/%zu\n",
err, errno, tattr.retval, tattr.duration,
tattr.data_size_out, sizeof(flow_keys));
CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);


@@ -5,7 +5,7 @@
static void test_global_data_number(struct bpf_object *obj, __u32 duration)
{
int i, err, map_fd;
uint64_t num;
__u64 num;
map_fd = bpf_find_map(__func__, obj, "result_number");
if (CHECK_FAIL(map_fd < 0))
@@ -14,7 +14,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
struct {
char *name;
uint32_t key;
uint64_t num;
__u64 num;
} tests[] = {
{ "relocate .bss reference", 0, 0 },
{ "relocate .data reference", 1, 42 },
@@ -32,7 +32,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
CHECK(err || num != tests[i].num, tests[i].name,
"err %d result %lx expected %lx\n",
"err %d result %llx expected %llx\n",
err, num, tests[i].num);
}
}


@@ -21,7 +21,7 @@ void test_mmap(void)
const long page_size = sysconf(_SC_PAGE_SIZE);
int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
struct bpf_map *data_map, *bss_map;
void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2;
struct test_mmap__bss *bss_data;
struct bpf_map_info map_info;
__u32 map_info_sz = sizeof(map_info);
@@ -183,16 +183,23 @@ void test_mmap(void)
/* check some more advanced mmap() manipulations */
/* map all but last page: pages 1-3 mapped */
tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
data_map_fd, 0);
if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS,
-1, 0);
if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno))
goto cleanup;
/* map all but last page: pages 1-3 mapped */
tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, 0);
if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) {
munmap(tmp0, 4 * page_size);
goto cleanup;
}
/* unmap second page: pages 1, 3 mapped */
err = munmap(tmp1 + page_size, page_size);
if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
munmap(tmp1, map_sz);
munmap(tmp1, 4 * page_size);
goto cleanup;
}
@@ -201,7 +208,7 @@ void test_mmap(void)
MAP_SHARED | MAP_FIXED, data_map_fd, 0);
if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
munmap(tmp1, page_size);
munmap(tmp1 + 2*page_size, page_size);
munmap(tmp1 + 2*page_size, 2 * page_size);
goto cleanup;
}
CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
@@ -211,7 +218,7 @@ void test_mmap(void)
tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, 0);
if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
munmap(tmp1, 3 * page_size); /* unmap page 1 */
munmap(tmp1, 4 * page_size); /* unmap page 1 */
goto cleanup;
}
CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);

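The reworked mmap test above first reserves the full four-page range with an anonymous mapping and then maps the BPF array over it with MAP_FIXED, so the later partial munmap()/remap steps always operate inside address space the test owns. A minimal sketch of that reserve-then-MAP_FIXED pattern using only anonymous mappings (no BPF map involved):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGE_SIZE);
            void *reserve, *fixed;

            /* Reserve 4 pages of address space. */
            reserve = mmap(NULL, 4 * page, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (reserve == MAP_FAILED) {
                    perror("reserve");
                    return 1;
            }

            /* Replace the first 3 pages in place; MAP_FIXED keeps the address. */
            fixed = mmap(reserve, 3 * page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (fixed == MAP_FAILED) {
                    perror("fixed");
                    munmap(reserve, 4 * page);
                    return 1;
            }

            printf("reserved %p, remapped %p\n", reserve, fixed);
            munmap(reserve, 4 * page);
            return 0;
    }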

@@ -28,7 +28,7 @@ void test_prog_run_xattr(void)
"err %d errno %d retval %d\n", err, errno, tattr.retval);
CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
"incorrect output size, want %lu have %u\n",
"incorrect output size, want %zu have %u\n",
sizeof(pkt_v4), tattr.data_size_out);
CHECK_ATTR(buf[5] != 0, "overflow",


@@ -309,6 +309,7 @@ static void v4_to_v6(struct sockaddr_storage *ss)
v6->sin6_addr.s6_addr[10] = 0xff;
v6->sin6_addr.s6_addr[11] = 0xff;
memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4);
memset(&v6->sin6_addr.s6_addr[0], 0, 10);
}
static int udp_recv_send(int server_fd)


@@ -81,7 +81,7 @@ void test_skb_ctx(void)
CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
"ctx_size_out",
"incorrect output size, want %lu have %u\n",
"incorrect output size, want %zu have %u\n",
sizeof(skb), tattr.ctx_size_out);
for (i = 0; i < 5; i++)


@@ -44,25 +44,25 @@ void test_varlen(void)
CHECK_VAL(bss->payload1_len2, size2);
CHECK_VAL(bss->total1, size1 + size2);
CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check",
"doesn't match!");
"doesn't match!\n");
CHECK_VAL(data->payload2_len1, size1);
CHECK_VAL(data->payload2_len2, size2);
CHECK_VAL(data->total2, size1 + size2);
CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check",
"doesn't match!");
"doesn't match!\n");
CHECK_VAL(data->payload3_len1, size1);
CHECK_VAL(data->payload3_len2, size2);
CHECK_VAL(data->total3, size1 + size2);
CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check",
"doesn't match!");
"doesn't match!\n");
CHECK_VAL(data->payload4_len1, size1);
CHECK_VAL(data->payload4_len2, size2);
CHECK_VAL(data->total4, size1 + size2);
CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
"doesn't match!");
"doesn't match!\n");
cleanup:
test_varlen__destroy(skel);
}


@@ -1,5 +1,10 @@
#include <stdint.h>
#include <stdbool.h>
void preserce_ptr_sz_fn(long x) {}
#define __bpf_aligned __attribute__((aligned(8)))
/*
* KERNEL
*/
@@ -444,51 +449,51 @@ struct core_reloc_primitives {
char a;
int b;
enum core_reloc_primitives_enum c;
void *d;
int (*f)(const char *);
void *d __bpf_aligned;
int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___diff_enum_def {
char a;
int b;
void *d;
int (*f)(const char *);
void *d __bpf_aligned;
int (*f)(const char *) __bpf_aligned;
enum {
X = 100,
Y = 200,
} c; /* inline enum def with differing set of values */
} c __bpf_aligned; /* inline enum def with differing set of values */
};
struct core_reloc_primitives___diff_func_proto {
void (*f)(int); /* incompatible function prototype */
void *d;
enum core_reloc_primitives_enum c;
void (*f)(int) __bpf_aligned; /* incompatible function prototype */
void *d __bpf_aligned;
enum core_reloc_primitives_enum c __bpf_aligned;
int b;
char a;
};
struct core_reloc_primitives___diff_ptr_type {
const char * const d; /* different pointee type + modifiers */
char a;
const char * const d __bpf_aligned; /* different pointee type + modifiers */
char a __bpf_aligned;
int b;
enum core_reloc_primitives_enum c;
int (*f)(const char *);
int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___err_non_enum {
char a[1];
int b;
int c; /* int instead of enum */
void *d;
int (*f)(const char *);
void *d __bpf_aligned;
int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___err_non_int {
char a[1];
int *b; /* ptr instead of int */
enum core_reloc_primitives_enum c;
void *d;
int (*f)(const char *);
int *b __bpf_aligned; /* ptr instead of int */
enum core_reloc_primitives_enum c __bpf_aligned;
void *d __bpf_aligned;
int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___err_non_ptr {
@@ -496,7 +501,7 @@ struct core_reloc_primitives___err_non_ptr {
int b;
enum core_reloc_primitives_enum c;
int d; /* int instead of ptr */
int (*f)(const char *);
int (*f)(const char *) __bpf_aligned;
};
/*
@@ -507,7 +512,7 @@ struct core_reloc_mods_output {
};
typedef const int int_t;
typedef const char *char_ptr_t;
typedef const char *char_ptr_t __bpf_aligned;
typedef const int arr_t[7];
struct core_reloc_mods_substruct {
@@ -523,9 +528,9 @@ typedef struct {
struct core_reloc_mods {
int a;
int_t b;
char *c;
char *c __bpf_aligned;
char_ptr_t d;
int e[3];
int e[3] __bpf_aligned;
arr_t f;
struct core_reloc_mods_substruct g;
core_reloc_mods_substruct_t h;
@@ -535,9 +540,9 @@ struct core_reloc_mods {
struct core_reloc_mods___mod_swap {
int b;
int_t a;
char *d;
char *d __bpf_aligned;
char_ptr_t c;
int f[3];
int f[3] __bpf_aligned;
arr_t e;
struct {
int y;
@@ -555,7 +560,7 @@ typedef arr1_t arr2_t;
typedef arr2_t arr3_t;
typedef arr3_t arr4_t;
typedef const char * const volatile fancy_char_ptr_t;
typedef const char * const volatile fancy_char_ptr_t __bpf_aligned;
typedef core_reloc_mods_substruct_t core_reloc_mods_substruct_tt;
@@ -567,7 +572,7 @@ struct core_reloc_mods___typedefs {
arr4_t e;
fancy_char_ptr_t d;
fancy_char_ptr_t c;
int3_t b;
int3_t b __bpf_aligned;
int3_t a;
};
@@ -739,19 +744,19 @@ struct core_reloc_bitfields___bit_sz_change {
int8_t sb4: 1; /* 4 -> 1 */
int32_t sb20: 30; /* 20 -> 30 */
/* non-bitfields */
uint16_t u32; /* 32 -> 16 */
int64_t s32; /* 32 -> 64 */
uint16_t u32; /* 32 -> 16 */
int64_t s32 __bpf_aligned; /* 32 -> 64 */
};
/* turn bitfield into non-bitfield and vice versa */
struct core_reloc_bitfields___bitfield_vs_int {
uint64_t ub1; /* 3 -> 64 non-bitfield */
uint8_t ub2; /* 20 -> 8 non-bitfield */
int64_t ub7; /* 7 -> 64 non-bitfield signed */
int64_t sb4; /* 4 -> 64 non-bitfield signed */
uint64_t sb20; /* 20 -> 16 non-bitfield unsigned */
int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */
int64_t ub7 __bpf_aligned; /* 7 -> 64 non-bitfield signed */
int64_t sb4 __bpf_aligned; /* 4 -> 64 non-bitfield signed */
uint64_t sb20 __bpf_aligned; /* 20 -> 16 non-bitfield unsigned */
int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
uint64_t s32: 60 __bpf_aligned; /* 32 non-bitfield -> 60 bitfield */
};
struct core_reloc_bitfields___just_big_enough {


@@ -54,6 +54,7 @@ SEC("sockops")
int bpf_testcb(struct bpf_sock_ops *skops)
{
char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)];
struct bpf_sock_ops *reuse = skops;
struct tcphdr *thdr;
int good_call_rv = 0;
int bad_call_rv = 0;
@@ -62,6 +63,46 @@ int bpf_testcb(struct bpf_sock_ops *skops)
int v = 0;
int op;
/* Test reading fields in bpf_sock_ops using single register */
asm volatile (
"%[reuse] = *(u32 *)(%[reuse] +96)"
: [reuse] "+r"(reuse)
:);
asm volatile (
"%[op] = *(u32 *)(%[skops] +96)"
: [op] "+r"(op)
: [skops] "r"(skops)
:);
asm volatile (
"r9 = %[skops];\n"
"r8 = *(u32 *)(r9 +164);\n"
"*(u32 *)(r9 +164) = r8;\n"
:: [skops] "r"(skops)
: "r9", "r8");
asm volatile (
"r1 = %[skops];\n"
"r1 = *(u64 *)(r1 +184);\n"
"if r1 == 0 goto +1;\n"
"r1 = *(u32 *)(r1 +4);\n"
:: [skops] "r"(skops):"r1");
asm volatile (
"r9 = %[skops];\n"
"r9 = *(u64 *)(r9 +184);\n"
"if r9 == 0 goto +1;\n"
"r9 = *(u32 *)(r9 +4);\n"
:: [skops] "r"(skops):"r9");
asm volatile (
"r1 = %[skops];\n"
"r2 = *(u64 *)(r1 +184);\n"
"if r2 == 0 goto +1;\n"
"r2 = *(u32 *)(r2 +4);\n"
:: [skops] "r"(skops):"r1", "r2");
op = (int) skops->op;
update_event_map(op);

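The asm volatile blocks added above hand-code loads of bpf_sock_ops fields at fixed offsets through a single register, to make sure the verifier's context-access rewrites handle that addressing form; in ordinary BPF C the same access is just a field read, roughly as in this sketch (program and section names are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("sockops")
    int sockops_plain_read(struct bpf_sock_ops *skops)
    {
            /* The verifier rewrites this direct ctx access into the same kind
             * of fixed-offset load that the asm blocks spell out by hand.
             */
            int op = (int)skops->op;

            return op >= 0 ? 1 : 0;
    }

    char _license[] SEC("license") = "GPL";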

@@ -15,9 +15,9 @@ int test_pid = 0;
bool capture = false;
/* .bss */
long payload1_len1 = 0;
long payload1_len2 = 0;
long total1 = 0;
__u64 payload1_len1 = 0;
__u64 payload1_len2 = 0;
__u64 total1 = 0;
char payload1[MAX_LEN + MAX_LEN] = {};
/* .data */


@@ -3883,7 +3883,7 @@ static int test_big_btf_info(unsigned int test_num)
info_garbage.garbage = 0;
err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
if (CHECK(err || info_len != sizeof(*info),
"err:%d errno:%d info_len:%u sizeof(*info):%lu",
"err:%d errno:%d info_len:%u sizeof(*info):%zu",
err, errno, info_len, sizeof(*info))) {
err = -1;
goto done;
@@ -4094,7 +4094,7 @@ static int do_test_get_info(unsigned int test_num)
if (CHECK(err || !info.id || info_len != sizeof(info) ||
info.btf_size != raw_btf_size ||
(ret = memcmp(raw_btf, user_btf, expected_nbytes)),
"err:%d errno:%d info.id:%u info_len:%u sizeof(info):%lu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
"err:%d errno:%d info.id:%u info_len:%u sizeof(info):%zu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
err, errno, info.id, info_len, sizeof(info),
raw_btf_size, info.btf_size, expected_nbytes, ret)) {
err = -1;
@@ -4730,7 +4730,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
nexpected_line = snprintf(expected_line, line_size,
"%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
"{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
"{%llu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
"%u,0x%x,[[%d,%d],[%d,%d]]}\n",
percpu_map ? "\tcpu" : "",
percpu_map ? cpu : next_key,
@@ -4738,7 +4738,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
v->unused_bits2a,
v->bits28,
v->unused_bits2b,
v->ui64,
(__u64)v->ui64,
v->ui8a[0], v->ui8a[1],
v->ui8a[2], v->ui8a[3],
v->ui8a[4], v->ui8a[5],


@@ -135,6 +135,11 @@ static inline __u64 ptr_to_u64(const void *ptr)
return (__u64) (unsigned long) ptr;
}
static inline void *u64_to_ptr(__u64 ptr)
{
return (void *) (unsigned long) ptr;
}
int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
int compare_map_keys(int map1_fd, int map2_fd);
int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);


@@ -180,6 +180,8 @@ setup()
;;
r[12]) ip netns exec $ns sysctl -q -w net.ipv4.ip_forward=1
ip netns exec $ns sysctl -q -w net.ipv4.conf.all.send_redirects=1
ip netns exec $ns sysctl -q -w net.ipv4.conf.default.rp_filter=0
ip netns exec $ns sysctl -q -w net.ipv4.conf.all.rp_filter=0
ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=1
ip netns exec $ns sysctl -q -w net.ipv6.route.mtu_expires=10


@@ -2,13 +2,18 @@
# SPDX-License-Identifier: GPL-2.0
#
# This tests basic flowtable functionality.
# Creates following topology:
# Creates following default topology:
#
# Originator (MTU 9000) <-Router1-> MTU 1500 <-Router2-> Responder (MTU 2000)
# Router1 is the one doing flow offloading, Router2 has no special
# purpose other than having a link that is smaller than either Originator
# and responder, i.e. TCPMSS announced values are too large and will still
# result in fragmentation and/or PMTU discovery.
#
# You can check with different Orgininator/Link/Responder MTU eg:
# sh nft_flowtable.sh -o1000 -l500 -r100
#
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
@@ -21,29 +26,18 @@ ns2out=""
log_netns=$(sysctl -n net.netfilter.nf_log_all_netns)
nft --version > /dev/null 2>&1
if [ $? -ne 0 ];then
echo "SKIP: Could not run test without nft tool"
exit $ksft_skip
fi
checktool (){
$1 > /dev/null 2>&1
if [ $? -ne 0 ];then
echo "SKIP: Could not $2"
exit $ksft_skip
fi
}
ip -Version > /dev/null 2>&1
if [ $? -ne 0 ];then
echo "SKIP: Could not run test without ip tool"
exit $ksft_skip
fi
which nc > /dev/null 2>&1
if [ $? -ne 0 ];then
echo "SKIP: Could not run test without nc (netcat)"
exit $ksft_skip
fi
ip netns add nsr1
if [ $? -ne 0 ];then
echo "SKIP: Could not create net namespace"
exit $ksft_skip
fi
checktool "nft --version" "run test without nft tool"
checktool "ip -Version" "run test without ip tool"
checktool "which nc" "run test without nc (netcat)"
checktool "ip netns add nsr1" "create net namespace"
ip netns add ns1
ip netns add ns2
@@ -89,11 +83,24 @@ ip -net nsr2 addr add dead:2::1/64 dev veth1
# ns2 is going via nsr2 with a smaller mtu, so that TCPMSS announced by both peers
# is NOT the lowest link mtu.
ip -net nsr1 link set veth0 mtu 9000
ip -net ns1 link set eth0 mtu 9000
omtu=9000
lmtu=1500
rmtu=2000
ip -net nsr2 link set veth1 mtu 2000
ip -net ns2 link set eth0 mtu 2000
while getopts "o:l:r:" o
do
case $o in
o) omtu=$OPTARG;;
l) lmtu=$OPTARG;;
r) rmtu=$OPTARG;;
esac
done
ip -net nsr1 link set veth0 mtu $omtu
ip -net ns1 link set eth0 mtu $omtu
ip -net nsr2 link set veth1 mtu $rmtu
ip -net ns2 link set eth0 mtu $rmtu
# transfer-net between nsr1 and nsr2.
# these addresses are not used for connections.
@@ -147,7 +154,7 @@ table inet filter {
# as PMTUd is off.
# This rule is deleted for the last test, when we expect PMTUd
# to kick in and ensure all packets meet mtu requirements.
meta length gt 1500 accept comment something-to-grep-for
meta length gt $lmtu accept comment something-to-grep-for
# next line blocks connection w.o. working offload.
# we only do this for reverse dir, because we expect packets to
@@ -243,8 +250,14 @@ test_tcp_forwarding_ip()
sleep 3
kill $lpid
kill $cpid
if ps -p $lpid > /dev/null;then
kill $lpid
fi
if ps -p $cpid > /dev/null;then
kill $cpid
fi
wait
check_transfer "$ns1in" "$ns2out" "ns1 -> ns2"