Merge branch 'for-4.14/fs' into libnvdimm-for-next

Dan Williams
2017-08-31 16:25:59 -07:00
1053 changed files with 15829 additions and 19020 deletions


@@ -11,6 +11,8 @@
# define __NR_bpf 280
# elif defined(__sparc__)
# define __NR_bpf 349
# elif defined(__s390__)
# define __NR_bpf 351
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif
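These fallback numbers matter because glibc ships no bpf() wrapper, so callers invoke the system call directly by number. Below is a minimal sketch of that pattern, assuming __NR_bpf has been defined either by <sys/syscall.h> or by fallbacks like the ones above; the helper name and the GET_NEXT_ID usage are illustrative only, not the exact libbpf code.

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>
#include <string.h>

/* Illustrative wrapper: issue bpf(2) directly by syscall number. */
static long sys_bpf_example(enum bpf_cmd cmd, union bpf_attr *attr,
			    unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int main(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = 0;
	/* Ask the kernel for the ID of the first loaded BPF program, if any. */
	return sys_bpf_example(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)) ? 1 : 0;
}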


@@ -474,7 +474,7 @@ class Provider(object):
@staticmethod
def is_field_wanted(fields_filter, field):
"""Indicate whether field is valid according to fields_filter."""
if not fields_filter:
if not fields_filter or fields_filter == "help":
return True
return re.match(fields_filter, field) is not None
@@ -1413,8 +1413,8 @@ performance.
Requirements:
- Access to:
/sys/kernel/debug/kvm
/sys/kernel/debug/trace/events/*
%s
%s/events/*
/proc/pid/task
- /proc/sys/kernel/perf_event_paranoid < 1 if user has no
CAP_SYS_ADMIN and perf events are used.
@@ -1434,7 +1434,7 @@ Interactive Commands:
s set update interval
x toggle reporting of stats for individual child trace events
Press any other key to refresh statistics immediately.
"""
""" % (PATH_DEBUGFS_KVM, PATH_DEBUGFS_TRACING)
class PlainHelpFormatter(optparse.IndentedHelpFormatter):
def format_description(self, description):
@@ -1496,7 +1496,8 @@ Press any other key to refresh statistics immediately.
action='store',
default=DEFAULT_REGEX,
dest='fields',
help='fields to display (regex)',
help='''fields to display (regex)
"-f help" for a list of available events''',
)
optparser.add_option('-p', '--pid',
action='store',
@@ -1559,6 +1560,17 @@ def main():
stats = Stats(options)
if options.fields == "help":
event_list = "\n"
s = stats.get()
for key in s.keys():
if key.find('(') != -1:
key = key[0:key.find('(')]
if event_list.find('\n' + key + '\n') == -1:
event_list += key + '\n'
sys.stdout.write(event_list)
return ""
if options.log:
log(stats)
elif not options.once:


@@ -39,6 +39,8 @@
# define __NR_bpf 280
# elif defined(__sparc__)
# define __NR_bpf 349
# elif defined(__s390__)
# define __NR_bpf 351
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif
@@ -314,7 +316,6 @@ int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
int err;
bzero(&attr, sizeof(attr));
bzero(info, *info_len);
attr.info.bpf_fd = prog_fd;
attr.info.info_len = *info_len;
attr.info.info = ptr_to_u64(info);


@@ -12,12 +12,23 @@
int _version SEC("version") = 1;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define TEST_FIELD(TYPE, FIELD, MASK) \
{ \
TYPE tmp = *(volatile TYPE *)&skb->FIELD; \
if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \
return TC_ACT_SHOT; \
}
#else
#define TEST_FIELD_OFFSET(a, b) ((sizeof(a) - sizeof(b)) / sizeof(b))
#define TEST_FIELD(TYPE, FIELD, MASK) \
{ \
TYPE tmp = *((volatile TYPE *)&skb->FIELD + \
TEST_FIELD_OFFSET(skb->FIELD, TYPE)); \
if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \
return TC_ACT_SHOT; \
}
#endif
SEC("test1")
int process(struct __sk_buff *skb)
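The big-endian branch of TEST_FIELD above offsets the narrow load so that it still reads the low-order bits of the 32-bit skb field. A small host-side sketch of the same arithmetic, purely illustrative and independent of the BPF test itself:

#include <stdio.h>
#include <stdint.h>

/* Same idea as TEST_FIELD_OFFSET above: index of the narrow element that
 * holds the least-significant bits of the wide one. */
#define NARROW_IDX(wide, narrow) ((sizeof(wide) - sizeof(narrow)) / sizeof(narrow))

int main(void)
{
	uint32_t word = 0x11223344;
	uint8_t *bytes = (uint8_t *)&word;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	size_t idx = 0;                              /* low byte comes first */
#else
	size_t idx = NARROW_IDX(uint32_t, uint8_t);  /* low byte comes last  */
#endif
	printf("low byte 0x%02x found at index %zu\n", bytes[idx], idx);
	return 0;
}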


@@ -340,6 +340,7 @@ static void test_bpf_obj_id(void)
/* Check getting prog info */
info_len = sizeof(struct bpf_prog_info) * 2;
bzero(&prog_infos[i], info_len);
prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
prog_infos[i].jited_prog_len = sizeof(jited_insns);
prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
@@ -369,6 +370,7 @@ static void test_bpf_obj_id(void)
/* Check getting map info */
info_len = sizeof(struct bpf_map_info) * 2;
bzero(&map_infos[i], info_len);
err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
&info_len);
if (CHECK(err ||
@@ -394,7 +396,7 @@ static void test_bpf_obj_id(void)
nr_id_found = 0;
next_id = 0;
while (!bpf_prog_get_next_id(next_id, &next_id)) {
struct bpf_prog_info prog_info;
struct bpf_prog_info prog_info = {};
int prog_fd;
info_len = sizeof(prog_info);
@@ -418,6 +420,8 @@ static void test_bpf_obj_id(void)
nr_id_found++;
err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
prog_infos[i].jited_prog_insns = 0;
prog_infos[i].xlated_prog_insns = 0;
CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
memcmp(&prog_info, &prog_infos[i], info_len),
"get-prog-info(next_id->fd)",
@@ -436,7 +440,7 @@ static void test_bpf_obj_id(void)
nr_id_found = 0;
next_id = 0;
while (!bpf_map_get_next_id(next_id, &next_id)) {
struct bpf_map_info map_info;
struct bpf_map_info map_info = {};
int map_fd;
info_len = sizeof(map_info);
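The zero-initialization added above (the bzero() calls and the = {} initializers) exists because these structures are later compared byte-for-byte with memcmp(); any member or padding byte the kernel does not write back would otherwise hold stack garbage and fail the comparison spuriously. A minimal sketch with a made-up stand-in struct, not the real bpf_prog_info:

#include <stdio.h>
#include <string.h>

struct fake_info {
	unsigned int id;
	unsigned int unused;	/* imagine a field the kernel never fills in */
};

int main(void)
{
	/* Zero-initialized: every byte, including 'unused', is known. */
	struct fake_info a = {};
	struct fake_info b = {};

	a.id = 42;
	b.id = 42;

	/* Had 'a' and 'b' been left uninitialized, 'unused' (and any padding)
	 * could differ between them and memcmp() could report a mismatch. */
	printf("structs %s\n", memcmp(&a, &b, sizeof(a)) == 0 ? "match" : "differ");
	return 0;
}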


@@ -8,6 +8,7 @@
* License as published by the Free Software Foundation.
*/
#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
@@ -1098,7 +1099,7 @@ static struct bpf_test tests[] = {
"check skb->hash byte load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#else
@@ -1135,7 +1136,7 @@ static struct bpf_test tests[] = {
"check skb->hash byte load not permitted 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 3),
#else
@@ -1244,7 +1245,7 @@ static struct bpf_test tests[] = {
"check skb->hash half load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#else
@@ -1259,7 +1260,7 @@ static struct bpf_test tests[] = {
"check skb->hash half load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
#else
@@ -5422,7 +5423,7 @@ static struct bpf_test tests[] = {
"check bpf_perf_event_data->sample_period byte load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
#else
@@ -5438,7 +5439,7 @@ static struct bpf_test tests[] = {
"check bpf_perf_event_data->sample_period half load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
#else
@@ -5454,7 +5455,7 @@ static struct bpf_test tests[] = {
"check bpf_perf_event_data->sample_period word load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
#else
@@ -5481,7 +5482,7 @@ static struct bpf_test tests[] = {
"check skb->data half load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, data)),
#else
@@ -5497,7 +5498,7 @@ static struct bpf_test tests[] = {
"check skb->tc_classid half load not permitted for lwt prog",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid)),
#else
@@ -5980,6 +5981,34 @@ static struct bpf_test tests[] = {
.result = REJECT,
.result_unpriv = REJECT,
},
{
"subtraction bounds (map value)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
.result_unpriv = REJECT,
},
};
static int probe_filter_length(const struct bpf_insn *fp)
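Besides the new "subtraction bounds" test case, the hunks above pull in <endian.h> and replace #ifdef __LITTLE_ENDIAN with a comparison against __BYTE_ORDER. A minimal sketch of the distinction: in userspace, <endian.h> defines both __LITTLE_ENDIAN and __BIG_ENDIAN as plain constants on every architecture, so the #ifdef form is always true and only the __BYTE_ORDER comparison actually reflects the host byte order.

#include <endian.h>
#include <stdio.h>

int main(void)
{
#ifdef __LITTLE_ENDIAN
	/* Taken even on big-endian hosts: __LITTLE_ENDIAN is just the
	 * constant 1234 from <endian.h>, defined everywhere. */
	puts("#ifdef __LITTLE_ENDIAN: always true in userspace");
#endif

#if __BYTE_ORDER == __LITTLE_ENDIAN
	puts("__BYTE_ORDER check: this host is little-endian");
#else
	puts("__BYTE_ORDER check: this host is big-endian");
#endif
	return 0;
}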


@@ -14,7 +14,7 @@ all:
done
override define RUN_TESTS
@if [ `dirname $(OUTPUT)` = $(PWD) ]; then ./run.sh; fi
$(OUTPUT)/run.sh
endef
override define INSTALL_RULE

tools/testing/selftests/kmod/kmod.sh Normal file → Executable file

@@ -473,8 +473,8 @@ usage()
echo " all Runs all tests (default)"
echo " -t Run test ID the number amount of times is recommended"
echo " -w Watch test ID run until it runs into an error"
echo " -c Run test ID once"
echo " -s Run test ID x test-count number of times"
echo " -s Run test ID once"
echo " -c Run test ID x test-count number of times"
echo " -l List all test ID list"
echo " -h|--help Help"
echo

tools/testing/selftests/sysctl/sysctl.sh Normal file → Executable file


@@ -229,10 +229,9 @@ static void init_test(void)
printf("CLOCK_MONOTONIC_RAW+CLOCK_MONOTONIC precision: %.0f ns\t\t",
1e9 * precision);
if (precision > MAX_PRECISION) {
printf("[SKIP]\n");
ksft_exit_skip();
}
if (precision > MAX_PRECISION)
ksft_exit_skip("precision: %.0f ns > MAX_PRECISION: %.0f ns\n",
1e9 * precision, 1e9 * MAX_PRECISION);
printf("[OK]\n");
srand(ts.tv_sec ^ ts.tv_nsec);
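The init_test() change above folds the skip reason into the ksft_exit_skip() call instead of printing "[SKIP]" by hand. A rough stand-in for such a variadic skip helper, shown only to illustrate the pattern; it is not the kselftest.h implementation, and the exit code is the conventional kselftest skip value.

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

#define KSFT_SKIP 4	/* conventional kselftest "skip" exit code */

/* Illustrative helper: print the skip marker and reason, then exit. */
static void exit_skip_example(const char *fmt, ...)
{
	va_list ap;

	printf("[SKIP]\n");
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	exit(KSFT_SKIP);
}

int main(void)
{
	double precision = 200e-9, max_precision = 100e-9;

	if (precision > max_precision)
		exit_skip_example("precision: %.0f ns > MAX_PRECISION: %.0f ns\n",
				  1e9 * precision, 1e9 * max_precision);
	return 0;
}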