Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Support busy polling generically, for all NAPI drivers. From Eric Dumazet.
 2) Add byte/packet counter support to nft_ct, from Florian Westphal.
 3) Add RSS/XPS support to mvneta driver, from Gregory Clement.
 4) Implement IPV6_HDRINCL socket option for raw sockets, from Hannes Frederic Sowa.
 5) Add support for T6 adapter to cxgb4 driver, from Hariprasad Shenai.
 6) Add support for VLAN device bridging to mlxsw switch driver, from Ido Schimmel.
 7) Add driver for Netronome NFP4000/NFP6000, from Jakub Kicinski.
 8) Provide hwmon interface to mlxsw switch driver, from Jiri Pirko.
 9) Reorganize wireless drivers into per-vendor directories just like we do for ethernet drivers. From Kalle Valo.
10) Provide a way for administrators to "destroy" connected sockets via the SOCK_DESTROY socket netlink diag operation. From Lorenzo Colitti.
11) Add support to add/remove multicast routes via netlink, from Nikolay Aleksandrov.
12) Make TCP keepalive settings per-namespace, from Nikolay Borisov.
13) Add forwarding and packet duplication facilities to nf_tables, from Pablo Neira Ayuso.
14) Dead route support in MPLS, from Roopa Prabhu.
15) TSO support for thunderx chips, from Sunil Goutham.
16) Add driver for IBM's System i/p VNIC protocol, from Thomas Falcon.
17) Rationalize, consolidate, and more completely document the checksum offloading facilities in the networking stack. From Tom Herbert.
18) Support aborting an ongoing scan in mac80211/cfg80211, from Vidyullatha Kanchanapally.
19) Use per-bucket spinlock for bpf hash facility, from Tom Leiming.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1375 commits)
  net: bnxt: always return values from _bnxt_get_max_rings
  net: bpf: reject invalid shifts
  phonet: properly unshare skbs in phonet_rcv()
  dwc_eth_qos: Fix dma address for multi-fragment skbs
  phy: remove an unneeded condition
  mdio: remove an unneeded condition
  mdio_bus: NULL dereference on allocation error
  net: Fix typo in netdev_intersect_features
  net: freescale: mac-fec: Fix build error from phy_device API change
  net: freescale: ucc_geth: Fix build error from phy_device API change
  bonding: Prevent IPv6 link local address on enslaved devices
  IB/mlx5: Add flow steering support
  net/mlx5_core: Export flow steering API
  net/mlx5_core: Make ipv4/ipv6 location more clear
  net/mlx5_core: Enable flow steering support for the IB driver
  net/mlx5_core: Initialize namespaces only when supported by device
  net/mlx5_core: Set priority attributes
  net/mlx5_core: Connect flow tables
  net/mlx5_core: Introduce modify flow table command
  net/mlx5_core: Managing root flow table
  ...
lib/Kconfig.debug
@@ -1495,6 +1495,29 @@ config OF_RECONFIG_NOTIFIER_ERROR_INJECT
 
 	  If unsure, say N.
 
+config NETDEV_NOTIFIER_ERROR_INJECT
+	tristate "Netdev notifier error injection module"
+	depends on NET && NOTIFIER_ERROR_INJECTION
+	help
+	  This option provides the ability to inject artificial errors to
+	  netdevice notifier chain callbacks.  It is controlled through debugfs
+	  interface /sys/kernel/debug/notifier-error-inject/netdev
+
+	  If the notifier call chain should be failed with some events
+	  notified, write the error code to "actions/<notifier event>/error".
+
+	  Example: Inject netdevice mtu change error (-22 = -EINVAL)
+
+	  # cd /sys/kernel/debug/notifier-error-inject/netdev
+	  # echo -22 > actions/NETDEV_CHANGEMTU/error
+	  # ip link set eth0 mtu 1024
+	  RTNETLINK answers: Invalid argument
+
+	  To compile this code as a module, choose M here: the module will
+	  be called netdev-notifier-error-inject.
+
+	  If unsure, say N.
+
 config FAULT_INJECTION
 	bool "Fault-injection framework"
 	depends on DEBUG_KERNEL
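As a hedged illustration of what this option exercises (the demo_* names below are invented for this sketch and are not part of the patch): an ordinary netdevice notifier registered as below never sees NETDEV_CHANGEMTU once the injection arms -EINVAL, because the err-inject notifier vetoes the event first.

#include <linux/module.h>
#include <linux/netdevice.h>

/* Minimal netdevice notifier; the error injection above would keep
 * this callback from ever seeing a vetoed event. */
static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_CHANGEMTU)
		netdev_info(dev, "MTU is now %d\n", dev->mtu);

	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_netdev_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_nb);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");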
lib/Makefile
@@ -120,6 +120,7 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
 obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
 obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
+obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
 obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
 obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
 	of-reconfig-notifier-error-inject.o
lib/netdev-notifier-error-inject.c
@@ -0,0 +1,55 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include "notifier-error-inject.h"
+
+static int priority;
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify netdevice notifier priority");
+
+static struct notifier_err_inject netdev_notifier_err_inject = {
+	.actions = {
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_REGISTER) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEMTU) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGENAME) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_UP) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_TYPE_CHANGE) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_POST_INIT) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEMTU) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEUPPER) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEUPPER) },
+		{}
+	}
+};
+
+static struct dentry *dir;
+
+static int netdev_err_inject_init(void)
+{
+	int err;
+
+	dir = notifier_err_inject_init("netdev", notifier_err_inject_dir,
+				       &netdev_notifier_err_inject, priority);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	err = register_netdevice_notifier(&netdev_notifier_err_inject.nb);
+	if (err)
+		debugfs_remove_recursive(dir);
+
+	return err;
+}
+
+static void netdev_err_inject_exit(void)
+{
+	unregister_netdevice_notifier(&netdev_notifier_err_inject.nb);
+	debugfs_remove_recursive(dir);
+}
+
+module_init(netdev_err_inject_init);
+module_exit(netdev_err_inject_exit);
+
+MODULE_DESCRIPTION("Netdevice notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nikolay Aleksandrov <razor@blackwall.org>");
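How an armed error actually reaches user space is worth spelling out. The sketch below is hedged (demo_notify_mtu_change is an invented name); it mirrors the pattern callers such as dev_change_mtu() use: the notifier chain's return value carries the injected code, and notifier_to_errno() turns it back into a plain errno.

#include <linux/netdevice.h>

/* Hypothetical helper: run the netdev notifier chain and translate an
 * injected return value back into an errno (-22 becomes -EINVAL). */
static int demo_notify_mtu_change(struct net_device *dev)
{
	int ret = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);

	return notifier_to_errno(ret);
}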
lib/rhashtable.c
@@ -231,9 +231,6 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
 	 */
 	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
 
-	/* Ensure the new table is visible to readers. */
-	smp_wmb();
-
 	spin_unlock_bh(old_tbl->locks);
 
 	return 0;
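The smp_wmb() removed here was redundant: rcu_assign_pointer() already orders the initialization of the new table before its publication. A minimal sketch of that publish/read pattern, using invented names (foo, gp, demo_*) rather than rhashtable's own:

#include <linux/rcupdate.h>

struct foo {
	int a;
};

static struct foo __rcu *gp;

/* Writer: initialize first, then publish. rcu_assign_pointer() has
 * release semantics, so no separate smp_wmb() is needed. */
static void demo_publish(struct foo *p)
{
	p->a = 1;
	rcu_assign_pointer(gp, p);
}

/* Reader: rcu_dereference() pairs with the publication above, so a
 * non-NULL pointer is guaranteed to see the initialized field. */
static int demo_read(void)
{
	struct foo *p;
	int val = 0;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		val = p->a;
	rcu_read_unlock();

	return val;
}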
lib/test_bpf.c
@@ -1685,6 +1685,126 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 0, 0x35d97ef2 } }
 	},
+	{	/* Mainly checking JIT here. */
+		"MOV REG64",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+			BPF_MOV64_REG(R1, R0),
+			BPF_MOV64_REG(R2, R1),
+			BPF_MOV64_REG(R3, R2),
+			BPF_MOV64_REG(R4, R3),
+			BPF_MOV64_REG(R5, R4),
+			BPF_MOV64_REG(R6, R5),
+			BPF_MOV64_REG(R7, R6),
+			BPF_MOV64_REG(R8, R7),
+			BPF_MOV64_REG(R9, R8),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_ALU64_IMM(BPF_MOV, R1, 0),
+			BPF_ALU64_IMM(BPF_MOV, R2, 0),
+			BPF_ALU64_IMM(BPF_MOV, R3, 0),
+			BPF_ALU64_IMM(BPF_MOV, R4, 0),
+			BPF_ALU64_IMM(BPF_MOV, R5, 0),
+			BPF_ALU64_IMM(BPF_MOV, R6, 0),
+			BPF_ALU64_IMM(BPF_MOV, R7, 0),
+			BPF_ALU64_IMM(BPF_MOV, R8, 0),
+			BPF_ALU64_IMM(BPF_MOV, R9, 0),
+			BPF_ALU64_REG(BPF_ADD, R0, R0),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_ALU64_REG(BPF_ADD, R0, R2),
+			BPF_ALU64_REG(BPF_ADD, R0, R3),
+			BPF_ALU64_REG(BPF_ADD, R0, R4),
+			BPF_ALU64_REG(BPF_ADD, R0, R5),
+			BPF_ALU64_REG(BPF_ADD, R0, R6),
+			BPF_ALU64_REG(BPF_ADD, R0, R7),
+			BPF_ALU64_REG(BPF_ADD, R0, R8),
+			BPF_ALU64_REG(BPF_ADD, R0, R9),
+			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfefe } }
+	},
+	{	/* Mainly checking JIT here. */
+		"MOV REG32",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+			BPF_MOV64_REG(R1, R0),
+			BPF_MOV64_REG(R2, R1),
+			BPF_MOV64_REG(R3, R2),
+			BPF_MOV64_REG(R4, R3),
+			BPF_MOV64_REG(R5, R4),
+			BPF_MOV64_REG(R6, R5),
+			BPF_MOV64_REG(R7, R6),
+			BPF_MOV64_REG(R8, R7),
+			BPF_MOV64_REG(R9, R8),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0),
+			BPF_ALU32_IMM(BPF_MOV, R2, 0),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0),
+			BPF_ALU32_IMM(BPF_MOV, R4, 0),
+			BPF_ALU32_IMM(BPF_MOV, R5, 0),
+			BPF_ALU32_IMM(BPF_MOV, R6, 0),
+			BPF_ALU32_IMM(BPF_MOV, R7, 0),
+			BPF_ALU32_IMM(BPF_MOV, R8, 0),
+			BPF_ALU32_IMM(BPF_MOV, R9, 0),
+			BPF_ALU64_REG(BPF_ADD, R0, R0),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_ALU64_REG(BPF_ADD, R0, R2),
+			BPF_ALU64_REG(BPF_ADD, R0, R3),
+			BPF_ALU64_REG(BPF_ADD, R0, R4),
+			BPF_ALU64_REG(BPF_ADD, R0, R5),
+			BPF_ALU64_REG(BPF_ADD, R0, R6),
+			BPF_ALU64_REG(BPF_ADD, R0, R7),
+			BPF_ALU64_REG(BPF_ADD, R0, R8),
+			BPF_ALU64_REG(BPF_ADD, R0, R9),
+			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfefe } }
+	},
+	{	/* Mainly checking JIT here. */
+		"LD IMM64",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+			BPF_MOV64_REG(R1, R0),
+			BPF_MOV64_REG(R2, R1),
+			BPF_MOV64_REG(R3, R2),
+			BPF_MOV64_REG(R4, R3),
+			BPF_MOV64_REG(R5, R4),
+			BPF_MOV64_REG(R6, R5),
+			BPF_MOV64_REG(R7, R6),
+			BPF_MOV64_REG(R8, R7),
+			BPF_MOV64_REG(R9, R8),
+			BPF_LD_IMM64(R0, 0x0LL),
+			BPF_LD_IMM64(R1, 0x0LL),
+			BPF_LD_IMM64(R2, 0x0LL),
+			BPF_LD_IMM64(R3, 0x0LL),
+			BPF_LD_IMM64(R4, 0x0LL),
+			BPF_LD_IMM64(R5, 0x0LL),
+			BPF_LD_IMM64(R6, 0x0LL),
+			BPF_LD_IMM64(R7, 0x0LL),
+			BPF_LD_IMM64(R8, 0x0LL),
+			BPF_LD_IMM64(R9, 0x0LL),
+			BPF_ALU64_REG(BPF_ADD, R0, R0),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_ALU64_REG(BPF_ADD, R0, R2),
+			BPF_ALU64_REG(BPF_ADD, R0, R3),
+			BPF_ALU64_REG(BPF_ADD, R0, R4),
+			BPF_ALU64_REG(BPF_ADD, R0, R5),
+			BPF_ALU64_REG(BPF_ADD, R0, R6),
+			BPF_ALU64_REG(BPF_ADD, R0, R7),
+			BPF_ALU64_REG(BPF_ADD, R0, R8),
+			BPF_ALU64_REG(BPF_ADD, R0, R9),
+			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfefe } }
+	},
 	{
 		"INT: ALU MIX",
 		.u.insns_int = {
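All three tests follow the same arithmetic: load all-ones into R0, fan it through R1-R9 with the MOV variant under test, zero every register, sum R0-R9 into R0 (still zero), then add 0xfefe, so each expects exit code 0xfefe. A plain C analogue of the "MOV REG64" flow (illustrative only, not kernel code):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t r[10];
	int i;

	r[0] = 0xffffffffffffffffULL;	/* BPF_LD_IMM64(R0, ~0) */
	for (i = 1; i < 10; i++)	/* BPF_MOV64_REG chain R1..R9 */
		r[i] = r[i - 1];
	for (i = 0; i < 10; i++)	/* zero every register */
		r[i] = 0;
	for (i = 0; i < 10; i++)	/* sum into R0: still 0 */
		r[0] += r[i];
	r[0] += 0xfefe;			/* expected exit value */
	assert(r[0] == 0xfefe);
	return 0;
}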
lib/test_rhashtable.c
@@ -36,9 +36,9 @@ static int runs = 4;
 module_param(runs, int, 0);
 MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
 
-static int max_size = 65536;
+static int max_size = 0;
 module_param(max_size, int, 0);
-MODULE_PARM_DESC(runs, "Maximum table size (default: 65536)");
+MODULE_PARM_DESC(runs, "Maximum table size (default: calculated)");
 
 static bool shrinking = false;
 module_param(shrinking, bool, 0);
@@ -52,6 +52,10 @@ static int tcount = 10;
 module_param(tcount, int, 0);
 MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");
 
+static bool enomem_retry = false;
+module_param(enomem_retry, bool, 0);
+MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)");
+
 struct test_obj {
 	int			value;
 	struct rhash_head	node;
@@ -76,6 +80,28 @@ static struct rhashtable_params test_rht_params = {
 static struct semaphore prestart_sem;
 static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
 
+static int insert_retry(struct rhashtable *ht, struct rhash_head *obj,
+			const struct rhashtable_params params)
+{
+	int err, retries = -1, enomem_retries = 0;
+
+	do {
+		retries++;
+		cond_resched();
+		err = rhashtable_insert_fast(ht, obj, params);
+		if (err == -ENOMEM && enomem_retry) {
+			enomem_retries++;
+			err = -EBUSY;
+		}
+	} while (err == -EBUSY);
+
+	if (enomem_retries)
+		pr_info(" %u insertions retried after -ENOMEM\n",
+			enomem_retries);
+
+	return err ? : retries;
+}
+
 static int __init test_rht_lookup(struct rhashtable *ht)
 {
 	unsigned int i;
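insert_retry() returns a negative errno on failure and otherwise the number of -EBUSY retries, using GNU C's binary "?:" operator. A small userspace illustration (pick() is an invented helper):

#include <stdio.h>

/* GNU C's "a ? : b" evaluates a once and yields it when nonzero, else
 * b; this is exactly insert_retry()'s return expression. */
static int pick(int err, int retries)
{
	return err ? : retries;
}

int main(void)
{
	printf("%d\n", pick(0, 3));	/* 3: success, report retry count */
	printf("%d\n", pick(-12, 3));	/* -12: propagate the errno */
	return 0;
}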
@@ -157,7 +183,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 {
 	struct test_obj *obj;
 	int err;
-	unsigned int i, insert_fails = 0;
+	unsigned int i, insert_retries = 0;
 	s64 start, end;
 
 	/*
@@ -170,22 +196,16 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 		struct test_obj *obj = &array[i];
 
 		obj->value = i * 2;
 
-		err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
-		if (err == -ENOMEM || err == -EBUSY) {
-			/* Mark failed inserts but continue */
-			obj->value = TEST_INSERT_FAIL;
-			insert_fails++;
-		} else if (err) {
+		err = insert_retry(ht, &obj->node, test_rht_params);
+		if (err > 0)
+			insert_retries += err;
+		else if (err)
 			return err;
-		}
-
-		cond_resched();
 	}
 
-	if (insert_fails)
-		pr_info("  %u insertions failed due to memory pressure\n",
-			insert_fails);
+	if (insert_retries)
+		pr_info("  %u insertions retried due to memory pressure\n",
+			insert_retries);
 
 	test_bucket_stats(ht);
 	rcu_read_lock();
@@ -236,13 +256,15 @@ static int thread_lookup_test(struct thread_data *tdata)
 				obj->value, key);
 			err++;
 		}
+
+		cond_resched();
 	}
 	return err;
 }
 
 static int threadfunc(void *data)
 {
-	int i, step, err = 0, insert_fails = 0;
+	int i, step, err = 0, insert_retries = 0;
 	struct thread_data *tdata = data;
 
 	up(&prestart_sem);
@@ -251,20 +273,18 @@ static int threadfunc(void *data)
 
 	for (i = 0; i < entries; i++) {
 		tdata->objs[i].value = (tdata->id << 16) | i;
-		err = rhashtable_insert_fast(&ht, &tdata->objs[i].node,
-					     test_rht_params);
-		if (err == -ENOMEM || err == -EBUSY) {
-			tdata->objs[i].value = TEST_INSERT_FAIL;
-			insert_fails++;
+		err = insert_retry(&ht, &tdata->objs[i].node, test_rht_params);
+		if (err > 0) {
+			insert_retries += err;
 		} else if (err) {
 			pr_err("  thread[%d]: rhashtable_insert_fast failed\n",
 			       tdata->id);
 			goto out;
 		}
 	}
-	if (insert_fails)
-		pr_info("  thread[%d]: %d insert failures\n",
-			tdata->id, insert_fails);
+	if (insert_retries)
+		pr_info("  thread[%d]: %u insertions retried due to memory pressure\n",
+			tdata->id, insert_retries);
 
 	err = thread_lookup_test(tdata);
 	if (err) {
@@ -285,6 +305,8 @@ static int threadfunc(void *data)
 			goto out;
 		}
 		tdata->objs[i].value = TEST_INSERT_FAIL;
+
+		cond_resched();
 	}
 	err = thread_lookup_test(tdata);
 	if (err) {
@@ -311,7 +333,7 @@ static int __init test_rht_init(void)
 	entries = min(entries, MAX_ENTRIES);
 
 	test_rht_params.automatic_shrinking = shrinking;
-	test_rht_params.max_size = max_size;
+	test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries);
 	test_rht_params.nelem_hint = size;
 
 	pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
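With max_size left at 0, the table limit now defaults to the next power of two at or above the entry count, computed by roundup_pow_of_two(). A hedged one-liner with an invented name:

#include <linux/log2.h>

/* demo_default_max_size() is illustrative only: roundup_pow_of_two()
 * returns the smallest power of two >= its argument, e.g. 50000 -> 65536. */
static unsigned long demo_default_max_size(unsigned int entries)
{
	return roundup_pow_of_two(entries);
}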
@@ -357,6 +379,8 @@ static int __init test_rht_init(void)
 		return -ENOMEM;
 	}
 
+	test_rht_params.max_size = max_size ? :
+				   roundup_pow_of_two(tcount * entries);
 	err = rhashtable_init(&ht, &test_rht_params);
 	if (err < 0) {
 		pr_warn("Test failed: Unable to initialize hashtable: %d\n",