Merge tag v4.15 of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
To resolve conflicts in:

    drivers/infiniband/hw/mlx5/main.c
    drivers/infiniband/hw/mlx5/qp.c

from patches merged into the -rc cycle. The conflict resolution matches
what linux-next has been carrying.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
@@ -1099,8 +1099,6 @@ config PROVE_LOCKING
 	select DEBUG_MUTEXES
 	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
-	select LOCKDEP_CROSSRELEASE
-	select LOCKDEP_COMPLETIONS
 	select TRACE_IRQFLAGS
 	default n
 	help
@@ -1170,37 +1168,6 @@ config LOCK_STAT
 	 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
 	 (CONFIG_LOCKDEP defines "acquire" and "release" events.)
 
-config LOCKDEP_CROSSRELEASE
-	bool
-	help
-	 This makes lockdep work for crosslock which is a lock allowed to
-	 be released in a different context from the acquisition context.
-	 Normally a lock must be released in the context acquiring the lock.
-	 However, relexing this constraint helps synchronization primitives
-	 such as page locks or completions can use the lock correctness
-	 detector, lockdep.
-
-config LOCKDEP_COMPLETIONS
-	bool
-	help
-	 A deadlock caused by wait_for_completion() and complete() can be
-	 detected by lockdep using crossrelease feature.
-
-config BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
-	bool "Enable the boot parameter, crossrelease_fullstack"
-	depends on LOCKDEP_CROSSRELEASE
-	default n
-	help
-	 The lockdep "cross-release" feature needs to record stack traces
-	 (of calling functions) for all acquisitions, for eventual later
-	 use during analysis. By default only a single caller is recorded,
-	 because the unwind operation can be very expensive with deeper
-	 stack chains.
-
-	 However a boot parameter, crossrelease_fullstack, was
-	 introduced since sometimes deeper traces are required for full
-	 analysis. This option turns on the boot parameter.
-
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
@@ -313,42 +313,47 @@ next_op:
 
 	/* Decide how to handle the operation */
 	switch (op) {
-	case ASN1_OP_MATCH_ANY_ACT:
-	case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
-	case ASN1_OP_COND_MATCH_ANY_ACT:
-	case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
-		ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
-		if (ret < 0)
-			return ret;
-		goto skip_data;
-
-	case ASN1_OP_MATCH_ACT:
-	case ASN1_OP_MATCH_ACT_OR_SKIP:
-	case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
-		ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
-		if (ret < 0)
-			return ret;
-		goto skip_data;
-
 	case ASN1_OP_MATCH:
 	case ASN1_OP_MATCH_OR_SKIP:
+	case ASN1_OP_MATCH_ACT:
+	case ASN1_OP_MATCH_ACT_OR_SKIP:
 	case ASN1_OP_MATCH_ANY:
 	case ASN1_OP_MATCH_ANY_OR_SKIP:
+	case ASN1_OP_MATCH_ANY_ACT:
+	case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
 	case ASN1_OP_COND_MATCH_OR_SKIP:
+	case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
 	case ASN1_OP_COND_MATCH_ANY:
 	case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
-	skip_data:
+	case ASN1_OP_COND_MATCH_ANY_ACT:
+	case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
+
 		if (!(flags & FLAG_CONS)) {
 			if (flags & FLAG_INDEFINITE_LENGTH) {
+				size_t tmp = dp;
+
 				ret = asn1_find_indefinite_length(
-					data, datalen, &dp, &len, &errmsg);
+					data, datalen, &tmp, &len, &errmsg);
 				if (ret < 0)
 					goto error;
-			} else {
-				dp += len;
 			}
 			pr_debug("- LEAF: %zu\n", len);
 		}
+
+		if (op & ASN1_OP_MATCH__ACT) {
+			unsigned char act;
+
+			if (op & ASN1_OP_MATCH__ANY)
+				act = machine[pc + 1];
+			else
+				act = machine[pc + 2];
+			ret = actions[act](context, hdr, tag, data + dp, len);
+			if (ret < 0)
+				return ret;
+		}
+
+		if (!(flags & FLAG_CONS))
+			dp += len;
 		pc += asn1_op_lengths[op];
 		goto next_op;
 
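The restructured leaf handling above collapses all of the MATCH variants into a single case list and picks the action operand from the opcode's flag bits, instead of duplicating the action call per case group. A minimal userspace sketch of that flag-driven slot selection (the flag values and the tiny byte-code array below are illustrative stand-ins, not the kernel's asn1_ber_bytecode.h definitions):

#include <stdio.h>

/* Illustrative stand-ins; the kernel defines the real opcode flag bits in
 * include/linux/asn1_ber_bytecode.h.
 */
#define OP_MATCH__ACT	0x02
#define OP_MATCH__ANY	0x04

/* For an ACT-flavoured MATCH op, the action index sits at pc + 1 when the op
 * is an "ANY" match (no tag operand), otherwise at pc + 2.
 */
static unsigned char action_slot(unsigned char op, const unsigned char *machine,
				 size_t pc)
{
	return (op & OP_MATCH__ANY) ? machine[pc + 1] : machine[pc + 2];
}

int main(void)
{
	/* byte-code: opcode followed by its operands (tag and/or action index) */
	const unsigned char machine[] = { 0x00, 0x07, 0x09 };

	printf("tagged op action index: %u\n",
	       action_slot(OP_MATCH__ACT, machine, 0));
	printf("ANY op action index:    %u\n",
	       action_slot(OP_MATCH__ACT | OP_MATCH__ANY, machine, 0));
	return 0;
}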
@@ -434,6 +439,8 @@ next_op:
 		else
 			act = machine[pc + 1];
-		actions[act](context, hdr, 0, data + tdp, len);
+		ret = actions[act](context, hdr, 0, data + tdp, len);
+		if (ret < 0)
+			return ret;
 	}
 	pc += asn1_op_lengths[op];
 	goto next_op;
@@ -346,7 +346,8 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
 static void zap_modalias_env(struct kobj_uevent_env *env)
 {
 	static const char modalias_prefix[] = "MODALIAS=";
-	int i;
+	size_t len;
+	int i, j;
 
 	for (i = 0; i < env->envp_idx;) {
 		if (strncmp(env->envp[i], modalias_prefix,
@@ -355,11 +356,18 @@ static void zap_modalias_env(struct kobj_uevent_env *env)
 			continue;
 		}
 
-		if (i != env->envp_idx - 1)
-			memmove(&env->envp[i], &env->envp[i + 1],
-				sizeof(env->envp[i]) * env->envp_idx - 1);
+		len = strlen(env->envp[i]) + 1;
+
+		if (i != env->envp_idx - 1) {
+			memmove(env->envp[i], env->envp[i + 1],
+				env->buflen - len);
+
+			for (j = i; j < env->envp_idx - 1; j++)
+				env->envp[j] = env->envp[j + 1] - len;
+		}
 
 		env->envp_idx--;
+		env->buflen -= len;
 	}
 }
 
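The rewritten zap_modalias_env() above no longer only shuffles the envp pointer array: it also compacts the string buffer itself and rebases every later pointer by the length of the removed entry, keeping buflen consistent. A self-contained userspace sketch of that compaction under the same layout assumptions (pointers into one flat buffer); the struct and helper names here are invented for illustration:

#include <stdio.h>
#include <string.h>

#define ENV_MAX	8
#define BUF_MAX	256

struct env {
	char *envp[ENV_MAX];	/* pointers into buf, like kobj_uevent_env */
	int envp_idx;
	size_t buflen;
	char buf[BUF_MAX];
};

static void env_add(struct env *env, const char *s)
{
	size_t len = strlen(s) + 1;

	env->envp[env->envp_idx++] = env->buf + env->buflen;
	memcpy(env->buf + env->buflen, s, len);
	env->buflen += len;
}

/* Drop entry i: slide the remaining strings down and rebase the pointers. */
static void env_remove(struct env *env, int i)
{
	size_t len = strlen(env->envp[i]) + 1;	/* entry plus its NUL */
	int j;

	if (i != env->envp_idx - 1) {
		/* bytes still in use after the entry being removed */
		size_t tail = env->buflen - (size_t)(env->envp[i + 1] - env->buf);

		memmove(env->envp[i], env->envp[i + 1], tail);
		for (j = i; j < env->envp_idx - 1; j++)
			env->envp[j] = env->envp[j + 1] - len;
	}
	env->envp_idx--;
	env->buflen -= len;
}

int main(void)
{
	struct env env = { .envp_idx = 0, .buflen = 0 };
	int i;

	env_add(&env, "ACTION=add");
	env_add(&env, "MODALIAS=pci:v00008086d000015B8");
	env_add(&env, "SEQNUM=42");

	env_remove(&env, 1);	/* drop the MODALIAS entry */

	for (i = 0; i < env.envp_idx; i++)
		printf("%s\n", env.envp[i]);
	return 0;
}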
@@ -671,7 +671,23 @@ do { \
 **************  MIPS/64  **************
 ***************************************/
 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+/*
+ * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
+ * code below, so we special case MIPS64r6 until the compiler can do better.
+ */
+#define umul_ppmm(w1, w0, u, v)						\
+do {									\
+	__asm__ ("dmulu %0,%1,%2"					\
+		 : "=d" ((UDItype)(w0))					\
+		 : "d" ((UDItype)(u)),					\
+		   "d" ((UDItype)(v)));					\
+	__asm__ ("dmuhu %0,%1,%2"					\
+		 : "=d" ((UDItype)(w1))					\
+		 : "d" ((UDItype)(u)),					\
+		   "d" ((UDItype)(v)));					\
+} while (0)
+#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
 #define umul_ppmm(w1, w0, u, v) \
 do {									\
 	typedef unsigned int __ll_UTItype __attribute__((mode(TI)));	\
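The new MIPS64r6 branch above splits the 64x64 multiplication into two instructions, dmulu for the low word and dmuhu for the high word. A userspace sketch of the result umul_ppmm() must produce, expressed with the compiler's unsigned __int128 instead of inline assembly (assumes a 64-bit GCC or Clang host; the helper name is local to this example):

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Portable stand-in for umul_ppmm(): split the 64x64 -> 128-bit product of
 * u and v into a high word (w1) and a low word (w0), as the dmuhu/dmulu
 * instruction pair does on MIPS64r6.
 */
static void umul_ppmm_ref(uint64_t *w1, uint64_t *w0, uint64_t u, uint64_t v)
{
	unsigned __int128 p = (unsigned __int128)u * v;

	*w0 = (uint64_t)p;
	*w1 = (uint64_t)(p >> 64);
}

int main(void)
{
	uint64_t hi, lo;

	umul_ppmm_ref(&hi, &lo, 0xdeadbeefcafebabeULL, 0x123456789abcdef0ULL);
	printf("high=%016llx low=%016llx\n",
	       (unsigned long long)hi, (unsigned long long)lo);

	/* sanity check against a trivial case: 2^32 * 2^32 = 2^64 */
	umul_ppmm_ref(&hi, &lo, 1ULL << 32, 1ULL << 32);
	assert(hi == 1 && lo == 0);
	return 0;
}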
lib/nlattr.c
@@ -15,7 +15,11 @@
 #include <linux/types.h>
 #include <net/netlink.h>
 
-/* for these data types attribute length must be exactly given size */
+/* For these data types, attribute length should be exactly the given
+ * size. However, to maintain compatibility with broken commands, if the
+ * attribute length does not match the expected size a warning is emitted
+ * to the user that the command is sending invalid data and needs to be fixed.
+ */
 static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
 	[NLA_U8]	= sizeof(u8),
 	[NLA_U16]	= sizeof(u16),
@@ -28,8 +32,16 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
 };
 
 static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
+	[NLA_U8]	= sizeof(u8),
+	[NLA_U16]	= sizeof(u16),
+	[NLA_U32]	= sizeof(u32),
+	[NLA_U64]	= sizeof(u64),
 	[NLA_MSECS]	= sizeof(u64),
 	[NLA_NESTED]	= NLA_HDRLEN,
+	[NLA_S8]	= sizeof(s8),
+	[NLA_S16]	= sizeof(s16),
+	[NLA_S32]	= sizeof(s32),
+	[NLA_S64]	= sizeof(s64),
 };
 
 static int validate_nla_bitfield32(const struct nlattr *nla,
@@ -69,11 +81,9 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
 
 	BUG_ON(pt->type > NLA_TYPE_MAX);
 
-	/* for data types NLA_U* and NLA_S* require exact length */
-	if (nla_attr_len[pt->type]) {
-		if (attrlen != nla_attr_len[pt->type])
-			return -ERANGE;
-		return 0;
+	if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
+		pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
+				    current->comm, type);
 	}
 
 	switch (pt->type) {
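With the relaxed check above, a fixed-size attribute whose length does not match nla_attr_len[] now only triggers a rate-limited warning and then falls through to the ordinary minimum-length validation, instead of failing with -ERANGE. A userspace sketch of that policy with local stand-in values (check_len() and its arguments are hypothetical, not the kernel's validate_nla()):

#include <stdio.h>

/* Stand-in for the relaxed rule: a wrong exact length is only reported,
 * while a length below the minimum is still rejected.
 */
static int check_len(int type, int attrlen, int exact_len, int minlen)
{
	if (exact_len && attrlen != exact_len)
		fprintf(stderr,
			"warning: attribute type %d has an invalid length\n",
			type);

	return attrlen < minlen ? -1 : 0;
}

int main(void)
{
	/* An over-long u32 (exact/minimum length 4, sent with 8 bytes) is
	 * warned about but still accepted for compatibility; a short one
	 * is still rejected by the minlen check.
	 */
	printf("8-byte u32 -> %d\n", check_len(3, 8, 4, 4));
	printf("2-byte u32 -> %d\n", check_len(3, 2, 4, 4));
	return 0;
}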
@@ -116,14 +116,14 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
 	int count;
 
 	if (v >= end)
-		return -EBADMSG;
+		goto bad;
 
 	n = *v++;
 	ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40);
+	if (count >= bufsize)
+		return -ENOBUFS;
 	buffer += count;
 	bufsize -= count;
-	if (bufsize == 0)
-		return -ENOBUFS;
 
 	while (v < end) {
 		num = 0;
@@ -134,20 +134,24 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
 			num = n & 0x7f;
 			do {
 				if (v >= end)
-					return -EBADMSG;
+					goto bad;
 				n = *v++;
 				num <<= 7;
 				num |= n & 0x7f;
 			} while (n & 0x80);
 		}
 		ret += count = snprintf(buffer, bufsize, ".%lu", num);
-		buffer += count;
-		if (bufsize <= count)
+		if (count >= bufsize)
 			return -ENOBUFS;
+		buffer += count;
 		bufsize -= count;
 	}
 
 	return ret;
+
+bad:
+	snprintf(buffer, bufsize, "(bad)");
+	return -EBADMSG;
 }
 EXPORT_SYMBOL_GPL(sprint_oid);
 
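Both sprint_oid() changes above tighten the same two patterns: subidentifiers after the first byte are decoded base-128 (seven data bits per byte, top bit as continuation flag) and must not run past the end of the input, and every snprintf() count has to be compared against the remaining buffer before the cursor advances. A userspace sketch of the decode side (decode_subid() is a hypothetical helper, not the kernel function):

#include <stdio.h>

/* Decode one base-128 subidentifier starting at *pp; returns -1 if the
 * encoding runs past 'end' (the case the new "goto bad" path reports).
 */
static long decode_subid(const unsigned char **pp, const unsigned char *end)
{
	const unsigned char *p = *pp;
	unsigned long num = 0;
	unsigned char n;

	do {
		if (p >= end)
			return -1;
		n = *p++;
		num = (num << 7) | (n & 0x7f);
	} while (n & 0x80);

	*pp = p;
	return (long)num;
}

int main(void)
{
	/* 2.5.4.3 (commonName) is encoded as 0x55 0x04 0x03 */
	static const unsigned char oid[] = { 0x55, 0x04, 0x03 };
	const unsigned char *p = oid + 1;
	const unsigned char *end = oid + sizeof(oid);

	printf("%u.%u", oid[0] / 40, oid[0] % 40);
	while (p < end) {
		long id = decode_subid(&p, end);

		if (id < 0)
			break;	/* truncated encoding */
		printf(".%ld", id);
	}
	printf("\n");
	return 0;
}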
lib/rbtree.c
@@ -603,6 +603,16 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
 }
 EXPORT_SYMBOL(rb_replace_node);
 
+void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+			    struct rb_root_cached *root)
+{
+	rb_replace_node(victim, new, &root->rb_root);
+
+	if (root->rb_leftmost == victim)
+		root->rb_leftmost = new;
+}
+EXPORT_SYMBOL(rb_replace_node_cached);
+
 void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
 			 struct rb_root *root)
 {
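The added rb_replace_node_cached() lets users of the leftmost-cached rbtree variant swap one node for another in place, without re-inserting and without invalidating the cached leftmost entry. A hedged kernel-style sketch of a caller (struct item and its key field are invented for illustration; it assumes the replacement sorts identically to the victim, which is the precondition for any rb_replace_node variant):

#include <linux/rbtree.h>
#include <linux/types.h>

/* Sketch only: a container object embedded in an rb_root_cached tree. */
struct item {
	struct rb_node node;
	u64 key;
};

static void item_swap_in_place(struct rb_root_cached *tree,
			       struct item *old, struct item *new)
{
	/* Same key, so the tree order is unchanged and no re-insert is
	 * needed; the helper fixes up rb_leftmost if 'old' was cached there.
	 */
	new->key = old->key;
	rb_replace_node_cached(&old->node, &new->node, tree);
}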
@@ -435,6 +435,41 @@ loop:
 	return 0;
 }
 
+static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
+{
+	struct bpf_insn *insn;
+
+	insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	/* Due to func address being non-const, we need to
+	 * assemble this here.
+	 */
+	insn[0] = BPF_MOV64_REG(R6, R1);
+	insn[1] = BPF_LD_ABS(BPF_B, 0);
+	insn[2] = BPF_LD_ABS(BPF_H, 0);
+	insn[3] = BPF_LD_ABS(BPF_W, 0);
+	insn[4] = BPF_MOV64_REG(R7, R6);
+	insn[5] = BPF_MOV64_IMM(R6, 0);
+	insn[6] = BPF_MOV64_REG(R1, R7);
+	insn[7] = BPF_MOV64_IMM(R2, 1);
+	insn[8] = BPF_MOV64_IMM(R3, 2);
+	insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+			       bpf_skb_vlan_push_proto.func - __bpf_call_base);
+	insn[10] = BPF_MOV64_REG(R6, R7);
+	insn[11] = BPF_LD_ABS(BPF_B, 0);
+	insn[12] = BPF_LD_ABS(BPF_H, 0);
+	insn[13] = BPF_LD_ABS(BPF_W, 0);
+	insn[14] = BPF_MOV64_IMM(R0, 42);
+	insn[15] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = 16;
+
+	return 0;
+}
+
 static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
 {
 	unsigned int len = BPF_MAXINSNS;
@@ -6066,6 +6101,14 @@ static struct bpf_test tests[] = {
 		{},
 		{ {0x1, 0x42 } },
 	},
+	{
+		"LD_ABS with helper changing skb data",
+		{ },
+		INTERNAL,
+		{ 0x34 },
+		{ { ETH_HLEN, 42 } },
+		.fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
+	},
 };
 
 static struct net_device dev;
@@ -6207,9 +6250,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
 				return NULL;
 			}
 		}
-		/* We don't expect to fail. */
 		if (*err) {
-			pr_cont("FAIL to attach err=%d len=%d\n",
+			pr_cont("FAIL to prog_create err=%d len=%d\n",
 				*err, fprog.len);
 			return NULL;
 		}
@@ -6233,6 +6275,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
 		 * checks.
 		 */
 		fp = bpf_prog_select_runtime(fp, err);
+		if (*err) {
+			pr_cont("FAIL to select_runtime err=%d\n", *err);
+			return NULL;
+		}
 		break;
 	}
 
@@ -6418,8 +6464,8 @@ static __init int test_bpf(void)
 			pass_cnt++;
 			continue;
 		}
-
-		return err;
+		err_cnt++;
+		continue;
 	}
 
 	pr_cont("jited:%u ", fp->jited);
@@ -33,8 +33,9 @@
  * @head: head of timerqueue
  * @node: timer node to be added
  *
- * Adds the timer node to the timerqueue, sorted by the
- * node's expires value.
+ * Adds the timer node to the timerqueue, sorted by the node's expires
+ * value. Returns true if the newly added timer is the first expiring timer in
+ * the queue.
  */
 bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
 {
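The clarified kernel-doc above spells out the return value: timerqueue_add() reports whether the inserted node became the new earliest-expiring entry, which is exactly the case where a caller must reprogram its underlying hardware timer. A hedged kernel-style sketch of that pattern (reprogram_hw_timer() is a hypothetical callback, not a kernel API):

#include <linux/timerqueue.h>

/* Hypothetical hardware reprogramming hook for this sketch. */
extern void reprogram_hw_timer(ktime_t expires);

/* Only touch the hardware when the queue's earliest deadline changed. */
static void my_timer_enqueue(struct timerqueue_head *head,
			     struct timerqueue_node *node, ktime_t expires)
{
	node->expires = expires;

	if (timerqueue_add(head, node))
		reprogram_hw_timer(expires);
}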
@@ -70,7 +71,8 @@ EXPORT_SYMBOL_GPL(timerqueue_add);
  * @head: head of timerqueue
  * @node: timer node to be removed
  *
- * Removes the timer node from the timerqueue.
+ * Removes the timer node from the timerqueue. Returns true if the queue is
+ * not empty after the remove.
  */
 bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
 {