Merge tag 'v4.11-rc7' into drm-next
Backmerge Linux 4.11-rc7 from Linus tree, to fix some conflicts that were causing problems with the rerere cache in drm-tip.
@@ -160,7 +160,6 @@ static LIST_HEAD(audit_freelist);

/* queue msgs to send via kauditd_task */
static struct sk_buff_head audit_queue;
static void kauditd_hold_skb(struct sk_buff *skb);
/* queue msgs due to temporary unicast send problems */
static struct sk_buff_head audit_retry_queue;
/* queue msgs waiting for new auditd connection */
@@ -453,30 +452,6 @@ static void auditd_set(int pid, u32 portid, struct net *net)
        spin_unlock_irqrestore(&auditd_conn.lock, flags);
}

/**
 * auditd_reset - Disconnect the auditd connection
 *
 * Description:
 * Break the auditd/kauditd connection and move all the queued records into the
 * hold queue in case auditd reconnects.
 */
static void auditd_reset(void)
{
        struct sk_buff *skb;

        /* if it isn't already broken, break the connection */
        rcu_read_lock();
        if (auditd_conn.pid)
                auditd_set(0, 0, NULL);
        rcu_read_unlock();

        /* flush all of the main and retry queues to the hold queue */
        while ((skb = skb_dequeue(&audit_retry_queue)))
                kauditd_hold_skb(skb);
        while ((skb = skb_dequeue(&audit_queue)))
                kauditd_hold_skb(skb);
}

/**
 * kauditd_print_skb - Print the audit record to the ring buffer
 * @skb: audit record
@@ -505,9 +480,6 @@ static void kauditd_rehold_skb(struct sk_buff *skb)
{
        /* put the record back in the queue at the same place */
        skb_queue_head(&audit_hold_queue, skb);

        /* fail the auditd connection */
        auditd_reset();
}

/**
@@ -544,9 +516,6 @@ static void kauditd_hold_skb(struct sk_buff *skb)
        /* we have no other options - drop the message */
        audit_log_lost("kauditd hold queue overflow");
        kfree_skb(skb);

        /* fail the auditd connection */
        auditd_reset();
}

/**
@@ -566,6 +535,30 @@ static void kauditd_retry_skb(struct sk_buff *skb)
        skb_queue_tail(&audit_retry_queue, skb);
}

/**
 * auditd_reset - Disconnect the auditd connection
 *
 * Description:
 * Break the auditd/kauditd connection and move all the queued records into the
 * hold queue in case auditd reconnects.
 */
static void auditd_reset(void)
{
        struct sk_buff *skb;

        /* if it isn't already broken, break the connection */
        rcu_read_lock();
        if (auditd_conn.pid)
                auditd_set(0, 0, NULL);
        rcu_read_unlock();

        /* flush all of the main and retry queues to the hold queue */
        while ((skb = skb_dequeue(&audit_retry_queue)))
                kauditd_hold_skb(skb);
        while ((skb = skb_dequeue(&audit_queue)))
                kauditd_hold_skb(skb);
}

/**
 * auditd_send_unicast_skb - Send a record via unicast to auditd
 * @skb: audit record
@@ -758,6 +751,7 @@ static int kauditd_thread(void *dummy)
                                        NULL, kauditd_rehold_skb);
                if (rc < 0) {
                        sk = NULL;
                        auditd_reset();
                        goto main_queue;
                }

@@ -767,6 +761,7 @@ static int kauditd_thread(void *dummy)
                                        NULL, kauditd_hold_skb);
                if (rc < 0) {
                        sk = NULL;
                        auditd_reset();
                        goto main_queue;
                }

@@ -775,16 +770,18 @@ main_queue:
                 * unicast, dump failed record sends to the retry queue; if
                 * sk == NULL due to previous failures we will just do the
                 * multicast send and move the record to the retry queue */
                kauditd_send_queue(sk, portid, &audit_queue, 1,
                                   kauditd_send_multicast_skb,
                                   kauditd_retry_skb);
                rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
                                        kauditd_send_multicast_skb,
                                        kauditd_retry_skb);
                if (sk == NULL || rc < 0)
                        auditd_reset();
                sk = NULL;

                /* drop our netns reference, no auditd sends past this line */
                if (net) {
                        put_net(net);
                        net = NULL;
                }
                sk = NULL;

                /* we have processed all the queues so wake everyone */
                wake_up(&audit_backlog_wait);
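Note on the audit hunks above: the resolved kauditd_thread() keeps the return code of the main-queue send and, on a failed send or a missing socket, calls auditd_reset(), which drains the retry and main queues into the hold queue so records survive until auditd reconnects. Below is a stand-alone toy model of that drain using plain arrays instead of sk_buff queues; it is an illustration only, and none of these names are kernel APIs.

#include <stdio.h>

/* Toy model: auditd_reset() in the merged code drains both the retry
 * queue and the main queue into the hold queue. */
#define QLEN 4

static int hold[3 * QLEN], hold_n;

static void hold_record(int rec)              /* stands in for kauditd_hold_skb() */
{
        hold[hold_n++] = rec;
}

static void reset(int *retry, int *retry_n, int *queue, int *queue_n)
{
        /* flush all of the main and retry queues to the hold queue */
        while (*retry_n)
                hold_record(retry[--*retry_n]);
        while (*queue_n)
                hold_record(queue[--*queue_n]);
}

int main(void)
{
        int retry[QLEN] = { 1, 2 }, retry_n = 2;
        int queue[QLEN] = { 3, 4, 5 }, queue_n = 3;

        reset(retry, &retry_n, queue, &queue_n);
        printf("held %d records, none dropped\n", hold_n);   /* held 5 records */
        return 0;
}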
@@ -1162,12 +1162,12 @@ out:
        LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
                off = IMM;
load_word:
                /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
                 * only appearing in the programs where ctx ==
                 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
                 * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
                 * internal BPF verifier will check that BPF_R6 ==
                 * ctx.
                /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
                 * appearing in the programs where ctx == skb
                 * (see may_access_skb() in the verifier). All programs
                 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
                 * bpf_convert_filter() saves it in BPF_R6, internal BPF
                 * verifier will check that BPF_R6 == ctx.
                 *
                 * BPF_ABS and BPF_IND are wrappers of function calls,
                 * so they scratch BPF_R1-BPF_R5 registers, preserve
@@ -2425,11 +2425,12 @@ ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
                tsk = tsk->group_leader;

        /*
         * Workqueue threads may acquire PF_NO_SETAFFINITY and become
         * trapped in a cpuset, or RT worker may be born in a cgroup
         * with no rt_runtime allocated. Just say no.
         * kthreads may acquire PF_NO_SETAFFINITY during initialization.
         * If userland migrates such a kthread to a non-root cgroup, it can
         * become trapped in a cpuset, or RT kthread may be born in a
         * cgroup with no rt_runtime allocated. Just say no.
         */
        if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
        if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
                ret = -EINVAL;
                goto out_unlock_rcu;
        }

@@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
struct cpumask *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{
        int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
        int n, nodes, cpus_per_vec, extra_vecs, curvec;
        int affv = nvecs - affd->pre_vectors - affd->post_vectors;
        int last_affv = affv + affd->pre_vectors;
        nodemask_t nodemsk = NODE_MASK_NONE;
@@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                goto done;
        }

        /* Spread the vectors per node */
        vecs_per_node = affv / nodes;
        /* Account for rounding errors */
        extra_vecs = affv - (nodes * vecs_per_node);

        for_each_node_mask(n, nodemsk) {
                int ncpus, v, vecs_to_assign = vecs_per_node;
                int ncpus, v, vecs_to_assign, vecs_per_node;

                /* Spread the vectors per node */
                vecs_per_node = (affv - curvec) / nodes;

                /* Get the cpus on this node which are in the mask */
                cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));

                /* Calculate the number of cpus per vector */
                ncpus = cpumask_weight(nmsk);
                vecs_to_assign = min(vecs_per_node, ncpus);

                /* Account for rounding errors */
                extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);

                for (v = 0; curvec < last_affv && v < vecs_to_assign;
                     curvec++, v++) {
@@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                        /* Account for extra vectors to compensate rounding errors */
                        if (extra_vecs) {
                                cpus_per_vec++;
                                if (!--extra_vecs)
                                        vecs_per_node++;
                                --extra_vecs;
                        }
                        irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
                }

                if (curvec >= last_affv)
                        break;
                --nodes;
        }

done:
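Note on the irq affinity hunks above: the resolved code recomputes vecs_per_node inside the node loop as (affv - curvec) / nodes, and the rounding remainder now comes from the node's own CPU count (extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign)), with the first extra_vecs vectors taking one extra CPU. Below is a stand-alone sketch of that per-node split with made-up example numbers, not values taken from the kernel.

#include <stdio.h>

/* Illustrative only: arithmetic mirroring the per-node remainder split
 * in the merged irq_create_affinity_masks(). */
int main(void)
{
        int ncpus = 10;               /* example: CPUs on this node */
        int vecs_to_assign = 4;       /* example: vectors granted to this node */
        int extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
        int v;

        for (v = 0; v < vecs_to_assign; v++) {
                int cpus_per_vec = ncpus / vecs_to_assign;

                /* the first "extra_vecs" vectors absorb the remainder */
                if (extra_vecs) {
                        cpus_per_vec++;
                        --extra_vecs;
                }
                printf("vector %d -> %d cpus\n", v, cpus_per_vec);
        }
        return 0;     /* prints 3, 3, 2, 2: all 10 CPUs covered */
}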
@@ -20,6 +20,7 @@
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
@@ -225,6 +226,7 @@ static int kthread(void *_create)

        ret = -EINTR;
        if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
                cgroup_kthread_ready();
                __kthread_parkme(self);
                ret = threadfn(data);
        }
@@ -538,6 +540,7 @@ int kthreadd(void *unused)
        set_mems_allowed(node_states[N_MEMORY]);

        current->flags |= PF_NOFREEZE;
        cgroup_init_kthreadd();

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
@@ -3755,23 +3755,24 @@ static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
        ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
static bool __disable_ftrace_function_probe(void)
{
        int i;

        if (!ftrace_probe_registered)
                return;
                return false;

        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];
                if (hhd->first)
                        return;
                        return false;
        }

        /* no more funcs left */
        ftrace_shutdown(&trace_probe_ops, 0);

        ftrace_probe_registered = 0;
        return true;
}

@@ -3901,6 +3902,7 @@ static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                   void *data, int flags)
{
        struct ftrace_ops_hash old_hash_ops;
        struct ftrace_func_entry *rec_entry;
        struct ftrace_func_probe *entry;
        struct ftrace_func_probe *p;
@@ -3912,6 +3914,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        struct hlist_node *tmp;
        char str[KSYM_SYMBOL_LEN];
        int i, ret;
        bool disabled;

        if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
                func_g.search = NULL;
@@ -3930,6 +3933,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,

        mutex_lock(&trace_probe_ops.func_hash->regex_lock);

        old_hash_ops.filter_hash = old_hash;
        /* Probes only have filters */
        old_hash_ops.notrace_hash = NULL;

        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash)
                /* Hmm, should report this somehow */
@@ -3967,12 +3974,17 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                }
        }
        mutex_lock(&ftrace_lock);
        __disable_ftrace_function_probe();
        disabled = __disable_ftrace_function_probe();
        /*
         * Remove after the disable is called. Otherwise, if the last
         * probe is removed, a null hash means *all enabled*.
         */
        ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);

        /* still need to update the function call sites */
        if (ftrace_enabled && !disabled)
                ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
                                       &old_hash_ops);
        synchronize_sched();
        if (!ret)
                free_ftrace_hash_rcu(old_hash);
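Note on the ftrace hunks above: __disable_ftrace_function_probe() now returns a bool so the caller knows whether the probe ops were actually shut down, and the function call sites are only re-updated when ftrace is enabled and that full shutdown did not happen. Below is a minimal stand-alone sketch of that report-back pattern with made-up names; it is not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

/* Illustration only: the "disable" helper reports whether it really tore
 * everything down, so the caller can decide whether the call sites still
 * need an update (ftrace_enabled && !disabled in the real code). */
static bool probe_registered = true;

static bool disable_probes(void)
{
        if (!probe_registered)
                return false;         /* nothing was registered, nothing shut down */
        probe_registered = false;     /* full shutdown performed */
        return true;
}

int main(void)
{
        bool ftrace_on = true;
        bool disabled = disable_probes();

        if (ftrace_on && !disabled)
                puts("still need to update the function call sites");
        else
                puts("probes fully shut down, skip the update");
        return 0;
}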