Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net
The BTF conflicts were simple overlapping changes. The virtio_net conflict was an overlap between a fix to a statistics counter and a move over to a bona fide statistics structure rather than counting values on the stack.

Signed-off-by: David S. Miller <davem@davemloft.net>
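To make the virtio_net note above concrete, here is a minimal, self-contained C sketch of the pattern the two sides collided over: keeping receive totals in a dedicated statistics structure that outlives the call, instead of counting values in local stack variables. This is an illustration only; the names (struct rx_stats, rx_account) are hypothetical and this is not the virtio_net code itself.

/* Hypothetical sketch: a persistent statistics structure instead of
 * on-stack counters.  Not the virtio_net implementation. */
#include <stdint.h>
#include <stdio.h>

struct rx_stats {
    uint64_t packets;   /* total packets accounted so far */
    uint64_t bytes;     /* total bytes accounted so far */
};

/* Accumulate one received packet into the statistics structure. */
static void rx_account(struct rx_stats *stats, uint64_t len)
{
    stats->packets++;
    stats->bytes += len;
}

int main(void)
{
    struct rx_stats stats = { 0 };

    /* Simulate a few received packets. */
    rx_account(&stats, 64);
    rx_account(&stats, 1500);
    rx_account(&stats, 512);

    printf("packets=%llu bytes=%llu\n",
           (unsigned long long)stats.packets,
           (unsigned long long)stats.bytes);
    return 0;
}

The point of the structure is that the totals survive after the receive path returns, so a later statistics query can report them; on-stack counters disappear with the stack frame, which is why the counter fix and the structure conversion overlapped in the same code.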
@@ -1279,8 +1279,12 @@ static void show_special(struct audit_context *context, int *call_panic)
break;
case AUDIT_KERN_MODULE:
audit_log_format(ab, "name=");
audit_log_untrustedstring(ab, context->module.name);
kfree(context->module.name);
if (context->module.name) {
audit_log_untrustedstring(ab, context->module.name);
kfree(context->module.name);
} else
audit_log_format(ab, "(null)");

break;
}
audit_log_end(ab);
@@ -2411,8 +2415,9 @@ void __audit_log_kern_module(char *name)
{
struct audit_context *context = audit_context();

context->module.name = kmalloc(strlen(name) + 1, GFP_KERNEL);
strcpy(context->module.name, name);
context->module.name = kstrdup(name, GFP_KERNEL);
if (!context->module.name)
audit_log_lost("out of memory in __audit_log_kern_module");
context->type = AUDIT_KERN_MODULE;
}

@@ -378,7 +378,7 @@ static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf,
return -EINVAL;

value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
if (!value_type || value_size > map->value_size)
if (!value_type || value_size != map->value_size)
return -EINVAL;

return 0;

@@ -1519,9 +1519,9 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
{
bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
const struct btf_member *member;
u32 meta_needed, last_offset;
struct btf *btf = env->btf;
u32 struct_size = t->size;
u32 meta_needed;
u16 i;

meta_needed = btf_type_vlen(t) * sizeof(*member);
@@ -1534,6 +1534,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,

btf_verifier_log_type(env, t, NULL);

last_offset = 0;
for_each_member(i, t, member) {
if (!btf_name_offset_valid(btf, member->name_off)) {
btf_verifier_log_member(env, t, member,
@@ -1555,6 +1556,16 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}

/*
* ">" instead of ">=" because the last member could be
* "char a[0];"
*/
if (last_offset > member->offset) {
btf_verifier_log_member(env, t, member,
"Invalid member bits_offset");
return -EINVAL;
}

if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
btf_verifier_log_member(env, t, member,
"Memmber bits_offset exceeds its struct size");
@@ -1562,6 +1573,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
}

btf_verifier_log_member(env, t, member, NULL);
last_offset = member->offset;
}

return meta_needed;

@@ -6343,7 +6343,7 @@ static u64 perf_virt_to_phys(u64 virt)

static struct perf_callchain_entry __empty_callchain = { .nr = 0, };

static struct perf_callchain_entry *
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
bool kernel = !event->attr.exclude_callchain_kernel;
@@ -6382,7 +6382,9 @@ void perf_prepare_sample(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int size = 1;

data->callchain = perf_callchain(event, regs);
if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
data->callchain = perf_callchain(event, regs);

size += data->callchain->nr;

header->size += size * sizeof(u64);
@@ -7335,6 +7337,10 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
struct file *file, unsigned long offset,
unsigned long size)
{
/* d_inode(NULL) won't be equal to any mapped user-space file */
if (!filter->path.dentry)
return false;

if (d_inode(filter->path.dentry) != file_inode(file))
return false;

@@ -312,10 +312,8 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);

if (vma) {
vma->vm_mm = mm;
INIT_LIST_HEAD(&vma->anon_vma_chain);
}
if (vma)
vma_init(vma, mm);
return vma;
}

@@ -325,8 +325,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
task = create->result;
if (!IS_ERR(task)) {
static const struct sched_param param = { .sched_priority = 0 };
char name[TASK_COMM_LEN];

vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
/*
* task is already visible to other tasks, so updating
* COMM must be protected.
*/
vsnprintf(name, sizeof(name), namefmt, args);
set_task_comm(task, name);
/*
* root may have changed our (kthreadd's) priority or CPU mask.
* The kernel thread should not inherit these properties.

@@ -1465,6 +1465,29 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
rt_mutex_postunlock(&wake_q);
}

static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
{
might_sleep();

mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
* rt_mutex_lock_nested - lock a rt_mutex
*
* @lock: the rt_mutex to be locked
* @subclass: the lockdep subclass
*/
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
__rt_mutex_lock(lock, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
#endif

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/**
* rt_mutex_lock - lock a rt_mutex
*
@@ -1472,12 +1495,10 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
*/
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
might_sleep();

mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
__rt_mutex_lock(lock, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif

/**
* rt_mutex_lock_interruptible - lock a rt_mutex interruptible

@@ -176,10 +176,27 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
unsigned long pfn, pgoff, order;
pgprot_t pgprot = PAGE_KERNEL;
int error, nid, is_ram;
struct dev_pagemap *conflict_pgmap;

align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
- align_start;
align_end = align_start + align_size - 1;

conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
if (conflict_pgmap) {
dev_WARN(dev, "Conflicting mapping in same section\n");
put_dev_pagemap(conflict_pgmap);
return ERR_PTR(-ENOMEM);
}

conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
if (conflict_pgmap) {
dev_WARN(dev, "Conflicting mapping in same section\n");
put_dev_pagemap(conflict_pgmap);
return ERR_PTR(-ENOMEM);
}

is_ram = region_intersects(align_start, align_size,
IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

@@ -199,7 +216,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)

mutex_lock(&pgmap_lock);
error = 0;
align_end = align_start + align_size - 1;

foreach_order_pgoff(res, order, pgoff) {
error = __radix_tree_insert(&pgmap_radix,
@@ -305,7 +321,7 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL_GPL(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_enable;

/*
@@ -346,5 +362,5 @@ void __put_devmap_managed_page(struct page *page)
} else if (!count)
__put_page(page);
}
EXPORT_SYMBOL_GPL(__put_devmap_managed_page);
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */

@@ -2090,8 +2090,14 @@ retry:
sub_rq_bw(&next_task->dl, &rq->dl);
set_task_cpu(next_task, later_rq->cpu);
add_rq_bw(&next_task->dl, &later_rq->dl);

/*
* Update the later_rq clock here, because the clock is used
* by the cpufreq_update_util() inside __add_running_bw().
*/
update_rq_clock(later_rq);
add_running_bw(&next_task->dl, &later_rq->dl);
activate_task(later_rq, next_task, 0);
activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
ret = 1;

resched_curr(later_rq);

@@ -836,6 +836,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
* can be time-consuming. Try to avoid it when possible.
*/
raw_spin_lock(&rt_rq->rt_runtime_lock);
if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
rt_rq->rt_runtime = rt_b->rt_runtime;
skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
if (skip)

@@ -47,7 +47,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
}
if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
}

@@ -260,6 +260,15 @@ retry:
err = 0;
__cpu_stop_queue_work(stopper1, work1, &wakeq);
__cpu_stop_queue_work(stopper2, work2, &wakeq);
/*
* The waking up of stopper threads has to happen
* in the same scheduling context as the queueing.
* Otherwise, there is a possibility of one of the
* above stoppers being woken up by another CPU,
* and preempting us. This will cause us to not
* wake up the other stopper forever.
*/
preempt_disable();
unlock:
raw_spin_unlock(&stopper2->lock);
raw_spin_unlock_irq(&stopper1->lock);
@@ -271,7 +280,6 @@ unlock:
}

if (!err) {
preempt_disable();
wake_up_q(&wakeq);
preempt_enable();
}

@@ -3226,6 +3226,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
return !atomic_read(&buffer->record_disabled);
}

/**
* ring_buffer_record_is_set_on - return true if the ring buffer is set writable
* @buffer: The ring buffer to see if write is set enabled
*
* Returns true if the ring buffer is set writable by ring_buffer_record_on().
* Note that this does NOT mean it is in a writable state.
*
* It may return true when the ring buffer has been disabled by
* ring_buffer_record_disable(), as that is a temporary disabling of
* the ring buffer.
*/
int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
{
return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
}

/**
* ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
* @buffer: The ring buffer to stop writes to.

@@ -1373,6 +1373,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)

arch_spin_lock(&tr->max_lock);

/* Inherit the recordable setting from trace_buffer */
if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
ring_buffer_record_on(tr->max_buffer.buffer);
else
ring_buffer_record_off(tr->max_buffer.buffer);

swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);

__update_max_tr(tr, tsk, cpu);

@@ -679,6 +679,8 @@ event_trigger_callback(struct event_command *cmd_ops,
goto out_free;

out_reg:
/* Up the trigger_data count to make sure reg doesn't free it on failure */
event_trigger_init(trigger_ops, trigger_data);
ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
/*
* The above returns on success the # of functions enabled,
@@ -686,11 +688,13 @@ event_trigger_callback(struct event_command *cmd_ops,
* Consider no functions a failure too.
*/
if (!ret) {
cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
ret = -ENOENT;
goto out_free;
} else if (ret < 0)
goto out_free;
ret = 0;
} else if (ret > 0)
ret = 0;

/* Down the counter of trigger_data or free it if not used anymore */
event_trigger_free(trigger_ops, trigger_data);
out:
return ret;

@@ -1416,6 +1420,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
goto out;
}

/* Up the trigger_data count to make sure nothing frees it on failure */
event_trigger_init(trigger_ops, trigger_data);

if (trigger) {
number = strsep(&trigger, ":");

@@ -1466,6 +1473,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
goto out_disable;
/* Just return zero, not the number of enabled functions */
ret = 0;
event_trigger_free(trigger_ops, trigger_data);
out:
return ret;

@@ -1476,7 +1484,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
out_free:
if (cmd_ops->set_filter)
cmd_ops->set_filter(NULL, trigger_data, NULL);
kfree(trigger_data);
event_trigger_free(trigger_ops, trigger_data);
kfree(enable_data);
goto out;
}

@@ -400,11 +400,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
struct event_file_link *link = NULL;
int ret = 0;

if (file) {
struct event_file_link *link;

link = kmalloc(sizeof(*link), GFP_KERNEL);
if (!link) {
ret = -ENOMEM;
@@ -424,6 +423,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
else
ret = enable_kprobe(&tk->rp.kp);
}

if (ret) {
if (file) {
/* Notice the if is true on not WARN() */
if (!WARN_ON_ONCE(!link))
list_del_rcu(&link->list);
kfree(link);
tk->tp.flags &= ~TP_FLAG_TRACE;
} else {
tk->tp.flags &= ~TP_FLAG_PROFILE;
}
}
out:
return ret;
}