Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
 {
 	switch (type) {
 	case BPF_TYPE_PROG:
-		atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
+		raw = bpf_prog_inc(raw);
 		break;
 	case BPF_TYPE_MAP:
-		bpf_map_inc(raw, true);
+		raw = bpf_map_inc(raw, true);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
 		goto out;
 
 	raw = bpf_any_get(inode->i_private, *type);
-	touch_atime(&path);
+	if (!IS_ERR(raw))
+		touch_atime(&path);
 
 	path_put(&path);
 	return raw;
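
Note on the hunk above: bpf_any_get() now propagates the ERR_PTR-encoded failure from bpf_prog_inc()/bpf_map_inc(), which is why touch_atime() is gated on !IS_ERR(raw). For readers unfamiliar with the convention, here is a rough userspace model of ERR_PTR/PTR_ERR/IS_ERR; the real helpers live in include/linux/err.h, and err_ptr() and friends below are illustrative stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Encode a small negative errno in the top page of the address
     * space, where no valid pointer can live, so one return value can
     * carry either a pointer or an error code. */
    static inline void *err_ptr(long error)    { return (void *)error; }
    static inline long ptr_err(const void *p)  { return (long)p; }
    static inline int is_err(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    int main(void)
    {
        void *raw = err_ptr(-16); /* 16 == EBUSY, as a failed *_inc() would return */

        if (is_err(raw))
            printf("error: %ld\n", ptr_err(raw));
        return 0;
    }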
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
 	return f.file->private_data;
 }
 
-void bpf_map_inc(struct bpf_map *map, bool uref)
+/* prog's and map's refcnt limit */
+#define BPF_MAX_REFCNT 32768
+
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 {
-	atomic_inc(&map->refcnt);
+	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
+		atomic_dec(&map->refcnt);
+		return ERR_PTR(-EBUSY);
+	}
 	if (uref)
 		atomic_inc(&map->usercnt);
+	return map;
 }
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
@@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
 	if (IS_ERR(map))
 		return map;
 
-	bpf_map_inc(map, true);
+	map = bpf_map_inc(map, true);
 	fdput(f);
 
 	return map;
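
bpf_map_inc(), and bpf_prog_inc() in the next hunk, now saturate at BPF_MAX_REFCNT, so user space can no longer overflow the 32-bit refcount by repeatedly acquiring references. A minimal userspace sketch of the same pattern using C11 atomics; struct object and object_get() are illustrative, and errno/NULL stands in for the kernel's ERR_PTR(-EBUSY):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_REFCNT 32768

    struct object {
        atomic_int refcnt;
    };

    /* Take a reference, but refuse to push the count past MAX_REFCNT;
     * on overflow, undo the increment and fail. */
    static struct object *object_get(struct object *obj)
    {
        if (atomic_fetch_add(&obj->refcnt, 1) + 1 > MAX_REFCNT) {
            atomic_fetch_sub(&obj->refcnt, 1);
            errno = EBUSY;
            return NULL;
        }
        return obj;
    }

    int main(void)
    {
        struct object obj = { .refcnt = 1 };

        if (object_get(&obj))
            printf("refcnt is now %d\n", atomic_load(&obj.refcnt));
        return 0;
    }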
@@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
 	return f.file->private_data;
 }
 
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+	if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
+		atomic_dec(&prog->aux->refcnt);
+		return ERR_PTR(-EBUSY);
+	}
+	return prog;
+}
+
 /* called by sockets/tracing/seccomp before attaching program to an event
  * pairs with bpf_prog_put()
  */
@@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
 	if (IS_ERR(prog))
 		return prog;
 
-	atomic_inc(&prog->aux->refcnt);
+	prog = bpf_prog_inc(prog);
 	fdput(f);
 
 	return prog;
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -239,16 +239,6 @@ static const char * const reg_type_str[] = {
 	[CONST_IMM] = "imm",
 };
 
-static const struct {
-	int map_type;
-	int func_id;
-} func_limit[] = {
-	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
-	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
-	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
-	{BPF_MAP_TYPE_STACK_TRACE, BPF_FUNC_get_stackid},
-};
-
 static void print_verifier_state(struct verifier_env *env)
 {
 	enum bpf_reg_type t;
@@ -921,27 +911,52 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
 
 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
 {
-	bool bool_map, bool_func;
-	int i;
-
 	if (!map)
 		return 0;
 
-	for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
-		bool_map = (map->map_type == func_limit[i].map_type);
-		bool_func = (func_id == func_limit[i].func_id);
-		/* only when map & func pair match it can continue.
-		 * don't allow any other map type to be passed into
-		 * the special func;
-		 */
-		if (bool_func && bool_map != bool_func) {
-			verbose("cannot pass map_type %d into func %d\n",
-				map->map_type, func_id);
-			return -EINVAL;
-		}
+	/* We need a two way check, first is from map perspective ... */
+	switch (map->map_type) {
+	case BPF_MAP_TYPE_PROG_ARRAY:
+		if (func_id != BPF_FUNC_tail_call)
+			goto error;
+		break;
+	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+		if (func_id != BPF_FUNC_perf_event_read &&
+		    func_id != BPF_FUNC_perf_event_output)
+			goto error;
+		break;
+	case BPF_MAP_TYPE_STACK_TRACE:
+		if (func_id != BPF_FUNC_get_stackid)
+			goto error;
+		break;
+	default:
+		break;
+	}
+
+	/* ... and second from the function itself. */
+	switch (func_id) {
+	case BPF_FUNC_tail_call:
+		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+			goto error;
+		break;
+	case BPF_FUNC_perf_event_read:
+	case BPF_FUNC_perf_event_output:
+		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+			goto error;
+		break;
+	case BPF_FUNC_get_stackid:
+		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
+			goto error;
+		break;
+	default:
+		break;
 	}
 
 	return 0;
+error:
+	verbose("cannot pass map_type %d into func %d\n",
+		map->map_type, func_id);
+	return -EINVAL;
 }
 
 static int check_call(struct verifier_env *env, int func_id)
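
The replaced func_limit[] check was one-directional: it rejected a special helper used with the wrong map, but let a special map be passed to an ordinary helper. A standalone sketch of the two-way check; the enum values and names are illustrative stand-ins for those in the kernel headers:

    #include <stdio.h>

    enum map_type { PROG_ARRAY, PERF_EVENT_ARRAY, STACK_TRACE, HASH };
    enum func_id  { TAIL_CALL, PERF_EVENT_READ, PERF_EVENT_OUTPUT,
                    GET_STACKID, MAP_LOOKUP };

    static int check_compat(enum map_type map, enum func_id func)
    {
        /* Direction one: a special-purpose map accepts only its helpers. */
        switch (map) {
        case PROG_ARRAY:
            if (func != TAIL_CALL)
                return -1;
            break;
        case PERF_EVENT_ARRAY:
            if (func != PERF_EVENT_READ && func != PERF_EVENT_OUTPUT)
                return -1;
            break;
        case STACK_TRACE:
            if (func != GET_STACKID)
                return -1;
            break;
        default:
            break;
        }
        /* Direction two: a special helper accepts only its map type. */
        switch (func) {
        case TAIL_CALL:
            if (map != PROG_ARRAY)
                return -1;
            break;
        case PERF_EVENT_READ:
        case PERF_EVENT_OUTPUT:
            if (map != PERF_EVENT_ARRAY)
                return -1;
            break;
        case GET_STACKID:
            if (map != STACK_TRACE)
                return -1;
            break;
        default:
            break;
        }
        return 0;
    }

    int main(void)
    {
        /* Rejected by direction one; the old one-way check allowed it. */
        printf("PROG_ARRAY + MAP_LOOKUP -> %d\n", check_compat(PROG_ARRAY, MAP_LOOKUP));
        /* Rejected by direction two. */
        printf("HASH + TAIL_CALL -> %d\n", check_compat(HASH, TAIL_CALL));
        /* Allowed. */
        printf("HASH + MAP_LOOKUP -> %d\n", check_compat(HASH, MAP_LOOKUP));
        return 0;
    }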
@@ -2049,15 +2064,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
 				return -E2BIG;
 			}
 
-			/* remember this map */
-			env->used_maps[env->used_map_cnt++] = map;
-
 			/* hold the map. If the program is rejected by verifier,
 			 * the map will be released by release_maps() or it
 			 * will be used by the valid program until it's unloaded
 			 * and all maps are released in free_bpf_prog_info()
 			 */
-			bpf_map_inc(map, false);
+			map = bpf_map_inc(map, false);
+			if (IS_ERR(map)) {
+				fdput(f);
+				return PTR_ERR(map);
+			}
+			env->used_maps[env->used_map_cnt++] = map;
+
 			fdput(f);
 next_insn:
 			insn++;
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -353,7 +353,7 @@ static struct srcu_struct pmus_srcu;
  * 1 - disallow cpu events for unpriv
  * 2 - disallow kernel profiling for unpriv
  */
-int sysctl_perf_event_paranoid __read_mostly = 1;
+int sysctl_perf_event_paranoid __read_mostly = 2;
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
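
The default moves from 1 (no CPU-wide events for unprivileged users) to 2 (additionally, no kernel profiling for unprivileged users). To see what a running kernel enforces, read the sysctl; a small sketch, where the procfs path is the standard one and the rest is illustrative:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
        int level;

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fscanf(f, "%d", &level) == 1)
            printf("perf_event_paranoid = %d\n", level);
        fclose(f);
        return 0;
    }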
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -596,17 +596,8 @@ bool sched_can_stop_tick(struct rq *rq)
 		return false;
 
-	/*
-	 * FIFO realtime policy runs the highest priority task (after DEADLINE).
-	 * Other runnable tasks are of a lower priority. The scheduler tick
-	 * isn't needed.
-	 */
-	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
-	if (fifo_nr_running)
-		return true;
-
 	/*
-	 * Round-robin realtime tasks time slice with other tasks at the same
-	 * realtime priority.
+	 * If there are more than one RR tasks, we need the tick to effect the
+	 * actual RR behaviour.
 	 */
 	if (rq->rt.rr_nr_running) {
 		if (rq->rt.rr_nr_running == 1)
@@ -615,8 +606,20 @@ bool sched_can_stop_tick(struct rq *rq)
 			return false;
 	}
 
-	/* Normal multitasking need periodic preemption checks */
-	if (rq->cfs.nr_running > 1)
+	/*
+	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
+	 * forced preemption between FIFO tasks.
+	 */
+	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
+	if (fifo_nr_running)
+		return true;
+
+	/*
+	 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
+	 * if there's more than one we need the tick for involuntary
+	 * preemption.
+	 */
+	if (rq->nr_running > 1)
 		return false;
 
 	return true;
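
After the two hunks above, sched_can_stop_tick() tests the scheduling classes in strict order: DL (just above the first hunk), then RR, then FIFO, then CFS, and the final check uses rq->nr_running rather than rq->cfs.nr_running so any leftover runnable task is counted. A standalone model of the reordered decision; struct rq_model is an illustrative stand-in for the rq counters the real code reads:

    #include <stdbool.h>
    #include <stdio.h>

    struct rq_model {
        unsigned int dl_nr_running;  /* deadline tasks */
        unsigned int rt_nr_running;  /* all realtime tasks (FIFO + RR) */
        unsigned int rr_nr_running;  /* round-robin realtime tasks */
        unsigned int nr_running;     /* everything runnable */
    };

    static bool can_stop_tick(const struct rq_model *rq)
    {
        unsigned int fifo_nr_running;

        /* Deadline tasks always need the tick. */
        if (rq->dl_nr_running)
            return false;

        /* More than one RR task: the tick drives RR time slicing. */
        if (rq->rr_nr_running)
            return rq->rr_nr_running == 1;

        /* FIFO but no RR: no forced preemption between FIFO tasks. */
        fifo_nr_running = rq->rt_nr_running - rq->rr_nr_running;
        if (fifo_nr_running)
            return true;

        /* Only CFS left: more than one task needs the tick for
         * involuntary preemption. */
        if (rq->nr_running > 1)
            return false;

        return true;
    }

    int main(void)
    {
        struct rq_model rq = { .rt_nr_running = 1, .nr_running = 1 };

        /* A single FIFO task: the tick can be stopped. */
        printf("tick can stop: %s\n", can_stop_tick(&rq) ? "yes" : "no");
        return 0;
    }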
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1394,6 +1394,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(later_rq->cpu,
 						       &task->cpus_allowed) ||
 				     task_running(rq, task) ||
+				     !dl_task(task) ||
 				     !task_on_rq_queued(task))) {
 			double_unlock_balance(rq, later_rq);
 			later_rq = NULL;
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3030,7 +3030,14 @@ static int idle_balance(struct rq *this_rq);
 
 #else /* CONFIG_SMP */
 
-static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
+static inline void update_load_avg(struct sched_entity *se, int not_used)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct rq *rq = rq_of(cfs_rq);
+
+	cpufreq_trigger_update(rq_clock(rq));
+}
+
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1729,6 +1729,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(lowest_rq->cpu,
 						       tsk_cpus_allowed(task)) ||
 				     task_running(rq, task) ||
+				     !rt_task(task) ||
 				     !task_on_rq_queued(task))) {
 
 			double_unlock_balance(rq, lowest_rq);
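
The !dl_task() and !rt_task() additions above fix the same race: double_lock_balance() can drop the run-queue lock, and in that window the task may have changed scheduling class, so the class test joins the existing rechecks. A rough pthread sketch of the general recheck-after-relock shape, with all names illustrative:

    #include <pthread.h>
    #include <stdbool.h>

    /* Illustrative task state guarded by a lock that must sometimes be
     * dropped and retaken, as double_lock_balance() may do. */
    struct task_model {
        pthread_mutex_t lock;
        bool on_rq;   /* still queued? */
        int policy;   /* stand-in for dl_task()/rt_task() checks */
    };

    /* Retake the lock, then revalidate every assumption made before it
     * was dropped; the kernel fix adds the policy recheck that was
     * missing. Returns with the lock held only if the task is still valid. */
    static bool relock_and_check(struct task_model *t, int expected_policy)
    {
        pthread_mutex_lock(&t->lock);
        if (!t->on_rq || t->policy != expected_policy) {
            pthread_mutex_unlock(&t->lock);
            return false;
        }
        return true;
    }

    int main(void)
    {
        struct task_model t = {
            .lock = PTHREAD_MUTEX_INITIALIZER, .on_rq = true, .policy = 1,
        };

        if (relock_and_check(&t, 1))
            pthread_mutex_unlock(&t.lock);
        return 0;
    }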
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2095,8 +2095,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
 	trace_create_file("filter", 0644, file->dir, file,
 			  &ftrace_event_filter_fops);
 
-	trace_create_file("trigger", 0644, file->dir, file,
-			  &event_trigger_fops);
+	/*
+	 * Only event directories that can be enabled should have
+	 * triggers.
+	 */
+	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+		trace_create_file("trigger", 0644, file->dir, file,
+				  &event_trigger_fops);
 
 	trace_create_file("format", 0444, file->dir, call,
 			  &ftrace_event_format_fops);