Merge tag 'perf-urgent-for-mingo-5.2-20190528' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/urgent fixes:

BPF:

  Jiri Olsa:

  - Fix the determination of the end of the kernel map, to avoid having BPF
    programs that fall after the kernel headers and just before the module
    texts mixed up in the kernel map.

tools UAPI header copies:

  Arnaldo Carvalho de Melo:

  - Update copy of files related to new fspick, fsmount, fsconfig, fsopen,
    move_mount and open_tree syscalls.

  - Sync cpufeatures.h, sched.h, fs.h, drm.h, i915_drm.h and kvm.h headers.

Namespaces:

  Namhyung Kim:

  - Add missing byte swap ops for namespace events when processing records
    from perf.data files that may have been recorded on an arch with a
    different endianness (a byte-swap sketch follows this list).

  - Fix access to the thread namespaces list by using the namespaces_lock.
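
As a sketch of what such a byte-swap fix has to do; the structs below are
hypothetical stand-ins loosely modeled on perf's namespaces event layout,
not the real tools/perf code:

    #include <byteswap.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for perf's event structs. */
    struct ns_link_info {
            uint64_t dev;
            uint64_t ino;
    };

    struct namespaces_event {
            uint64_t pid, tid;
            uint64_t nr_namespaces;
            struct ns_link_info link_info[];
    };

    /* Swap every multi-byte field when the perf.data file was written
     * on a machine with the opposite endianness. nr_namespaces must be
     * swapped before it is used as the loop bound. */
    static void namespaces_event_swap(struct namespaces_event *ev)
    {
            uint64_t i;

            ev->pid = bswap_64(ev->pid);
            ev->tid = bswap_64(ev->tid);
            ev->nr_namespaces = bswap_64(ev->nr_namespaces);
            for (i = 0; i < ev->nr_namespaces; i++) {
                    ev->link_info[i].dev = bswap_64(ev->link_info[i].dev);
                    ev->link_info[i].ino = bswap_64(ev->link_info[i].ino);
            }
    }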

perf data:

  Shawn Landden:

  - Fix 'strncat may truncate' build failure with recent gcc (see the sketch
    below).
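
For context, recent gcc warns when the strncat() bound can equal or exceed
the destination size, since the bound must describe the room left, not the
whole buffer. A generic illustration of the warning pattern and the
conventional fix, not the actual perf change:

    #include <string.h>

    static void append(char *buf, size_t buf_size, const char *suffix)
    {
            /* Warns with recent gcc: the bound says nothing about how
             * much space actually remains in buf:
             *
             *     strncat(buf, suffix, buf_size);
             *
             * Conventional fix: bound by the remaining space, leaving
             * room for the terminating NUL. */
            strncat(buf, suffix, buf_size - strlen(buf) - 1);
    }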

s/390:

  Thomas Richter:

  - Fix a missing module symbol on s390 and a warning for non-root users in
    'perf record'.

arm64:

  Vitaly Chikunov:

  - Fix mksyscalltbl when system kernel headers are ahead of the kernel.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

124 changed files with 1488 additions and 591 deletions

@@ -164,6 +164,9 @@ static void dev_map_free(struct bpf_map *map)
 	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
+	/* Make sure prior __dev_map_entry_free() have completed. */
+	rcu_barrier();
+
 	/* To ensure all pending flush operations have completed wait for flush
 	 * bitmap to indicate all flush_needed bits to be zero on _all_ cpus.
 	 * Because the above synchronize_rcu() ensures the map is disconnected
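
For reference, synchronize_rcu() only waits out in-flight RCU readers; it
does not wait for callbacks already queued with call_rcu(), which is how
__dev_map_entry_free() runs. rcu_barrier() does. A minimal kernel-style
sketch of the resulting teardown pattern (illustrative only; struct
example_map and its fields are made up, and this is not buildable outside
a kernel tree):

    /* Entries are freed asynchronously via call_rcu(), so teardown must
     * wait for both the readers and the queued free callbacks. */
    static void example_map_free(struct example_map *map)
    {
            synchronize_rcu();  /* wait out all current RCU readers */
            rcu_barrier();      /* wait for queued call_rcu() callbacks,
                                 * e.g. per-entry free routines, to run */
            kfree(map->entries);
            kfree(map);
    }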

@@ -527,18 +527,30 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 	return insn - insn_buf;
 }
 
-static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
+							void *key, const bool mark)
 {
 	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 
 	if (l) {
-		bpf_lru_node_set_ref(&l->lru_node);
+		if (mark)
+			bpf_lru_node_set_ref(&l->lru_node);
 		return l->key + round_up(map->key_size, 8);
 	}
 
 	return NULL;
 }
 
+static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+{
+	return __htab_lru_map_lookup_elem(map, key, true);
+}
+
+static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
+{
+	return __htab_lru_map_lookup_elem(map, key, false);
+}
+
 static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
 				   struct bpf_insn *insn_buf)
 {

@@ -1250,6 +1262,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
 	.map_lookup_elem = htab_lru_map_lookup_elem,
+	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
 	.map_update_elem = htab_lru_map_update_elem,
 	.map_delete_elem = htab_lru_map_delete_elem,
 	.map_gen_lookup = htab_lru_map_gen_lookup,

@@ -1281,7 +1294,6 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
 
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 {
-	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 	struct htab_elem *l;
 	void __percpu *pptr;
 	int ret = -ENOENT;

@@ -1297,8 +1309,9 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 	l = __htab_map_lookup_elem(map, key);
 	if (!l)
 		goto out;
-	if (htab_is_lru(htab))
-		bpf_lru_node_set_ref(&l->lru_node);
+	/* We do not mark LRU map element here in order to not mess up
+	 * eviction heuristics when user space does a map walk.
+	 */
 	pptr = htab_elem_get_ptr(l, map->key_size);
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(value + off,
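
The effect is visible from user space: dumping an LRU map no longer
promotes every element to recently-used. A sketch of such a walk with the
libbpf syscall wrappers, assuming a map with __u32 keys and values and
with error handling elided:

    #include <stdio.h>
    #include <linux/types.h>
    #include <bpf/bpf.h>    /* libbpf syscall wrappers */

    /* Dump every element of a BPF_MAP_TYPE_LRU_HASH map. With the
     * map_lookup_elem_sys_only hook wired up above, these syscall-side
     * lookups no longer mark elements as recently used, so the dump
     * does not distort eviction. */
    static void dump_lru_map(int map_fd)
    {
            __u32 key, next_key, value;
            __u32 *prev = NULL;

            while (bpf_map_get_next_key(map_fd, prev, &next_key) == 0) {
                    if (bpf_map_lookup_elem(map_fd, &next_key, &value) == 0)
                            printf("%u -> %u\n", next_key, value);
                    key = next_key;
                    prev = &key;
            }
    }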

@@ -518,7 +518,7 @@ out:
 static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
 {
 	struct bpf_prog *prog;
-	int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
+	int ret = inode_permission(inode, MAY_READ);
 	if (ret)
 		return ERR_PTR(ret);
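
The practical consequence is that a program pinned read-only in bpffs can
be retrieved again, since fetching it only reads the inode. A sketch using
libbpf's bpf_obj_get(); the pin path is invented for the example:

    #include <bpf/bpf.h>

    /* Retrieving a pinned program only needs MAY_READ on the bpffs
     * inode; requiring MAY_WRITE wrongly rejected read-only pins. */
    int get_pinned_prog(void)
    {
            return bpf_obj_get("/sys/fs/bpf/my_prog"); /* prog fd or <0 */
    }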

@@ -808,7 +808,10 @@ static int map_lookup_elem(union bpf_attr *attr)
 		err = map->ops->map_peek_elem(map, value);
 	} else {
 		rcu_read_lock();
-		ptr = map->ops->map_lookup_elem(map, key);
+		if (map->ops->map_lookup_elem_sys_only)
+			ptr = map->ops->map_lookup_elem_sys_only(map, key);
+		else
+			ptr = map->ops->map_lookup_elem(map, key);
 		if (IS_ERR(ptr)) {
 			err = PTR_ERR(ptr);
 		} else if (!ptr) {
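
This is the usual optional-ops dispatch: probe the specialized callback
and fall back to the mandatory one, so map types that do not implement the
new hook are unaffected. A small self-contained C illustration of the
pattern, with invented names:

    struct ops {
            int (*lookup)(int key);          /* mandatory */
            int (*lookup_sys_only)(int key); /* optional, may be NULL */
    };

    static int do_lookup(const struct ops *ops, int key)
    {
            /* Prefer the specialized hook when the backend provides
             * one; otherwise use the mandatory callback. */
            if (ops->lookup_sys_only)
                    return ops->lookup_sys_only(key);
            return ops->lookup(key);
    }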

@@ -1297,7 +1297,8 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
 }
 
 #ifdef CONFIG_MODULES
-int bpf_event_notify(struct notifier_block *nb, unsigned long op, void *module)
+static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
+			    void *module)
 {
 	struct bpf_trace_module *btm, *tmp;
 	struct module *mod = module;

@@ -1336,7 +1337,7 @@ static struct notifier_block bpf_module_nb = {
 	.notifier_call = bpf_event_notify,
 };
 
-int __init bpf_event_init(void)
+static int __init bpf_event_init(void)
 {
 	register_module_notifier(&bpf_module_nb);
 	return 0;