Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/renesas/ravb_main.c
	kernel/bpf/syscall.c
	net/ipv4/ipmr.c

All three conflicts were cases of overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
@@ -28,11 +28,17 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	    attr->value_size == 0)
 		return ERR_PTR(-EINVAL);
 
+	if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
+		/* if value_size is bigger, the user space won't be able to
+		 * access the elements.
+		 */
+		return ERR_PTR(-E2BIG);
+
 	elem_size = round_up(attr->value_size, 8);
 
 	/* check round_up into zero and u32 overflow */
 	if (elem_size == 0 ||
-	    attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size)
+	    attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size)
 		return ERR_PTR(-ENOMEM);
 
 	array_size = sizeof(*array) + attr->max_entries * elem_size;
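The notable change is the extra PAGE_SIZE headroom in the overflow guard: the division form never computes max_entries * elem_size directly, so the comparison itself cannot wrap a u32. A minimal userspace sketch of the same arithmetic (U32_MAX, PAGE_SIZE, and the ARRAY_HDR stand-in are illustrative constants, not the kernel's structures):

#include <stdint.h>
#include <stdio.h>

#define U32_MAX   0xffffffffU
#define PAGE_SIZE 4096U
#define ARRAY_HDR 64U	/* stand-in for sizeof(struct bpf_array) */

static uint32_t round_up8(uint32_t x)
{
	return (x + 7U) & ~7U;
}

/* Mirrors the fixed guard: reject any max_entries for which
 * ARRAY_HDR + max_entries * elem_size could exceed U32_MAX - PAGE_SIZE.
 * Dividing instead of multiplying keeps the check itself from wrapping.
 */
static int array_size_ok(uint32_t value_size, uint32_t max_entries)
{
	uint32_t elem_size = round_up8(value_size);

	if (elem_size == 0)	/* round_up wrapped to zero */
		return 0;
	return max_entries <= (U32_MAX - PAGE_SIZE - ARRAY_HDR) / elem_size;
}

int main(void)
{
	printf("%d\n", array_size_ok(8, 1024));        /* 1: fits */
	printf("%d\n", array_size_ok(8, 0x20000000U)); /* 0: 16 GiB total, wraps u32 */
	return 0;
}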
@@ -105,7 +111,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 		/* all elements already exist */
 		return -EEXIST;
 
-	memcpy(array->value + array->elem_size * index, value, array->elem_size);
+	memcpy(array->value + array->elem_size * index, value, map->value_size);
 	return 0;
 }
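Copying map->value_size instead of the rounded array->elem_size matters because the slot stride is padded to 8 bytes while the buffer handed in through the syscall holds only value_size bytes; copying the full stride would read past the caller's buffer. A small runnable illustration (sizes are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t value_size = 5;			/* what user space hands in */
	uint32_t elem_size  = (value_size + 7U) & ~7U;	/* padded slot stride: 8 */
	char storage[4 * 8] = { 0 };			/* four padded slots */
	char user_value[5]  = "abcd";			/* exactly value_size bytes */
	uint32_t index = 2;

	/* Correct: copy only what the caller provided. Using elem_size
	 * here would read 3 bytes past the end of user_value.
	 */
	memcpy(storage + (size_t)elem_size * index, user_value, value_size);
	printf("slot 2 holds: %s\n", storage + (size_t)elem_size * index);
	return 0;
}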
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
@@ -64,12 +64,35 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		 */
 		goto free_htab;
 
 	err = -ENOMEM;
+	if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
+	    MAX_BPF_STACK - sizeof(struct htab_elem))
+		/* if value_size is bigger, the user space won't be able to
+		 * access the elements via bpf syscall. This check also makes
+		 * sure that the elem_size doesn't overflow and it's
+		 * kmalloc-able later in htab_map_update_elem()
+		 */
+		goto free_htab;
+
+	htab->elem_size = sizeof(struct htab_elem) +
+			  round_up(htab->map.key_size, 8) +
+			  htab->map.value_size;
+
 	/* prevent zero size kmalloc and check for u32 overflow */
 	if (htab->n_buckets == 0 ||
 	    htab->n_buckets > U32_MAX / sizeof(struct hlist_head))
 		goto free_htab;
 
+	if ((u64) htab->n_buckets * sizeof(struct hlist_head) +
+	    (u64) htab->elem_size * htab->map.max_entries >=
+	    U32_MAX - PAGE_SIZE)
+		/* make sure page count doesn't overflow */
+		goto free_htab;
+
+	htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
+				   htab->elem_size * htab->map.max_entries,
+				   PAGE_SIZE) >> PAGE_SHIFT;
+
+	err = -ENOMEM;
 	htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head),
 				      GFP_USER | __GFP_NOWARN);
 
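Both products in the new size check are widened to u64 before the comparison, so the guard itself cannot wrap; map.pages then charges the rounded-up total against the memlock limit. A hedged userspace sketch of the arithmetic (htab_pages() is a hypothetical helper, constants illustrative):

#include <stdint.h>
#include <stdio.h>

#define U32_MAX    0xffffffffULL
#define PAGE_SIZE  4096ULL
#define PAGE_SHIFT 12

/* Widen both products to u64 first so the comparison cannot wrap,
 * then round the total up to whole pages, as the patch does.
 */
static int htab_pages(uint32_t n_buckets, uint32_t elem_size,
		      uint32_t max_entries, uint64_t *pages)
{
	uint64_t size = (uint64_t)n_buckets * sizeof(void *) +
			(uint64_t)elem_size * max_entries;

	if (size >= U32_MAX - PAGE_SIZE)
		return -1;	/* page count would overflow the u32 charge */
	*pages = ((size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
	return 0;
}

int main(void)
{
	uint64_t pages;

	if (htab_pages(1024, 64, 100000, &pages) == 0)
		printf("charge %llu pages\n", (unsigned long long)pages);
	if (htab_pages(1U << 20, 4096, 1U << 20, &pages) != 0)
		printf("rejected: table would overflow the accounting\n");
	return 0;
}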
@@ -85,13 +108,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	raw_spin_lock_init(&htab->lock);
 	htab->count = 0;
 
-	htab->elem_size = sizeof(struct htab_elem) +
-			  round_up(htab->map.key_size, 8) +
-			  htab->map.value_size;
-
-	htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
-				   htab->elem_size * htab->map.max_entries,
-				   PAGE_SIZE) >> PAGE_SHIFT;
 	return &htab->map;
 
 free_htab:
@@ -222,7 +238,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	/* allocate new element outside of lock */
-	l_new = kmalloc(htab->elem_size, GFP_ATOMIC);
+	l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
 	if (!l_new)
 		return -ENOMEM;
 
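The same __GFP_NOWARN treatment appears at every allocation in this series whose size is user-controlled (here, and in map_lookup_elem()/map_update_elem() further down): an oversized request is an expected failure that should come back to user space as -ENOMEM, not show up as an allocation-failure backtrace in the kernel log.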
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
@@ -34,7 +34,7 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
 		atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
 		break;
 	case BPF_TYPE_MAP:
-		atomic_inc(&((struct bpf_map *)raw)->refcnt);
+		bpf_map_inc(raw, true);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -51,7 +51,7 @@ static void bpf_any_put(void *raw, enum bpf_type type)
 		bpf_prog_put(raw);
 		break;
 	case BPF_TYPE_MAP:
-		bpf_map_put(raw);
+		bpf_map_put_with_uref(raw);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -64,7 +64,7 @@ static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
 	void *raw;
 
 	*type = BPF_TYPE_MAP;
-	raw = bpf_map_get(ufd);
+	raw = bpf_map_get_with_uref(ufd);
 	if (IS_ERR(raw)) {
 		*type = BPF_TYPE_PROG;
 		raw = bpf_prog_get(ufd);
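Taken together, the three inode.c changes make objects pinned in the BPF filesystem hold a full user reference rather than a bare refcnt: a pinned prog_array keeps its contents alive the same way an open map fd does, and dropping the pin releases both counters via bpf_map_put_with_uref().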
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
@@ -82,6 +82,14 @@ static void bpf_map_free_deferred(struct work_struct *work)
 	map->ops->map_free(map);
 }
 
+static void bpf_map_put_uref(struct bpf_map *map)
+{
+	if (atomic_dec_and_test(&map->usercnt)) {
+		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
+			bpf_fd_array_map_clear(map);
+	}
+}
+
 /* decrement map refcnt and schedule it for freeing via workqueue
  * (underlying map implementation ops->map_free() might sleep)
  */
@@ -93,6 +101,18 @@ void bpf_map_put(struct bpf_map *map)
 	}
 }
 
+void bpf_map_put_with_uref(struct bpf_map *map)
+{
+	bpf_map_put_uref(map);
+	bpf_map_put(map);
+}
+
+static int bpf_map_release(struct inode *inode, struct file *filp)
+{
+	bpf_map_put_with_uref(filp->private_data);
+	return 0;
+}
+
 #ifdef CONFIG_PROC_FS
 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 {
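The new helpers split map lifetime into two counters: refcnt controls when the object itself is freed, usercnt controls when its user-visible contents (a prog_array's entries) are torn down. A minimal, runnable userspace model of the scheme, with C11 atomics standing in for the kernel's atomic_t (names mirror the patch, but nothing here is kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct map {
	atomic_int refcnt;	/* object freed when this hits zero */
	atomic_int usercnt;	/* prog_array contents cleared at zero */
	bool cleared, freed;
};

static void map_inc(struct map *m, bool uref)
{
	atomic_fetch_add(&m->refcnt, 1);
	if (uref)
		atomic_fetch_add(&m->usercnt, 1);
}

static void map_put(struct map *m)
{
	if (atomic_fetch_sub(&m->refcnt, 1) == 1)
		m->freed = true;	/* kernel: queue bpf_map_free_deferred() */
}

static void map_put_uref(struct map *m)
{
	if (atomic_fetch_sub(&m->usercnt, 1) == 1)
		m->cleared = true;	/* kernel: bpf_fd_array_map_clear() */
}

static void map_put_with_uref(struct map *m)
{
	map_put_uref(m);
	map_put(m);
}

int main(void)
{
	struct map m = { 1, 1, false, false };	/* map_create(): both start at 1 */

	map_inc(&m, false);	/* verifier: program reference, no uref */
	map_put_with_uref(&m);	/* user closes the last map fd */
	printf("cleared=%d freed=%d\n", m.cleared, m.freed);	/* cleared=1 freed=0 */
	map_put(&m);		/* program unloaded */
	printf("freed=%d\n", m.freed);				/* freed=1 */
	return 0;
}

Closing the last fd clears the prog_array even though a loaded program still pins the map's memory; the final map_put() from program unload then frees it.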
@@ -110,20 +130,6 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 }
 #endif
 
-static int bpf_map_release(struct inode *inode, struct file *filp)
-{
-	struct bpf_map *map = filp->private_data;
-
-	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
-		/* prog_array stores refcnt-ed bpf_prog pointers
-		 * release them all when user space closes prog_array_fd
-		 */
-		bpf_fd_array_map_clear(map);
-
-	bpf_map_put(map);
-	return 0;
-}
-
 static const struct file_operations bpf_map_fops = {
 #ifdef CONFIG_PROC_FS
 	.show_fdinfo	= bpf_map_show_fdinfo,
@@ -162,6 +168,7 @@ static int map_create(union bpf_attr *attr)
 		return PTR_ERR(map);
 
 	atomic_set(&map->refcnt, 1);
+	atomic_set(&map->usercnt, 1);
 
 	err = bpf_map_charge_memlock(map);
 	if (err)
@@ -194,7 +201,14 @@ struct bpf_map *__bpf_map_get(struct fd f)
 	return f.file->private_data;
 }
 
-struct bpf_map *bpf_map_get(u32 ufd)
+void bpf_map_inc(struct bpf_map *map, bool uref)
+{
+	atomic_inc(&map->refcnt);
+	if (uref)
+		atomic_inc(&map->usercnt);
+}
+
+struct bpf_map *bpf_map_get_with_uref(u32 ufd)
 {
 	struct fd f = fdget(ufd);
 	struct bpf_map *map;
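In effect, bpf_map_get() becomes bpf_map_get_with_uref(): paths that hand a reference to user space (fd lookup here, pinning in inode.c above) bump both counters, while in-kernel holders call bpf_map_inc(map, false) to take only refcnt. Together with map_create() starting usercnt at 1, usercnt counts exactly the user-visible handles on the map.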
@@ -203,7 +217,7 @@ struct bpf_map *bpf_map_get(u32 ufd)
 	if (IS_ERR(map))
 		return map;
 
-	atomic_inc(&map->refcnt);
+	bpf_map_inc(map, true);
 	fdput(f);
 
 	return map;
@@ -246,7 +260,7 @@ static int map_lookup_elem(union bpf_attr *attr)
 		goto free_key;
 
 	err = -ENOMEM;
-	value = kmalloc(map->value_size, GFP_USER);
+	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
 	if (!value)
 		goto free_key;
 
@@ -305,7 +319,7 @@ static int map_update_elem(union bpf_attr *attr)
 		goto free_key;
 
 	err = -ENOMEM;
-	value = kmalloc(map->value_size, GFP_USER);
+	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
 	if (!value)
 		goto free_key;
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
@@ -2021,8 +2021,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
			 * will be used by the valid program until it's unloaded
			 * and all maps are released in free_bpf_prog_info()
			 */
-			atomic_inc(&map->refcnt);
-
+			bpf_map_inc(map, false);
			fdput(f);
next_insn:
			insn++;
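Replacing the bare atomic_inc() with bpf_map_inc(map, false) is the point of the whole refcounting rework: a prog_array may contain the very program that holds it, so if program references also pinned the array's contents, the cycle could never be torn down. With the program holding only refcnt, the array is flushed once the last user-space handle (fd or pin) disappears, while refcnt still keeps the map's memory valid until the program itself is unloaded.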