Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2019-04-28

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Introduce BPF socket local storage map so that BPF programs can
   store private data they associate with a socket (instead of e.g.
   a separate hash table), from Martin.

2) Add support for bpftool to dump BTF types, through a new
   `bpftool btf dump` sub-command, from Andrii.

3) Enable the BPF-based flow dissector for skb-less eth_get_headlen()
   calls, which was previously not supported since the skb was used
   to look up the netns, from Stanislav.

4) Add an opt-in interface for tracepoints to expose a writable
   context to attached BPF programs, used here for NBD sockets,
   from Matt.

5) BPF xadd related arm64 JIT fixes and scalability improvements,
   from Daniel.

6) Change skb->protocol in the bpf_skb_adjust_room() helper in order
   to support tunnels such as sit. Add selftests as well, from Willem.

7) Various smaller misc fixes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
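For orientation before the diff: a minimal sketch of how a BPF program might use the new socket local storage. The map type (BPF_MAP_TYPE_SK_STORAGE), its BPF_F_NO_PREALLOC/int-key/BTF requirements and the two helpers follow this series; the struct, the section names and the BTF-defined map syntax are illustrative assumptions, not taken from this merge.

/* Hypothetical usage sketch (not part of this merge): a per-socket
 * packet counter in socket local storage instead of a separate
 * sk-keyed hash map.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>	/* assumption: libbpf helper macros */

struct sk_stats {
	__u64 pkts;
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);	/* enforced by map_alloc_check */
	__type(key, int);			/* key must be a 4-byte int */
	__type(value, struct sk_stats);		/* BTF is mandatory for this map */
} sk_stg SEC(".maps");

SEC("cgroup/skb")
int count_pkts(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct sk_stats *stats;

	if (!sk)
		return 1;
	sk = bpf_sk_fullsock(sk);	/* the helpers take a fullsock */
	if (!sk)
		return 1;

	/* Creates the per-socket value on first access; no separate
	 * socket-identifying key is needed.
	 */
	stats = bpf_sk_storage_get(&sk_stg, sk, 0,
				   BPF_SK_STORAGE_GET_F_CREATE);
	if (stats)
		__sync_fetch_and_add(&stats->pkts, 1);
	return 1;
}

char _license[] SEC("license") = "GPL";

The map type itself is wired up in net/core/bpf_sk_storage.c below; the per-socket list, the map buckets and the lookup cache it relies on are all in that new file.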
net/core/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_HWBM) += hwbm.o
 obj-$(CONFIG_NET_DEVLINK) += devlink.o
 obj-$(CONFIG_GRO_CELLS) += gro_cells.o
 obj-$(CONFIG_FAILOVER) += failover.o
+obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
net/core/bpf_sk_storage.c (new file, 804 lines)
@@ -0,0 +1,804 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/btf.h>

static atomic_t cache_idx;

struct bucket {
	struct hlist_head list;
	raw_spinlock_t lock;
};

/* The map is not the primary owner of a bpf_sk_storage_elem.
 * Instead, the sk->sk_bpf_storage is.
 *
 * The map (bpf_sk_storage_map) is for two purposes:
 * 1. Define the size of the "sk local storage".  It is
 *    the map's value_size.
 *
 * 2. Maintain a list to keep track of all elems such
 *    that they can be cleaned up during the map destruction.
 *
 * When a bpf local storage is being looked up for a
 * particular sk, the "bpf_map" pointer is actually used
 * as the "key" to search in the list of elem in
 * sk->sk_bpf_storage.
 *
 * Hence, consider sk->sk_bpf_storage as the mini-map
 * with the "bpf_map" pointer as the searching key.
 */
struct bpf_sk_storage_map {
	struct bpf_map map;
	/* Lookup elem does not require accessing the map.
	 *
	 * Updating/Deleting requires a bucket lock to
	 * link/unlink the elem from the map.  Multiple
	 * buckets are used to reduce contention.
	 */
	struct bucket *buckets;
	u32 bucket_log;
	u16 elem_size;
	u16 cache_idx;
};

struct bpf_sk_storage_data {
	/* smap is used as the searching key when looking up
	 * from sk->sk_bpf_storage.
	 *
	 * Put it in the same cacheline as the data to minimize
	 * the number of cachelines accessed in the cache hit case.
	 */
	struct bpf_sk_storage_map __rcu *smap;
	u8 data[0] __aligned(8);
};

/* Linked to bpf_sk_storage and bpf_sk_storage_map */
struct bpf_sk_storage_elem {
	struct hlist_node map_node;	/* Linked to bpf_sk_storage_map */
	struct hlist_node snode;	/* Linked to bpf_sk_storage */
	struct bpf_sk_storage __rcu *sk_storage;
	struct rcu_head rcu;
	/* 8 bytes hole */
	/* The data is stored in another cacheline to minimize
	 * the number of cachelines accessed during a cache hit.
	 */
	struct bpf_sk_storage_data sdata ____cacheline_aligned;
};

#define SELEM(_SDATA) container_of((_SDATA), struct bpf_sk_storage_elem, sdata)
#define SDATA(_SELEM) (&(_SELEM)->sdata)
#define BPF_SK_STORAGE_CACHE_SIZE 16

struct bpf_sk_storage {
	struct bpf_sk_storage_data __rcu *cache[BPF_SK_STORAGE_CACHE_SIZE];
	struct hlist_head list;	/* List of bpf_sk_storage_elem */
	struct sock *sk;	/* The sk that owns the above "list" of
				 * bpf_sk_storage_elem.
				 */
	struct rcu_head rcu;
	raw_spinlock_t lock;	/* Protect adding/removing from the "list" */
};
static struct bucket *select_bucket(struct bpf_sk_storage_map *smap,
				    struct bpf_sk_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

static int omem_charge(struct sock *sk, unsigned int size)
{
	/* same check as in sock_kmalloc() */
	if (size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

static bool selem_linked_to_sk(const struct bpf_sk_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map(const struct bpf_sk_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

static struct bpf_sk_storage_elem *selem_alloc(struct bpf_sk_storage_map *smap,
					       struct sock *sk, void *value,
					       bool charge_omem)
{
	struct bpf_sk_storage_elem *selem;

	if (charge_omem && omem_charge(sk, smap->elem_size))
		return NULL;

	selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN);
	if (selem) {
		if (value)
			memcpy(SDATA(selem)->data, value, smap->map.value_size);
		return selem;
	}

	if (charge_omem)
		atomic_sub(smap->elem_size, &sk->sk_omem_alloc);

	return NULL;
}
/* sk_storage->lock must be held and selem->sk_storage == sk_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool __selem_unlink_sk(struct bpf_sk_storage *sk_storage,
			      struct bpf_sk_storage_elem *selem,
			      bool uncharge_omem)
{
	struct bpf_sk_storage_map *smap;
	bool free_sk_storage;
	struct sock *sk;

	smap = rcu_dereference(SDATA(selem)->smap);
	sk = sk_storage->sk;

	/* All uncharging on sk->sk_omem_alloc must be done first.
	 * sk may be freed once the last selem is unlinked from sk_storage.
	 */
	if (uncharge_omem)
		atomic_sub(smap->elem_size, &sk->sk_omem_alloc);

	free_sk_storage = hlist_is_singular_node(&selem->snode,
						 &sk_storage->list);
	if (free_sk_storage) {
		atomic_sub(sizeof(struct bpf_sk_storage), &sk->sk_omem_alloc);
		sk_storage->sk = NULL;
		/* After this RCU_INIT, sk may be freed and cannot be used */
		RCU_INIT_POINTER(sk->sk_bpf_storage, NULL);

		/* sk_storage is not freed now.  sk_storage->lock is
		 * still held and raw_spin_unlock_bh(&sk_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if kfree_rcu(sk_storage, rcu) is done
		 * after the raw_spin_unlock_bh(&sk_storage->lock).
		 *
		 * Hence, a "bool free_sk_storage" is returned
		 * to the caller which then calls the kfree_rcu()
		 * after unlock.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(sk_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(sk_storage->cache[smap->cache_idx], NULL);

	kfree_rcu(selem, rcu);

	return free_sk_storage;
}

static void selem_unlink_sk(struct bpf_sk_storage_elem *selem)
{
	struct bpf_sk_storage *sk_storage;
	bool free_sk_storage = false;

	if (unlikely(!selem_linked_to_sk(selem)))
		/* selem has already been unlinked from sk */
		return;

	sk_storage = rcu_dereference(selem->sk_storage);
	raw_spin_lock_bh(&sk_storage->lock);
	if (likely(selem_linked_to_sk(selem)))
		free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
	raw_spin_unlock_bh(&sk_storage->lock);

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}
/* sk_storage->lock must be held and sk_storage->list cannot be empty */
static void __selem_link_sk(struct bpf_sk_storage *sk_storage,
			    struct bpf_sk_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->sk_storage, sk_storage);
	hlist_add_head(&selem->snode, &sk_storage->list);
}

static void selem_unlink_map(struct bpf_sk_storage_elem *selem)
{
	struct bpf_sk_storage_map *smap;
	struct bucket *b;

	if (unlikely(!selem_linked_to_map(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference(SDATA(selem)->smap);
	b = select_bucket(smap, selem);
	raw_spin_lock_bh(&b->lock);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_bh(&b->lock);
}

static void selem_link_map(struct bpf_sk_storage_map *smap,
			   struct bpf_sk_storage_elem *selem)
{
	struct bucket *b = select_bucket(smap, selem);

	raw_spin_lock_bh(&b->lock);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_bh(&b->lock);
}

static void selem_unlink(struct bpf_sk_storage_elem *selem)
{
	/* Always unlink from map before unlinking from sk_storage
	 * because selem will be freed after it is successfully unlinked
	 * from the sk_storage.
	 */
	selem_unlink_map(selem);
	selem_unlink_sk(selem);
}
static struct bpf_sk_storage_data *
__sk_storage_lookup(struct bpf_sk_storage *sk_storage,
		    struct bpf_sk_storage_map *smap,
		    bool cacheit_lockit)
{
	struct bpf_sk_storage_data *sdata;
	struct bpf_sk_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference(sk_storage->cache[smap->cache_idx]);
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode)
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		/* spinlock is needed to avoid racing with a
		 * parallel delete.  Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next __sk_storage_lookup().
		 */
		raw_spin_lock_bh(&sk_storage->lock);
		if (selem_linked_to_sk(selem))
			rcu_assign_pointer(sk_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_bh(&sk_storage->lock);
	}

	return sdata;
}

static struct bpf_sk_storage_data *
sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_sk_storage *sk_storage;
	struct bpf_sk_storage_map *smap;

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_sk_storage_map *)map;
	return __sk_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int check_flags(const struct bpf_sk_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
static int sk_storage_alloc(struct sock *sk,
			    struct bpf_sk_storage_map *smap,
			    struct bpf_sk_storage_elem *first_selem)
{
	struct bpf_sk_storage *prev_sk_storage, *sk_storage;
	int err;

	err = omem_charge(sk, sizeof(*sk_storage));
	if (err)
		return err;

	sk_storage = kzalloc(sizeof(*sk_storage), GFP_ATOMIC | __GFP_NOWARN);
	if (!sk_storage) {
		err = -ENOMEM;
		goto uncharge;
	}
	INIT_HLIST_HEAD(&sk_storage->list);
	raw_spin_lock_init(&sk_storage->lock);
	sk_storage->sk = sk;

	__selem_link_sk(sk_storage, first_selem);
	selem_link_map(smap, first_selem);
	/* Publish sk_storage to sk.  sk->sk_lock cannot be acquired.
	 * Hence, an atomic cmpxchg is used to set sk->sk_bpf_storage
	 * from NULL to the newly allocated sk_storage ptr.
	 *
	 * From now on, the sk->sk_bpf_storage pointer is protected
	 * by the sk_storage->lock.  Hence, when freeing
	 * the sk->sk_bpf_storage, the sk_storage->lock must
	 * be held before setting sk->sk_bpf_storage to NULL.
	 */
	prev_sk_storage = cmpxchg((struct bpf_sk_storage **)&sk->sk_bpf_storage,
				  NULL, sk_storage);
	if (unlikely(prev_sk_storage)) {
		selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_sk_storage_map_free() does a
		 * synchronize_rcu() before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	kfree(sk_storage);
	atomic_sub(sizeof(*sk_storage), &sk->sk_omem_alloc);
	return err;
}
/* sk cannot be going away because it is linking a new elem
 * to sk->sk_bpf_storage (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and cause other memory issues
 * during map destruction).
 */
static struct bpf_sk_storage_data *sk_storage_update(struct sock *sk,
						     struct bpf_map *map,
						     void *value,
						     u64 map_flags)
{
	struct bpf_sk_storage_data *old_sdata = NULL;
	struct bpf_sk_storage_elem *selem;
	struct bpf_sk_storage *sk_storage;
	struct bpf_sk_storage_map *smap;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot both be set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
		return ERR_PTR(-EINVAL);

	smap = (struct bpf_sk_storage_map *)map;
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		/* Very first elem for this sk */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = selem_alloc(smap, sk, value, true);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = sk_storage_alloc(sk, smap, selem);
		if (err) {
			kfree(selem);
			atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do an inline update
		 * such that it can avoid taking the sk_storage->lock
		 * and changing the lists.
		 */
		old_sdata = __sk_storage_lookup(sk_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_sk(SELEM(old_sdata))) {
			copy_map_value_locked(map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	raw_spin_lock_bh(&sk_storage->lock);

	/* Recheck sk_storage->list under sk_storage->lock */
	if (unlikely(hlist_empty(&sk_storage->list))) {
		/* A parallel del is happening and sk_storage is going
		 * away.  It has just been checked before, so this is
		 * very unlikely.  Return instead of retrying to keep
		 * things simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = __sk_storage_lookup(sk_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(map, old_sdata->data, value, false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	/* sk_storage->lock is held.  Hence, we are sure
	 * we can unlink and uncharge the old_sdata successfully
	 * later.  Thus, instead of charging the new selem now
	 * and then uncharging the old selem later (which may cause
	 * a potential but unnecessary charge failure), avoid taking
	 * a charge at all here (the "!old_sdata" check); the
	 * old_sdata will then not be uncharged later during
	 * __selem_unlink_sk().
	 */
	selem = selem_alloc(smap, sk, value, !old_sdata);
	if (!selem) {
		err = -ENOMEM;
		goto unlock_err;
	}

	/* First, link the new selem to the map */
	selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to sk_storage */
	__selem_link_sk(sk_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		selem_unlink_map(SELEM(old_sdata));
		__selem_unlink_sk(sk_storage, SELEM(old_sdata), false);
	}

unlock:
	raw_spin_unlock_bh(&sk_storage->lock);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_bh(&sk_storage->lock);
	return ERR_PTR(err);
}
static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
{
	struct bpf_sk_storage_data *sdata;

	sdata = sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	selem_unlink(SELEM(sdata));

	return 0;
}
/* Called by __sk_destruct() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_sk_storage_elem *selem;
	struct bpf_sk_storage *sk_storage;
	bool free_sk_storage = false;
	struct hlist_node *n;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf-map's syscall
	 * could be modifying the sk_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
	 *
	 * It is racing with bpf_sk_storage_map_free() alone
	 * when unlinking elem from the sk_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_bh(&sk_storage->lock);
	hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * sk_storage.
		 */
		selem_unlink_map(selem);
		free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
	}
	raw_spin_unlock_bh(&sk_storage->lock);
	rcu_read_unlock();

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}
static void bpf_sk_storage_map_free(struct bpf_map *map)
{
	struct bpf_sk_storage_elem *selem;
	struct bpf_sk_storage_map *smap;
	struct bucket *b;
	unsigned int i;

	smap = (struct bpf_sk_storage_map *)map;

	synchronize_rcu();

	/* The bpf prog and userspace can no longer access this map
	 * now.  No new selem (of this map) can be added
	 * to the sk->sk_bpf_storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(&b->list)),
						 struct bpf_sk_storage_elem,
						 map_node))) {
			selem_unlink(selem);
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* bpf_sk_storage_free() may still need to access the map.
	 * e.g. bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, bpf_sk_storage_free() still needs to access
	 * the smap->elem_size to do the uncharging in
	 * __selem_unlink_sk().
	 *
	 * Hence, wait another rcu grace period for
	 * bpf_sk_storage_free() to finish.
	 */
	synchronize_rcu();

	kvfree(smap->buckets);
	kfree(map);
}
static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags != BPF_F_NO_PREALLOC || attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (attr->value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct bpf_sk_storage_elem) ||
	    /* U16_MAX is much more than enough for sk local storage
	     * considering a tcp_sock is ~2k.
	     */
	    attr->value_size > U16_MAX - sizeof(struct bpf_sk_storage_elem))
		return -E2BIG;

	return 0;
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_sk_storage_map *smap;
	unsigned int i;
	u32 nbuckets;
	u64 cost;

	smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	smap->bucket_log = ilog2(roundup_pow_of_two(num_possible_cpus()));
	nbuckets = 1U << smap->bucket_log;
	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
				 GFP_USER | __GFP_NOWARN);
	if (!smap->buckets) {
		kfree(smap);
		return ERR_PTR(-ENOMEM);
	}
	cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
	smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) %
		BPF_SK_STORAGE_CACHE_SIZE;
	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	return &smap->map;
}
static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

static int bpf_sk_storage_map_check_btf(const struct bpf_map *map,
					const struct btf *btf,
					const struct btf_type *key_type,
					const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_sk_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags)
{
	struct bpf_sk_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = sk_storage_update(sock->sk, map, value, map_flags);
		sockfd_put(sock);
		return IS_ERR(sdata) ? PTR_ERR(sdata) : 0;
	}

	return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = sk_storage_delete(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}

BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags)
{
	struct bpf_sk_storage_data *sdata;

	if (flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add new elem to a going away sk.
	     * Otherwise, the new elem may become a leak
	     * (and also cause other memory issues during map
	     *  destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = sk_storage_update(sk, map, value, BPF_NOEXIST);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = sk_storage_delete(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}

const struct bpf_map_ops sk_storage_map_ops = {
	.map_alloc_check = bpf_sk_storage_map_alloc_check,
	.map_alloc = bpf_sk_storage_map_alloc,
	.map_free = bpf_sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_sk_storage_map_check_btf,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_SOCKET,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func		= bpf_sk_storage_delete,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_SOCKET,
};
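One design note on the map ops above: the syscall-side handlers key by socket fd (resolved with sockfd_lookup()), so user space can read or update a socket's storage through the ordinary map syscalls. A hypothetical user-space sketch using libbpf's wrapper, with a struct that mirrors the illustrative one earlier (not from this diff):

/* Hypothetical user-space sketch, not part of this merge.  The map key
 * is a socket fd: bpf_fd_sk_storage_lookup_elem() above resolves it via
 * sockfd_lookup() and copies out that socket's value.
 */
#include <bpf/bpf.h>	/* libbpf syscall wrappers */

struct sk_stats {
	__u64 pkts;	/* must match the map's BTF value type */
};

static int read_sk_storage(int map_fd, int sock_fd, struct sk_stats *out)
{
	/* 0 on success; -1 with errno ENOENT if this socket has no
	 * storage for the map yet (sk_storage_lookup() returned NULL)
	 */
	return bpf_map_lookup_elem(map_fd, &sock_fd, out);
}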
net/core/filter.c
@@ -75,6 +75,7 @@
 #include <net/seg6_local.h>
 #include <net/lwtunnel.h>
 #include <net/ipv6_stubs.h>
+#include <net/bpf_sk_storage.h>
 
 /**
  * sk_filter_trim_cap - run a packet through a socket filter
@@ -1730,6 +1731,40 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
 	.arg4_type	= ARG_CONST_SIZE,
 };
 
+BPF_CALL_4(bpf_flow_dissector_load_bytes,
+	   const struct bpf_flow_dissector *, ctx, u32, offset,
+	   void *, to, u32, len)
+{
+	void *ptr;
+
+	if (unlikely(offset > 0xffff))
+		goto err_clear;
+
+	if (unlikely(!ctx->skb))
+		goto err_clear;
+
+	ptr = skb_header_pointer(ctx->skb, offset, len, to);
+	if (unlikely(!ptr))
+		goto err_clear;
+	if (ptr != to)
+		memcpy(to, ptr, len);
+
+	return 0;
+err_clear:
+	memset(to, 0, len);
+	return -EFAULT;
+}
+
+static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
+	.func		= bpf_flow_dissector_load_bytes,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg4_type	= ARG_CONST_SIZE,
+};
+
 BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
 	   u32, offset, void *, to, u32, len, u32, start_header)
 {
@@ -3047,6 +3082,14 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
 
 		skb_set_transport_header(skb, mac_len + nh_len);
 	}
+
+	/* Match skb->protocol to new outer l3 protocol */
+	if (skb->protocol == htons(ETH_P_IP) &&
+	    flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
+		skb->protocol = htons(ETH_P_IPV6);
+	else if (skb->protocol == htons(ETH_P_IPV6) &&
+		 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
+		skb->protocol = htons(ETH_P_IP);
 	}
 
 	if (skb_is_gso(skb)) {
@@ -5861,6 +5904,9 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	}
 }
 
+const struct bpf_func_proto bpf_sk_storage_get_proto __weak;
+const struct bpf_func_proto bpf_sk_storage_delete_proto __weak;
+
 static const struct bpf_func_proto *
 cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -5869,6 +5915,10 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_local_storage_proto;
 	case BPF_FUNC_sk_fullsock:
 		return &bpf_sk_fullsock_proto;
+	case BPF_FUNC_sk_storage_get:
+		return &bpf_sk_storage_get_proto;
+	case BPF_FUNC_sk_storage_delete:
+		return &bpf_sk_storage_delete_proto;
 #ifdef CONFIG_INET
 	case BPF_FUNC_tcp_sock:
 		return &bpf_tcp_sock_proto;
@@ -5950,6 +6000,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_skb_fib_lookup_proto;
 	case BPF_FUNC_sk_fullsock:
 		return &bpf_sk_fullsock_proto;
+	case BPF_FUNC_sk_storage_get:
+		return &bpf_sk_storage_get_proto;
+	case BPF_FUNC_sk_storage_delete:
+		return &bpf_sk_storage_delete_proto;
 #ifdef CONFIG_XFRM
 	case BPF_FUNC_skb_get_xfrm_state:
 		return &bpf_skb_get_xfrm_state_proto;
@@ -6121,7 +6175,7 @@ flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
 	switch (func_id) {
 	case BPF_FUNC_skb_load_bytes:
-		return &bpf_skb_load_bytes_proto;
+		return &bpf_flow_dissector_load_bytes_proto;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
@@ -6248,9 +6302,7 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
 			return false;
 		break;
 	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
-		if (size != sizeof(__u64))
-			return false;
-		break;
+		return false;
 	case bpf_ctx_range(struct __sk_buff, tstamp):
 		if (size != sizeof(__u64))
 			return false;
@@ -6285,7 +6337,6 @@ static bool sk_filter_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data):
 	case bpf_ctx_range(struct __sk_buff, data_meta):
 	case bpf_ctx_range(struct __sk_buff, data_end):
-	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
 	case bpf_ctx_range(struct __sk_buff, tstamp):
 	case bpf_ctx_range(struct __sk_buff, wire_len):
@@ -6312,7 +6363,6 @@ static bool cg_skb_is_valid_access(int off, int size,
 	switch (off) {
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
 	case bpf_ctx_range(struct __sk_buff, data_meta):
-	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 	case bpf_ctx_range(struct __sk_buff, wire_len):
 		return false;
 	case bpf_ctx_range(struct __sk_buff, data):
@@ -6358,7 +6408,6 @@ static bool lwt_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
 	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
 	case bpf_ctx_range(struct __sk_buff, data_meta):
-	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 	case bpf_ctx_range(struct __sk_buff, tstamp):
 	case bpf_ctx_range(struct __sk_buff, wire_len):
 		return false;
@@ -6601,7 +6650,6 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data_end):
 		info->reg_type = PTR_TO_PACKET_END;
 		break;
-	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
 		return false;
 	}
@@ -6803,7 +6851,6 @@ static bool sk_skb_is_valid_access(int off, int size,
 	switch (off) {
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
 	case bpf_ctx_range(struct __sk_buff, data_meta):
-	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 	case bpf_ctx_range(struct __sk_buff, tstamp):
 	case bpf_ctx_range(struct __sk_buff, wire_len):
 		return false;
@@ -6877,24 +6924,65 @@ static bool flow_dissector_is_valid_access(int off, int size,
 					   const struct bpf_prog *prog,
 					   struct bpf_insn_access_aux *info)
 {
+	const int size_default = sizeof(__u32);
+
+	if (off < 0 || off >= sizeof(struct __sk_buff))
+		return false;
+
 	if (type == BPF_WRITE)
 		return false;
 
 	switch (off) {
 	case bpf_ctx_range(struct __sk_buff, data):
+		if (size != size_default)
+			return false;
 		info->reg_type = PTR_TO_PACKET;
-		break;
+		return true;
 	case bpf_ctx_range(struct __sk_buff, data_end):
+		if (size != size_default)
+			return false;
 		info->reg_type = PTR_TO_PACKET_END;
-		break;
+		return true;
 	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+		if (size != sizeof(__u64))
+			return false;
 		info->reg_type = PTR_TO_FLOW_KEYS;
-		break;
+		return true;
 	default:
 		return false;
 	}
+}
 
-	return bpf_skb_is_valid_access(off, size, type, prog, info);
+static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
+					     const struct bpf_insn *si,
+					     struct bpf_insn *insn_buf,
+					     struct bpf_prog *prog,
+					     u32 *target_size)
+{
+	struct bpf_insn *insn = insn_buf;
+
+	switch (si->off) {
+	case offsetof(struct __sk_buff, data):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_flow_dissector, data));
+		break;
+
+	case offsetof(struct __sk_buff, data_end):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_flow_dissector, data_end));
+		break;
+
+	case offsetof(struct __sk_buff, flow_keys):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_flow_dissector, flow_keys));
+		break;
+	}
+
+	return insn - insn_buf;
 }
 
 static u32 bpf_convert_ctx_access(enum bpf_access_type type,
@@ -7201,15 +7289,6 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 						     skc_num, 2, target_size));
 		break;
 
-	case offsetof(struct __sk_buff, flow_keys):
-		off  = si->off;
-		off -= offsetof(struct __sk_buff, flow_keys);
-		off += offsetof(struct sk_buff, cb);
-		off += offsetof(struct qdisc_skb_cb, flow_keys);
-		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
-				      si->src_reg, off);
-		break;
-
 	case offsetof(struct __sk_buff, tstamp):
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8);
 
@@ -8214,7 +8293,7 @@ const struct bpf_prog_ops sk_msg_prog_ops = {
 const struct bpf_verifier_ops flow_dissector_verifier_ops = {
 	.get_func_proto		= flow_dissector_func_proto,
 	.is_valid_access	= flow_dissector_is_valid_access,
-	.convert_ctx_access	= bpf_convert_ctx_access,
+	.convert_ctx_access	= flow_dissector_convert_ctx_access,
 };
 
 const struct bpf_prog_ops flow_dissector_prog_ops = {
net/core/flow_dissector.c
@@ -65,6 +65,45 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
 }
 EXPORT_SYMBOL(skb_flow_dissector_init);
 
+int skb_flow_dissector_prog_query(const union bpf_attr *attr,
+				  union bpf_attr __user *uattr)
+{
+	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
+	u32 prog_id, prog_cnt = 0, flags = 0;
+	struct bpf_prog *attached;
+	struct net *net;
+
+	if (attr->query.query_flags)
+		return -EINVAL;
+
+	net = get_net_ns_by_fd(attr->query.target_fd);
+	if (IS_ERR(net))
+		return PTR_ERR(net);
+
+	rcu_read_lock();
+	attached = rcu_dereference(net->flow_dissector_prog);
+	if (attached) {
+		prog_cnt = 1;
+		prog_id = attached->aux->id;
+	}
+	rcu_read_unlock();
+
+	put_net(net);
+
+	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
+		return -EFAULT;
+	if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
+		return -EFAULT;
+
+	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
+		return 0;
+
+	if (copy_to_user(prog_ids, &prog_id, sizeof(u32)))
+		return -EFAULT;
+
+	return 0;
+}
+
 int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
 				       struct bpf_prog *prog)
 {
@@ -683,50 +722,30 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
 	}
 }
 
-bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
-			    const struct sk_buff *skb,
-			    struct flow_dissector *flow_dissector,
-			    struct bpf_flow_keys *flow_keys)
+bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
+		      __be16 proto, int nhoff, int hlen)
 {
-	struct bpf_skb_data_end cb_saved;
-	struct bpf_skb_data_end *cb;
+	struct bpf_flow_keys *flow_keys = ctx->flow_keys;
 	u32 result;
 
-	/* Note that even though the const qualifier is discarded
-	 * throughout the execution of the BPF program, all changes(the
-	 * control block) are reverted after the BPF program returns.
-	 * Therefore, __skb_flow_dissect does not alter the skb.
-	 */
-
-	cb = (struct bpf_skb_data_end *)skb->cb;
-
-	/* Save Control Block */
-	memcpy(&cb_saved, cb, sizeof(cb_saved));
-	memset(cb, 0, sizeof(*cb));
-
 	/* Pass parameters to the BPF program */
 	memset(flow_keys, 0, sizeof(*flow_keys));
-	cb->qdisc_cb.flow_keys = flow_keys;
-	flow_keys->n_proto = skb->protocol;
-	flow_keys->nhoff = skb_network_offset(skb);
+	flow_keys->n_proto = proto;
+	flow_keys->nhoff = nhoff;
 	flow_keys->thoff = flow_keys->nhoff;
 
-	bpf_compute_data_pointers((struct sk_buff *)skb);
-	result = BPF_PROG_RUN(prog, skb);
+	result = BPF_PROG_RUN(prog, ctx);
 
-	/* Restore state */
-	memcpy(cb, &cb_saved, sizeof(cb_saved));
-
-	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff,
-				   skb_network_offset(skb), skb->len);
+	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
 	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
-				   flow_keys->nhoff, skb->len);
+				   flow_keys->nhoff, hlen);
 
 	return result == BPF_OK;
 }
 
 /**
  * __skb_flow_dissect - extract the flow_keys struct and return it
+ * @net: associated network namespace, derived from @skb if NULL
  * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
  * @flow_dissector: list of keys to dissect
  * @target_container: target structure to put dissected values into
@@ -743,7 +762,8 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
  *
  * Caller must take care of zeroing target container memory.
  */
-bool __skb_flow_dissect(const struct sk_buff *skb,
+bool __skb_flow_dissect(const struct net *net,
+			const struct sk_buff *skb,
 			struct flow_dissector *flow_dissector,
 			void *target_container,
 			void *data, __be16 proto, int nhoff, int hlen,
@@ -756,6 +776,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 	struct flow_dissector_key_icmp *key_icmp;
 	struct flow_dissector_key_tags *key_tags;
 	struct flow_dissector_key_vlan *key_vlan;
+	struct bpf_prog *attached = NULL;
 	enum flow_dissect_ret fdret;
 	enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
 	int num_hdrs = 0;
@@ -798,22 +819,39 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 					      target_container);
 
 	if (skb) {
-		struct bpf_flow_keys flow_keys;
-		struct bpf_prog *attached = NULL;
-
-		rcu_read_lock();
-
-		if (skb->dev)
-			attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog);
-		else if (skb->sk)
-			attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog);
-		else
-			WARN_ON_ONCE(1);
+		if (!net) {
+			if (skb->dev)
+				net = dev_net(skb->dev);
+			else if (skb->sk)
+				net = sock_net(skb->sk);
+		}
+	}
+
+	WARN_ON_ONCE(!net);
+	if (net) {
+		rcu_read_lock();
+		attached = rcu_dereference(net->flow_dissector_prog);
 
 		if (attached) {
-			ret = __skb_flow_bpf_dissect(attached, skb,
-						     flow_dissector,
-						     &flow_keys);
+			struct bpf_flow_keys flow_keys;
+			struct bpf_flow_dissector ctx = {
+				.flow_keys = &flow_keys,
+				.data = data,
+				.data_end = data + hlen,
+			};
+			__be16 n_proto = proto;
+
+			if (skb) {
+				ctx.skb = skb;
+				/* we can't use 'proto' in the skb case
+				 * because it might be set to skb->vlan_proto
+				 * which has been pulled from the data
+				 */
+				n_proto = skb->protocol;
+			}
+
+			ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff,
+					       hlen);
 			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
 						 target_container);
 			rcu_read_unlock();
@@ -1410,8 +1448,8 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
 	__flow_hash_secret_init();
 
 	memset(&keys, 0, sizeof(keys));
-	__skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
-			   NULL, 0, 0, 0,
+	__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
+			   &keys, NULL, 0, 0, 0,
 			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
 	return __flow_hash_from_keys(&keys, hashrnd);
@@ -1512,7 +1550,8 @@ u32 skb_get_poff(const struct sk_buff *skb)
 {
 	struct flow_keys_basic keys;
 
-	if (!skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
+	if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
+					      NULL, 0, 0, 0, 0))
 		return 0;
 
 	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
net/core/sock.c
@@ -137,6 +137,7 @@
 
 #include <linux/filter.h>
 #include <net/sock_reuseport.h>
+#include <net/bpf_sk_storage.h>
 
 #include <trace/events/sock.h>
 
@@ -1709,6 +1710,10 @@ static void __sk_destruct(struct rcu_head *head)
 
 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
 
+#ifdef CONFIG_BPF_SYSCALL
+	bpf_sk_storage_free(sk);
+#endif
+
 	if (atomic_read(&sk->sk_omem_alloc))
 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
 			 __func__, atomic_read(&sk->sk_omem_alloc));