bpf: Remove extra lock_sock for TCP_ZEROCOPY_RECEIVE
[ Upstream commit 9cacf81f8161111db25f98e78a7a0e32ae142b3f ]

Add custom implementation of getsockopt hook for TCP_ZEROCOPY_RECEIVE.
We skip generic hooks for TCP_ZEROCOPY_RECEIVE and have a custom
call in do_tcp_getsockopt using the on-stack data. This removes
3% overhead for locking/unlocking the socket.

Without this patch:
     3.38%     0.07%  tcp_mmap  [kernel.kallsyms]  [k] __cgroup_bpf_run_filter_getsockopt
            |
             --3.30%--__cgroup_bpf_run_filter_getsockopt
                       |
                        --0.81%--__kmalloc

With the patch applied:
     0.52%     0.12%  tcp_mmap  [kernel.kallsyms]  [k] __cgroup_bpf_run_filter_getsockopt_kern

Note, exporting uapi/tcp.h requires removing netinet/tcp.h
from test_progs.h because those headers have conflicting
definitions.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/20210115163501.805133-2-sdf@google.com
Stable-dep-of: 2598619e012c ("sctp: add bpf_bypass_getsockopt proto callback")
Signed-off-by: Sasha Levin <sashal@kernel.org>
committed by Greg Kroah-Hartman
parent c62e2ac02e
commit 08f61a3491
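
The commit message above describes the key idea: a protocol can advertise, through a bpf_bypass_getsockopt callback on its struct proto, which getsockopt requests may skip the generic cgroup BPF hook entirely. The following standalone C program is only a rough sketch of that dispatch decision under assumed names; every *_demo identifier and constant is a hypothetical stand-in, not the kernel's API.

#include <stdbool.h>
#include <stdio.h>

#define SOL_TCP_DEMO			6
#define TCP_ZEROCOPY_RECEIVE_DEMO	35

struct proto_demo {
	/* returns true when the cgroup getsockopt hook may be bypassed */
	bool (*bpf_bypass_getsockopt)(int level, int optname);
};

static bool tcp_bpf_bypass_getsockopt_demo(int level, int optname)
{
	return level == SOL_TCP_DEMO && optname == TCP_ZEROCOPY_RECEIVE_DEMO;
}

static int run_cgroup_getsockopt_demo(const struct proto_demo *proto,
				      int level, int optname, int retval)
{
	/* mirror of the macro's control flow: run the generic hook only when
	 * the protocol has no bypass callback or the callback declines */
	if (!proto->bpf_bypass_getsockopt ||
	    !proto->bpf_bypass_getsockopt(level, optname))
		printf("running generic cgroup hook for level=%d optname=%d\n",
		       level, optname);
	else
		printf("bypassing generic cgroup hook for level=%d optname=%d\n",
		       level, optname);
	return retval;
}

int main(void)
{
	struct proto_demo tcp = {
		.bpf_bypass_getsockopt = tcp_bpf_bypass_getsockopt_demo,
	};

	run_cgroup_getsockopt_demo(&tcp, SOL_TCP_DEMO,
				   TCP_ZEROCOPY_RECEIVE_DEMO, 0);
	run_cgroup_getsockopt_demo(&tcp, SOL_TCP_DEMO, 1 /* e.g. TCP_NODELAY */, 0);
	return 0;
}

In the real header below, the same NULL-check-plus-call is wrapped in INDIRECT_CALL_INET_1() so the common TCP case can resolve to a direct call to tcp_bpf_bypass_getsockopt instead of a retpoline-penalized indirect branch.
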
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -158,6 +158,10 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 				       int __user *optlen, int max_optlen,
 				       int retval);
 
+int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
+					    int optname, void *optval,
+					    int *optlen, int retval);
+
 static inline enum bpf_cgroup_storage_type cgroup_storage_type(
 	struct bpf_map *map)
 {
@@ -404,10 +408,23 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 ({									       \
 	int __ret = retval;						       \
 	if (cgroup_bpf_enabled)						       \
-		__ret = __cgroup_bpf_run_filter_getsockopt(sock, level,       \
-							   optname, optval,    \
-							   optlen, max_optlen, \
-							   retval);	       \
+		if (!(sock)->sk_prot->bpf_bypass_getsockopt ||		       \
+		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
+					  tcp_bpf_bypass_getsockopt,	       \
+					  level, optname))		       \
+			__ret = __cgroup_bpf_run_filter_getsockopt(	       \
+				sock, level, optname, optval, optlen,	       \
+				max_optlen, retval);			       \
 	__ret;								       \
 })
 
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,     \
+					    optlen, retval)		       \
+({									       \
+	int __ret = retval;						       \
+	if (cgroup_bpf_enabled)						       \
+		__ret = __cgroup_bpf_run_filter_getsockopt_kern(	       \
+			sock, level, optname, optval, optlen, retval);	       \
+	__ret;								       \
+})
+
@@ -493,6 +510,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
 #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
 				       optlen, max_optlen, retval) ({ retval; })
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+					    optlen, retval) ({ retval; })
 #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
 				       kernel_optval) ({ 0; })
 
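
The hunks above only add the _kern hook entry points and stubs; the caller the commit message refers to, a direct invocation from do_tcp_getsockopt() against on-stack data, lives in net/ipv4/tcp.c and is not shown on this page. As a hedged illustration of that calling pattern only, here is a standalone userspace sketch; all *_demo names and the struct layout are hypothetical stand-ins, not the kernel's types.

#include <stdio.h>
#include <string.h>

/* on-stack result, standing in for struct tcp_zerocopy_receive */
struct zc_receive_demo {
	unsigned long address;
	unsigned int length;
};

/* stand-in for BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(): the hook sees plain
 * kernel pointers to on-stack data, so nothing has to be locked or
 * allocated just to run it */
static int run_getsockopt_kern_demo(void *optval, int *optlen, int retval)
{
	printf("kern hook inspected %d bytes\n", *optlen);
	return retval;
}

/* stand-in for the TCP_ZEROCOPY_RECEIVE branch of do_tcp_getsockopt() */
static int do_getsockopt_zerocopy_demo(void *dst, int *dst_len)
{
	struct zc_receive_demo zc = { .address = 0x1000, .length = 4096 };
	int len = sizeof(zc);
	int err = 0;

	/* run the BPF hook on the on-stack result ... */
	err = run_getsockopt_kern_demo(&zc, &len, err);

	/* ... and only then publish it to the caller (the kernel would use
	 * copy_to_user()/put_user() here) */
	if (!err && *dst_len >= len) {
		memcpy(dst, &zc, len);
		*dst_len = len;
	}
	return err;
}

int main(void)
{
	struct zc_receive_demo out;
	int len = sizeof(out);

	if (!do_getsockopt_zerocopy_demo(&out, &len))
		printf("zerocopy result: addr=0x%lx len=%u\n",
		       out.address, out.length);
	return 0;
}

Because the _kern flavour works on data the caller already holds, the generic path's extra lock_sock()/release_sock() and temporary buffer are avoided, which is the overhead visible as __cgroup_bpf_run_filter_getsockopt/__kmalloc in the "without this patch" profile quoted in the commit message.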