bpf: Remove extra lock_sock for TCP_ZEROCOPY_RECEIVE
[ Upstream commit 9cacf81f8161111db25f98e78a7a0e32ae142b3f ]

Add custom implementation of getsockopt hook for TCP_ZEROCOPY_RECEIVE.
We skip generic hooks for TCP_ZEROCOPY_RECEIVE and have a custom
call in do_tcp_getsockopt using the on-stack data. This removes
3% overhead for locking/unlocking the socket.

Without this patch:
     3.38%     0.07%  tcp_mmap  [kernel.kallsyms]  [k] __cgroup_bpf_run_filter_getsockopt
            |
             --3.30%--__cgroup_bpf_run_filter_getsockopt
                       |
                        --0.81%--__kmalloc

With the patch applied:
     0.52%     0.12%  tcp_mmap  [kernel.kallsyms]  [k] __cgroup_bpf_run_filter_getsockopt_kern

Note, exporting uapi/tcp.h requires removing netinet/tcp.h
from test_progs.h because those headers have conflicting definitions.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/20210115163501.805133-2-sdf@google.com
Stable-dep-of: 2598619e012c ("sctp: add bpf_bypass_getsockopt proto callback")
Signed-off-by: Sasha Levin <sashal@kernel.org>
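For context, a minimal sketch of the bypass callback this commit introduces, inferred from the description above; the exact body in the kernel tree may differ slightly:

/* Sketch: TCP tells the cgroup-BPF getsockopt path that it handles
 * TCP_ZEROCOPY_RECEIVE itself (via a kernel-side hook operating on
 * on-stack data inside do_tcp_getsockopt), so the generic hook with
 * its extra lock_sock/release_sock and optval buffer allocation is
 * skipped for this option.
 */
bool tcp_bpf_bypass_getsockopt(int level, int optname)
{
	/* Only TCP_ZEROCOPY_RECEIVE takes the optimized path. */
	if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
		return true;

	return false;
}
EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt);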
Committed by: Greg Kroah-Hartman
Parent: c62e2ac02e
Commit: 08f61a3491
include/net/tcp.h

@@ -389,6 +389,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock,
 		      struct poll_table_struct *wait);
 int tcp_getsockopt(struct sock *sk, int level, int optname,
 		   char __user *optval, int __user *optlen);
+bool tcp_bpf_bypass_getsockopt(int level, int optname);
 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
 		   unsigned int optlen);
 void tcp_set_keepalive(struct sock *sk, int val);
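The hunk above only adds the declaration; the callback is then registered in TCP's proto ops so the cgroup-BPF layer can consult it. A hypothetical, abbreviated illustration (surrounding initializers elided; the bpf_bypass_getsockopt proto field is named per the Stable-dep-of reference above):

/* Illustration only: wire the bypass callback into struct proto so the
 * cgroup getsockopt hook can ask the protocol whether to skip the
 * generic path for a given level/optname pair.
 */
struct proto tcp_prot = {
	.name			= "TCP",
	.getsockopt		= tcp_getsockopt,
	.setsockopt		= tcp_setsockopt,
	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
	/* ... remaining callbacks unchanged ... */
};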