proto_ops: Add locked held versions of sendmsg and sendpage
Add new proto_ops sendmsg_locked and sendpage_locked that can be called when the socket lock is already held. Correspondingly, add kernel_sendmsg_locked and kernel_sendpage_locked as front end functions.

These functions will be used in zero proxy so that we can take the socket lock in a ULP sendmsg/sendpage and then directly call the backend transport proto_ops functions.

Signed-off-by: Tom Herbert <tom@quantonium.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
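The intended consumer is a ULP that already holds the socket lock and wants to hand data straight to the underlying transport. Purely as illustration (not part of this patch; the ULP name and the framing step are hypothetical, and it assumes the transport fills in the new hook, as TCP does in the diff below), such a ULP sendmsg could look like:

/* Hypothetical ULP sendmsg: take the socket lock once, do the ULP's own
 * work, then call the backend transport directly through the new hook.
 */
static int my_ulp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	const struct proto_ops *ops = sk->sk_socket->ops;
	int ret;

	lock_sock(sk);

	/* ... ULP framing/record processing under the socket lock ... */

	ret = ops->sendmsg_locked(sk, msg, size);	/* e.g. tcp_sendmsg_locked */

	release_sock(sk);
	return ret;
}

The patch then wires the TCP implementations into inet_stream_ops and refactors tcp_sendmsg()/tcp_sendpage() so the lock handling lives in thin wrappers.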
@@ -944,6 +944,8 @@ const struct proto_ops inet_stream_ops = {
 	.sendpage	   = inet_sendpage,
 	.splice_read	   = tcp_splice_read,
 	.read_sock	   = tcp_read_sock,
+	.sendmsg_locked    = tcp_sendmsg_locked,
+	.sendpage_locked   = tcp_sendpage_locked,
 	.peek_len	   = tcp_peek_len,
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_sock_common_setsockopt,
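Only inet_stream_ops gains the new hooks here, so generic callers must expect transports that leave them NULL. The kernel_sendmsg_locked()/kernel_sendpage_locked() front ends named in the commit message live outside this excerpt. Assuming kernel_sendmsg_locked() mirrors kernel_sendmsg() but takes a struct sock * and expects the caller to already hold the socket lock (the five-argument signature is an assumption here), a kernel caller might use it like this (function and buffer names are hypothetical):

#include <linux/net.h>
#include <linux/uio.h>
#include <net/sock.h>

/* Illustrative only: send a kernel buffer on a socket whose lock the caller
 * takes explicitly, via the assumed locked front end.
 */
static int send_buf_locked(struct sock *sk, void *buf, size_t len)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec vec = { .iov_base = buf, .iov_len = len };
	int ret;

	lock_sock(sk);
	ret = kernel_sendmsg_locked(sk, &msg, &vec, 1, len);
	release_sock(sk);

	return ret;
}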
@@ -1046,23 +1046,29 @@ out_err:
 }
 EXPORT_SYMBOL_GPL(do_tcp_sendpages);
 
-int tcp_sendpage(struct sock *sk, struct page *page, int offset,
-		 size_t size, int flags)
+int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
+			size_t size, int flags)
 {
-	ssize_t res;
-
 	if (!(sk->sk_route_caps & NETIF_F_SG) ||
 	    !sk_check_csum_caps(sk))
 		return sock_no_sendpage(sk->sk_socket, page, offset, size,
 					flags);
 
-	lock_sock(sk);
-
 	tcp_rate_check_app_limited(sk); /* is sending application-limited? */
 
-	res = do_tcp_sendpages(sk, page, offset, size, flags);
+	return do_tcp_sendpages(sk, page, offset, size, flags);
+}
+
+int tcp_sendpage(struct sock *sk, struct page *page, int offset,
+		 size_t size, int flags)
+{
+	int ret;
+
+	lock_sock(sk);
+	ret = tcp_sendpage_locked(sk, page, offset, size, flags);
 	release_sock(sk);
-	return res;
+
+	return ret;
 }
 EXPORT_SYMBOL(tcp_sendpage);
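After this hunk tcp_sendpage() is just lock_sock()/release_sock() around tcp_sendpage_locked(), so a caller that wants to push several pages under a single lock acquisition can call the locked variant directly. A rough sketch (hypothetical caller; it assumes the locked variant is declared where the caller can see it, and uses MSG_MORE so intermediate pages are not pushed individually):

/* Illustrative only: batch several pages under one socket lock.  Returns the
 * result of the last tcp_sendpage_locked() call, stopping at the first error.
 */
static int send_pages_batched(struct sock *sk, struct page **pages,
			      int npages, size_t len)
{
	int i, ret = 0;

	lock_sock(sk);
	for (i = 0; i < npages; i++) {
		int flags = (i < npages - 1) ? MSG_MORE : 0;

		ret = tcp_sendpage_locked(sk, pages[i], 0, len, flags);
		if (ret < 0)
			break;
	}
	release_sock(sk);

	return ret;
}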
@@ -1156,7 +1162,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 	return err;
 }
 
-int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
@@ -1167,8 +1173,6 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	bool sg;
 	long timeo;
 
-	lock_sock(sk);
-
 	flags = msg->msg_flags;
 	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
 		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
@@ -1377,7 +1381,6 @@ out:
 		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
 	}
 out_nopush:
-	release_sock(sk);
 	return copied + copied_syn;
 
 do_fault:
@@ -1401,9 +1404,19 @@ out_err:
 		sk->sk_write_space(sk);
 		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
 	}
-	release_sock(sk);
 	return err;
 }
+
+int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+	int ret;
+
+	lock_sock(sk);
+	ret = tcp_sendmsg_locked(sk, msg, size);
+	release_sock(sk);
+
+	return ret;
+}
 EXPORT_SYMBOL(tcp_sendmsg);
 
 /*