Merge branch 'akpm' (Andrew's patch-bomb)
Merge Andrew's second set of patches:
 - MM
 - a few random fixes
 - a couple of RTC leftovers

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (120 commits)
  rtc/rtc-88pm80x: remove unneed devm_kfree
  rtc/rtc-88pm80x: assign ret only when rtc_register_driver fails
  mm: hugetlbfs: close race during teardown of hugetlbfs shared page tables
  tmpfs: distribute interleave better across nodes
  mm: remove redundant initialization
  mm: warn if pg_data_t isn't initialized with zero
  mips: zero out pg_data_t when it's allocated
  memcg: gix memory accounting scalability in shrink_page_list
  mm/sparse: remove index_init_lock
  mm/sparse: more checks on mem_section number
  mm/sparse: optimize sparse_index_alloc
  memcg: add mem_cgroup_from_css() helper
  memcg: further prevent OOM with too many dirty pages
  memcg: prevent OOM with too many dirty pages
  mm: mmu_notifier: fix freed page still mapped in secondary MMU
  mm: memcg: only check anon swapin page charges for swap cache
  mm: memcg: only check swap cache pages for repeated charging
  mm: memcg: split swapin charge function into private and public part
  mm: memcg: remove needless !mm fixup to init_mm when charging
  mm: memcg: remove unneeded shmem charge type
  ...
@@ -4351,19 +4351,20 @@ static void tcp_ofo_queue(struct sock *sk)
 static bool tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
-static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+				 unsigned int size)
 {
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_rmem_schedule(sk, size)) {
+	    !sk_rmem_schedule(sk, skb, size)) {
 
 		if (tcp_prune_queue(sk) < 0)
 			return -1;
 
-		if (!sk_rmem_schedule(sk, size)) {
+		if (!sk_rmem_schedule(sk, skb, size)) {
 			if (!tcp_prune_ofo_queue(sk))
 				return -1;
 
-			if (!sk_rmem_schedule(sk, size))
+			if (!sk_rmem_schedule(sk, skb, size))
 				return -1;
 		}
 	}
@@ -4418,7 +4419,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 
 	TCP_ECN_check_ce(tp, skb);
 
-	if (unlikely(tcp_try_rmem_schedule(sk, skb->truesize))) {
+	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
 		__kfree_skb(skb);
 		return;
@@ -4552,17 +4553,17 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
 
 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	struct tcphdr *th;
 	bool fragstolen;
 
-	if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
-		goto err;
-
 	skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
 	if (!skb)
 		goto err;
 
+	if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th)))
+		goto err_free;
+
 	th = (struct tcphdr *)skb_put(skb, sizeof(*th));
 	skb_reset_transport_header(skb);
 	memset(th, 0, sizeof(*th));
@@ -4633,7 +4634,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	if (eaten <= 0) {
 queue_and_out:
 		if (eaten < 0 &&
-		    tcp_try_rmem_schedule(sk, skb->truesize))
+		    tcp_try_rmem_schedule(sk, skb, skb->truesize))
 			goto drop;
 
 		eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);