Merge branch 'akpm' (Andrew's patch-bomb)
Merge Andrew's second set of patches:

 - MM
 - a few random fixes
 - a couple of RTC leftovers

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (120 commits)
  rtc/rtc-88pm80x: remove unneeded devm_kfree
  rtc/rtc-88pm80x: assign ret only when rtc_register_driver fails
  mm: hugetlbfs: close race during teardown of hugetlbfs shared page tables
  tmpfs: distribute interleave better across nodes
  mm: remove redundant initialization
  mm: warn if pg_data_t isn't initialized with zero
  mips: zero out pg_data_t when it's allocated
  memcg: fix memory accounting scalability in shrink_page_list
  mm/sparse: remove index_init_lock
  mm/sparse: more checks on mem_section number
  mm/sparse: optimize sparse_index_alloc
  memcg: add mem_cgroup_from_css() helper
  memcg: further prevent OOM with too many dirty pages
  memcg: prevent OOM with too many dirty pages
  mm: mmu_notifier: fix freed page still mapped in secondary MMU
  mm: memcg: only check anon swapin page charges for swap cache
  mm: memcg: only check swap cache pages for repeated charging
  mm: memcg: split swapin charge function into private and public part
  mm: memcg: remove needless !mm fixup to init_mm when charging
  mm: memcg: remove unneeded shmem charge type
  ...
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
 obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
 obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
-obj-$(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) += tcp_memcontrol.o
+obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
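The CONFIG_CGROUP_MEM_RES_CTLR_KMEM -> CONFIG_MEMCG_KMEM hunks here and in sysctl_net_ipv4.c and tcp_ipv4.c below are fallout from the memcg Kconfig rename carried in this patch-bomb. As a rough sketch (purely hypothetical, not part of this commit), code that has to build against kernels on either side of the rename could alias the old symbol:

/* Hypothetical compatibility shim, not part of this commit: alias the
 * pre-rename symbol so a single #ifdef covers both kernel generations. */
#if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && !defined(CONFIG_MEMCG_KMEM)
#define CONFIG_MEMCG_KMEM 1
#endif

#ifdef CONFIG_MEMCG_KMEM
/* TCP memcg (kmem accounting) specific code goes here */
#endif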
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -184,7 +184,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
 	int ret;
 	unsigned long vec[3];
 	struct net *net = current->nsproxy->net_ns;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
 	struct mem_cgroup *memcg;
 #endif
 
@@ -203,7 +203,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
 	if (ret)
 		return ret;
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
 	rcu_read_lock();
 	memcg = mem_cgroup_from_task(current);
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4351,19 +4351,20 @@ static void tcp_ofo_queue(struct sock *sk)
 static bool tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
-static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+				 unsigned int size)
 {
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_rmem_schedule(sk, size)) {
+	    !sk_rmem_schedule(sk, skb, size)) {
 
 		if (tcp_prune_queue(sk) < 0)
 			return -1;
 
-		if (!sk_rmem_schedule(sk, size)) {
+		if (!sk_rmem_schedule(sk, skb, size)) {
 			if (!tcp_prune_ofo_queue(sk))
 				return -1;
 
-			if (!sk_rmem_schedule(sk, size))
+			if (!sk_rmem_schedule(sk, skb, size))
 				return -1;
 		}
 	}
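The skb argument threaded through tcp_try_rmem_schedule() lets sk_rmem_schedule() consult the skb itself, so that pfmemalloc skbs (allocated from the emergency reserves that the swap-over-network work in this series depends on) are admitted even when regular receive-buffer accounting would refuse them. A sketch of the reworked helper, approximately as it appears in include/net/sock.h after this series (verify against your tree):

static inline bool sk_rmem_schedule(struct sock *sk, struct sk_buff *skb,
				    unsigned int size)
{
	if (!sk_has_account(sk))
		return true;	/* protocol does no rmem accounting */
	return size <= sk->sk_forward_alloc ||
	       __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
	       skb_pfmemalloc(skb);	/* always admit emergency skbs */
}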
@@ -4418,7 +4419,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 
 	TCP_ECN_check_ce(tp, skb);
 
-	if (unlikely(tcp_try_rmem_schedule(sk, skb->truesize))) {
+	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
 		__kfree_skb(skb);
 		return;
@@ -4552,17 +4553,17 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
 
 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	struct tcphdr *th;
 	bool fragstolen;
 
-	if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
-		goto err;
-
 	skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
 	if (!skb)
 		goto err;
 
+	if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th)))
+		goto err_free;
+
 	th = (struct tcphdr *)skb_put(skb, sizeof(*th));
 	skb_reset_transport_header(skb);
 	memset(th, 0, sizeof(*th));
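Note the reordering in tcp_send_rcvq(): the rmem check must now run after alloc_skb(), since it needs the skb to test skb_pfmemalloc(); that is also why skb is initialised to NULL and why a charge failure jumps to a separate err_free target. The tail of the function presumably looks like the fragment below (the label bodies are an assumption inferred from the goto targets in the hunk; they are not shown in this diff):

	/* ... fill and queue the skb ... */
	return 0;

err_free:
	kfree_skb(skb);		/* charge failed after allocation */
err:
	return -ENOMEM;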
@@ -4633,7 +4634,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	if (eaten <= 0) {
 queue_and_out:
 		if (eaten < 0 &&
-		    tcp_try_rmem_schedule(sk, skb->truesize))
+		    tcp_try_rmem_schedule(sk, skb, skb->truesize))
 			goto drop;
 
 		eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2633,7 +2633,7 @@ struct proto tcp_prot = {
 	.compat_setsockopt	= compat_tcp_setsockopt,
 	.compat_getsockopt	= compat_tcp_getsockopt,
 #endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
 	.init_cgroup		= tcp_init_cgroup,
 	.destroy_cgroup		= tcp_destroy_cgroup,
 	.proto_cgroup		= tcp_proto_cgroup,
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2045,7 +2045,8 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 	if (unlikely(sk->sk_state == TCP_CLOSE))
 		return;
 
-	if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC))
+	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
+			   sk_gfp_atomic(sk, GFP_ATOMIC)))
 		tcp_check_probe_timer(sk);
 }
 
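Every GFP_ATOMIC -> sk_gfp_atomic(sk, GFP_ATOMIC) conversion in tcp_output.c has the same intent: if the socket is a memalloc socket (swap over NBD/NFS), its atomic allocations may dip into the __GFP_MEMALLOC reserves. The helper introduced by this series is essentially the macro below, quoted approximately from include/net/sock.h of this kernel generation (note that the gfp_mask argument is effectively ignored; verify against your tree):

/* Allow GFP_ATOMIC allocations on behalf of a memalloc socket to use
 * the emergency reserves; plain sockets get ordinary GFP_ATOMIC. */
#define sk_gfp_atomic(sk, gfp_mask)				\
	(GFP_ATOMIC | ((sk)->sk_allocation & __GFP_MEMALLOC))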
@@ -2666,7 +2667,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 
 	if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
 		s_data_desired = cvp->s_data_desired;
-	skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired, GFP_ATOMIC);
+	skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired,
+			sk_gfp_atomic(sk, GFP_ATOMIC));
 	if (unlikely(!skb)) {
 		dst_release(dst);
 		return NULL;
@@ -3064,7 +3066,7 @@ void tcp_send_ack(struct sock *sk)
 	 * tcp_transmit_skb() will set the ownership to this
 	 * sock.
 	 */
-	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+	buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
 	if (buff == NULL) {
 		inet_csk_schedule_ack(sk);
 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
@@ -3079,7 +3081,7 @@ void tcp_send_ack(struct sock *sk)
 
 	/* Send it off, this clears delayed acks for us. */
 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
-	tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
+	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
 }
 
 /* This routine sends a packet with an out of date sequence
@@ -3099,7 +3101,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
 	struct sk_buff *skb;
 
 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
-	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+	skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
 	if (skb == NULL)
 		return -1;