Merge branch 'percpu-for-linus' into percpu-for-next
Conflicts:
	arch/sparc/kernel/smp_64.c
	arch/x86/kernel/cpu/perf_counter.c
	arch/x86/kernel/setup_percpu.c
	drivers/cpufreq/cpufreq_ondemand.c
	mm/percpu.c

Conflicts in core and arch percpu codes are mostly from commit
ed78e1e078dd44249f88b1dd8c76dafb39567161 which substituted many
num_possible_cpus() with nr_cpu_ids.  As the for-next branch has moved
all the first chunk allocators into mm/percpu.c, the changes are moved
from arch code to mm/percpu.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
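For context, a minimal sketch (not part of this merge, and not a real kernel symbol) of the kind of substitution commit ed78e1e0 made and that the resolution re-applies in mm/percpu.c: allocations and loops indexed by CPU id must be sized with nr_cpu_ids (the highest possible CPU id plus one) rather than num_possible_cpus() (a population count of cpu_possible_mask), because possible CPU ids may be sparse. The function setup_cpu_ptrs_example() below is illustrative only.

/*
 * Illustrative sketch, assuming early-boot kernel context.
 * nr_cpu_ids bounds the largest possible CPU id; num_possible_cpus()
 * only counts bits set in cpu_possible_mask and can undersize an
 * array that is indexed by CPU id.
 */
#include <linux/cpumask.h>
#include <linux/bootmem.h>

static void **cpu_ptrs_example;

static void __init setup_cpu_ptrs_example(void)
{
	unsigned int cpu;

	/* sized by the highest possible CPU id, not by the CPU count */
	cpu_ptrs_example = alloc_bootmem(nr_cpu_ids * sizeof(cpu_ptrs_example[0]));

	/* alloc_bootmem() returns zeroed memory; the loop only shows
	 * that CPU ids used as indexes may reach nr_cpu_ids - 1 */
	for_each_possible_cpu(cpu)
		cpu_ptrs_example[cpu] = NULL;
}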
@@ -1304,7 +1304,9 @@ static void arp_format_neigh_entry(struct seq_file *seq,
 			hbuffer[k++] = hex_asc_lo(n->ha[j]);
 			hbuffer[k++] = ':';
 		}
-		hbuffer[--k] = 0;
+		if (k != 0)
+			--k;
+		hbuffer[k] = 0;
 #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
 		}
 #endif
@@ -316,8 +316,8 @@ static inline void check_tnode(const struct tnode *tn)
 
 static const int halve_threshold = 25;
 static const int inflate_threshold = 50;
-static const int halve_threshold_root = 8;
-static const int inflate_threshold_root = 15;
+static const int halve_threshold_root = 15;
+static const int inflate_threshold_root = 25;
 
 
 static void __alias_free_mem(struct rcu_head *head)
@@ -735,10 +735,10 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	tos = tiph->tos;
-	if (tos&1) {
+	if (tos == 1) {
+		tos = 0;
 		if (skb->protocol == htons(ETH_P_IP))
 			tos = old_iph->tos;
-		tos &= ~1;
 	}
 
 	{
@@ -1243,7 +1243,6 @@ int ip_push_pending_frames(struct sock *sk)
 		skb->len += tmp_skb->len;
 		skb->data_len += tmp_skb->len;
 		skb->truesize += tmp_skb->truesize;
-		__sock_put(tmp_skb->sk);
 		tmp_skb->destructor = NULL;
 		tmp_skb->sk = NULL;
 	}
@@ -339,7 +339,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct sock *sk = sock->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	if (sk->sk_state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
@@ -1160,6 +1160,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 #ifdef CONFIG_TCP_MD5SIG
 static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
+	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
 };
 #endif
 
@@ -1373,7 +1374,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		 */
 		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
 		if (newkey != NULL)
-			tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
+			tcp_v4_md5_do_add(newsk, newinet->daddr,
 					  newkey, key->keylen);
 		newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
 	}
@@ -2261,7 +2261,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 #ifdef CONFIG_TCP_MD5SIG
 	/* Okay, we have all we need - do the md5 hash if needed */
 	if (md5) {
-		tp->af_specific->calc_md5_hash(md5_hash_location,
+		tcp_rsk(req)->af_specific->calc_md5_hash(md5_hash_location,
 					       md5, NULL, req, skb);
 	}
 #endif
@@ -136,7 +136,8 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 		case IPPROTO_TCP:
 		case IPPROTO_SCTP:
 		case IPPROTO_DCCP:
-			if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
+			if (xprth + 4 < skb->data ||
+			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
 				__be16 *ports = (__be16 *)xprth;
 
 				fl->fl_ip_sport = ports[!!reverse];