Automatic merge of /spare/repo/netdev-2.6 branch we18

--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -47,12 +47,12 @@ static void rif_check_expire(unsigned long dummy);
  *	Each RIF entry we learn is kept this way
  */
 
-struct rif_cache_s {
+struct rif_cache {
 	unsigned char addr[TR_ALEN];
 	int iface;
-	__u16 rcf;
-	__u16 rseg[8];
-	struct rif_cache_s *next;
+	__be16 rcf;
+	__be16 rseg[8];
+	struct rif_cache *next;
 	unsigned long last_used;
 	unsigned char local_ring;
 };
@@ -64,7 +64,7 @@ struct rif_cache_s {
  *	up a lot.
  */
 
-static struct rif_cache_s *rif_table[RIF_TABLE_SIZE];
+static struct rif_cache *rif_table[RIF_TABLE_SIZE];
 
 static DEFINE_SPINLOCK(rif_lock);
 
@@ -249,7 +249,7 @@ void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *
 {
 	int slack;
 	unsigned int hash;
-	struct rif_cache_s *entry;
+	struct rif_cache *entry;
 	unsigned char *olddata;
 	static const unsigned char mcast_func_addr[]
 		= {0xC0,0x00,0x00,0x04,0x00,0x00};
@@ -337,7 +337,7 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
 static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
 {
 	unsigned int hash, rii_p = 0;
-	struct rif_cache_s *entry;
+	struct rif_cache *entry;
 
 
 	spin_lock_bh(&rif_lock);
@@ -373,7 +373,7 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
 	 * FIXME: We ought to keep some kind of cache size
 	 * limiting and adjust the timers to suit.
 	 */
-	entry=kmalloc(sizeof(struct rif_cache_s),GFP_ATOMIC);
+	entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);
 
 	if(!entry)
 	{
@@ -435,7 +435,7 @@ static void rif_check_expire(unsigned long dummy)
 	spin_lock_bh(&rif_lock);
 
 	for(i =0; i < RIF_TABLE_SIZE; i++) {
-		struct rif_cache_s *entry, **pentry;
+		struct rif_cache *entry, **pentry;
 
 		pentry = rif_table+i;
 		while((entry=*pentry) != NULL) {
@@ -467,10 +467,10 @@ static void rif_check_expire(unsigned long dummy)
 
 #ifdef CONFIG_PROC_FS
 
-static struct rif_cache_s *rif_get_idx(loff_t pos)
+static struct rif_cache *rif_get_idx(loff_t pos)
 {
 	int i;
-	struct rif_cache_s *entry;
+	struct rif_cache *entry;
 	loff_t off = 0;
 
 	for(i = 0; i < RIF_TABLE_SIZE; i++)
@@ -493,7 +493,7 @@ static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
 static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	int i;
-	struct rif_cache_s *ent = v;
+	struct rif_cache *ent = v;
 
 	++*pos;
 
@@ -522,7 +522,7 @@ static void rif_seq_stop(struct seq_file *seq, void *v)
 static int rif_seq_show(struct seq_file *seq, void *v)
 {
 	int j, rcf_len, segment, brdgnmb;
-	struct rif_cache_s *entry = v;
+	struct rif_cache *entry = v;
 
 	if (v == SEQ_START_TOKEN)
 		seq_puts(seq,
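
The tr.c hunks are behavior-neutral: the struct tag loses its redundant `_s` suffix, and `rcf`/`rseg` change from `__u16` to `__be16` to document that they hold big-endian on-the-wire values, so sparse (`make C=1`) can warn when they are used without a byte-order conversion. A minimal user-space sketch of the property being annotated (the `wire` value here is purely illustrative):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	/* A 16-bit field as it appears on the wire (big-endian). */
	uint16_t wire = htons(0x02A0);

	/* Using 'wire' directly is the class of bug __be16 lets sparse
	 * catch; the CPU-order value must come from ntohs(). */
	printf("raw: 0x%04x  host order: 0x%04x\n", wire, ntohs(wire));
	return 0;
}
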
@@ -490,6 +490,14 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
 		/* Partially cloned skb? */
 		if (skb_shared(frag))
 			goto slow_path;
+
+		BUG_ON(frag->sk);
+		if (skb->sk) {
+			sock_hold(skb->sk);
+			frag->sk = skb->sk;
+			frag->destructor = sock_wfree;
+			skb->truesize -= frag->truesize;
+		}
 	}
 
 	/* Everything is OK. Generate! */
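
This addition (mirrored in the ip6_fragment() hunk further down) transfers socket ownership to each fragment: every frag takes a reference on the owning socket, inherits `sock_wfree` as its destructor, and the head skb's `truesize` is reduced so send-buffer accounting stays balanced when the fragments are freed independently. A rough user-space analogy of that hand-off, with a plain refcount standing in for the socket (all names illustrative):

#include <assert.h>
#include <stdio.h>

struct owner { int refs; };                          /* stands in for struct sock    */
struct frag  { struct owner *sk; long truesize; };   /* stands in for struct sk_buff */

/* Give frag 'f' its own reference on 'o' and move its byte budget
 * out of 'head', mirroring the sock_hold()/truesize lines above. */
static void adopt(struct owner *o, struct frag *head, struct frag *f)
{
	assert(f->sk == (struct owner *)0);  /* BUG_ON(frag->sk) */
	o->refs++;                           /* sock_hold(skb->sk) */
	f->sk = o;
	head->truesize -= f->truesize;
}

int main(void)
{
	struct owner sk = { 1 };
	struct frag head = { &sk, 3000 }, f = { 0, 1000 };

	adopt(&sk, &head, &f);
	printf("refs=%d head.truesize=%ld\n", sk.refs, head.truesize);
	return 0;
}
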
@@ -508,7 +508,6 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		rc = NF_ACCEPT;
 		/* do not touch skb anymore */
 		atomic_inc(&cp->in_pkts);
-		__ip_vs_conn_put(cp);
 		goto out;
 	}
 
@@ -940,37 +940,25 @@ void ip_ct_refresh_acct(struct ip_conntrack *ct,
 struct sk_buff *
 ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
-	struct sock *sk = skb->sk;
 #ifdef CONFIG_NETFILTER_DEBUG
 	unsigned int olddebug = skb->nf_debug;
 #endif
 
-	if (sk) {
-		sock_hold(sk);
-		skb_orphan(skb);
-	}
+	skb_orphan(skb);
 
 	local_bh_disable();
 	skb = ip_defrag(skb, user);
 	local_bh_enable();
 
-	if (!skb) {
-		if (sk)
-			sock_put(sk);
-		return skb;
-	}
-
-	if (sk) {
-		skb_set_owner_w(skb, sk);
-		sock_put(sk);
-	}
-
-	ip_send_check(skb->nh.iph);
-	skb->nfcache |= NFC_ALTERED;
+	if (skb) {
+		ip_send_check(skb->nh.iph);
+		skb->nfcache |= NFC_ALTERED;
 #ifdef CONFIG_NETFILTER_DEBUG
-	/* Packet path as if nothing had happened. */
-	skb->nf_debug = olddebug;
+		/* Packet path as if nothing had happened. */
+		skb->nf_debug = olddebug;
 #endif
+	}
 
 	return skb;
 }
@@ -4355,16 +4355,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 				goto no_ack;
 			}
 
-			if (eaten) {
-				if (tcp_in_quickack_mode(tp)) {
-					tcp_send_ack(sk);
-				} else {
-					tcp_send_delayed_ack(sk);
-				}
-			} else {
-				__tcp_ack_snd_check(sk, 0);
-			}
-
+			__tcp_ack_snd_check(sk, 0);
 no_ack:
 			if (eaten)
 				__kfree_skb(skb);
@@ -552,13 +552,17 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 		    skb_headroom(frag) < hlen)
 			goto slow_path;
 
-		/* Correct socket ownership. */
-		if (frag->sk == NULL)
-			goto slow_path;
-
 		/* Partially cloned skb? */
 		if (skb_shared(frag))
 			goto slow_path;
+
+		BUG_ON(frag->sk);
+		if (skb->sk) {
+			sock_hold(skb->sk);
+			frag->sk = skb->sk;
+			frag->destructor = sock_wfree;
+			skb->truesize -= frag->truesize;
+		}
 	}
 
 	err = 0;
@@ -1116,12 +1120,10 @@ int ip6_push_pending_frames(struct sock *sk)
 		tail_skb = &(tmp_skb->next);
 		skb->len += tmp_skb->len;
 		skb->data_len += tmp_skb->len;
-#if 0 /* Logically correct, but useless work, ip_fragment() will have to undo */
 		skb->truesize += tmp_skb->truesize;
 		__sock_put(tmp_skb->sk);
 		tmp_skb->destructor = NULL;
 		tmp_skb->sk = NULL;
-#endif
 	}
 
 	ipv6_addr_copy(final_dst, &fl->fl6_dst);
@@ -84,6 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
 		mtu = IPV6_MIN_MTU;
 
 	if (skb->len > mtu) {
+		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
 		ret = -EMSGSIZE;
 	}
@@ -113,6 +113,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 
 			xdst = (struct xfrm_dst *)dst1;
 			xdst->route = &rt->u.dst;
+			if (rt->rt6i_node)
+				xdst->route_cookie = rt->rt6i_node->fn_sernum;
 
 			dst1->next = dst_prev;
 			dst_prev = dst1;
@@ -137,6 +139,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 
 		dst_prev->child = &rt->u.dst;
 		dst->path = &rt->u.dst;
+		if (rt->rt6i_node)
+			((struct xfrm_dst *)dst)->path_cookie = rt->rt6i_node->fn_sernum;
 
 		*dst_p = dst;
 		dst = dst_prev;
@@ -735,11 +735,15 @@ static inline int do_one_broadcast(struct sock *sk,
 
 	sock_hold(sk);
 	if (p->skb2 == NULL) {
-		if (atomic_read(&p->skb->users) != 1) {
+		if (skb_shared(p->skb)) {
 			p->skb2 = skb_clone(p->skb, p->allocation);
 		} else {
-			p->skb2 = p->skb;
-			atomic_inc(&p->skb->users);
+			p->skb2 = skb_get(p->skb);
+			/*
+			 * skb ownership may have been set when
+			 * delivered to a previous socket.
+			 */
+			skb_orphan(p->skb2);
 		}
 	}
 	if (p->skb2 == NULL) {
@@ -785,11 +789,12 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
 		do_one_broadcast(sk, &info);
 
-	kfree_skb(skb);
-
 	netlink_unlock_table();
 
 	if (info.skb2)
 		kfree_skb(info.skb2);
+	kfree_skb(skb);
 
 	if (info.delivered) {
 		if (info.congested && (allocation & __GFP_WAIT))
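
Two things change in the broadcast path: the open-coded refcount test becomes the idiomatic `skb_shared()`/`skb_get()` pair, and `skb_orphan()` strips any ownership left over from delivery to an earlier socket so the clone-free path cannot charge one receiver's buffer to another's socket; the original skb is now freed only after the table is unlocked. The refcount idiom, sketched outside the kernel (hypothetical helpers mirroring skb_shared/skb_get):

#include <stdio.h>

struct buf { int users; };

static int buf_shared(const struct buf *b) { return b->users > 1; }

static struct buf *buf_get(struct buf *b)
{
	b->users++;     /* like skb_get(): take a reference... */
	return b;       /* ...and hand back the same buffer    */
}

int main(void)
{
	struct buf b = { 1 };

	/* Delivery picks: shared ? clone : take a reference,
	 * as do_one_broadcast() now does with p->skb. */
	printf("shared before: %d\n", buf_shared(&b));
	buf_get(&b);
	printf("shared after:  %d\n", buf_shared(&b));
	return 0;
}
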
@@ -53,7 +53,6 @@
 
 struct netem_sched_data {
 	struct Qdisc *qdisc;
-	struct sk_buff_head delayed;
 	struct timer_list timer;
 
 	u32 latency;
@@ -63,11 +62,12 @@ struct netem_sched_data {
 	u32 gap;
 	u32 jitter;
 	u32 duplicate;
+	u32 reorder;
 
 	struct crndstate {
 		unsigned long last;
 		unsigned long rho;
-	} delay_cor, loss_cor, dup_cor;
+	} delay_cor, loss_cor, dup_cor, reorder_cor;
 
 	struct disttable {
 		u32 size;
@@ -137,122 +137,68 @@ static long tabledist(unsigned long mu, long sigma,
 	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 }
 
-/* Put skb in the private delayed queue. */
-static int netem_delay(struct Qdisc *sch, struct sk_buff *skb)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-	psched_tdiff_t td;
-	psched_time_t now;
-
-	PSCHED_GET_TIME(now);
-	td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
-
-	/* Always queue at tail to keep packets in order */
-	if (likely(q->delayed.qlen < q->limit)) {
-		struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
-
-		PSCHED_TADD2(now, td, cb->time_to_send);
-
-		pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb,
-			 now, cb->time_to_send);
-
-		__skb_queue_tail(&q->delayed, skb);
-		return NET_XMIT_SUCCESS;
-	}
-
-	pr_debug("netem_delay: queue over limit %d\n", q->limit);
-	sch->qstats.overlimits++;
-	kfree_skb(skb);
-	return NET_XMIT_DROP;
-}
-
 /*
- * Move a packet that is ready to send from the delay holding
- * list to the underlying qdisc.
+ * Insert one skb into qdisc.
+ * Note: parent depends on return value to account for queue length.
+ * 	NET_XMIT_DROP: queue length didn't change.
+ *      NET_XMIT_SUCCESS: one skb was queued.
  */
-static int netem_run(struct Qdisc *sch)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-	struct sk_buff *skb;
-	psched_time_t now;
-
-	PSCHED_GET_TIME(now);
-
-	skb = skb_peek(&q->delayed);
-	if (skb) {
-		const struct netem_skb_cb *cb
-			= (const struct netem_skb_cb *)skb->cb;
-		long delay
-			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
-		pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
-
-		/* if more time remaining? */
-		if (delay > 0) {
-			mod_timer(&q->timer, jiffies + delay);
-			return 1;
-		}
-
-		__skb_unlink(skb, &q->delayed);
-
-		if (q->qdisc->enqueue(skb, q->qdisc)) {
-			sch->q.qlen--;
-			sch->qstats.drops++;
-		}
-	}
-
-	return 0;
-}
-
 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
+	struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
+	struct sk_buff *skb2;
 	int ret;
+	int count = 1;
 
 	pr_debug("netem_enqueue skb=%p\n", skb);
 
+	/* Random duplication */
+	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
+		++count;
+
 	/* Random packet drop 0 => none, ~0 => all */
-	if (q->loss && q->loss >= get_crandom(&q->loss_cor)) {
-		pr_debug("netem_enqueue: random loss\n");
+	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
+		--count;
+
+	if (count == 0) {
 		sch->qstats.drops++;
 		kfree_skb(skb);
-		return 0;	/* lie about loss so TCP doesn't know */
+		return NET_XMIT_DROP;
 	}
 
-	/* Random duplication */
-	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) {
-		struct sk_buff *skb2;
-
-		skb2 = skb_clone(skb, GFP_ATOMIC);
-		if (skb2 && netem_delay(sch, skb2) == NET_XMIT_SUCCESS) {
-			struct Qdisc *qp;
-
-			/* Since one packet can generate two packets in the
-			 * queue, the parent's qlen accounting gets confused,
-			 * so fix it.
-			 */
-			qp = qdisc_lookup(sch->dev, TC_H_MAJ(sch->parent));
-			if (qp)
-				qp->q.qlen++;
-
-			sch->q.qlen++;
-			sch->bstats.bytes += skb2->len;
-			sch->bstats.packets++;
-		} else
-			sch->qstats.drops++;
-	}
-
-	/* If doing simple delay then gap == 0 so all packets
-	 * go into the delayed holding queue
-	 * otherwise if doing out of order only "1 out of gap"
-	 * packets will be delayed.
+	/*
+	 * If we need to duplicate packet, then re-insert at top of the
+	 * qdisc tree, since parent queuer expects that only one
+	 * skb will be queued.
 	 */
-	if (q->counter < q->gap) {
+	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
+		struct Qdisc *rootq = sch->dev->qdisc;
+		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
+		q->duplicate = 0;
+
+		rootq->enqueue(skb2, rootq);
+		q->duplicate = dupsave;
+	}
+
+	if (q->gap == 0			/* not doing reordering */
+	    || q->counter < q->gap	/* inside last reordering gap */
+	    || q->reorder < get_crandom(&q->reorder_cor)) {
+		psched_time_t now;
+		PSCHED_GET_TIME(now);
+		PSCHED_TADD2(now, tabledist(q->latency, q->jitter,
+					    &q->delay_cor, q->delay_dist),
+			     cb->time_to_send);
 		++q->counter;
 		ret = q->qdisc->enqueue(skb, q->qdisc);
 	} else {
+		/*
+		 * Do re-ordering by putting one out of N packets at the front
+		 * of the queue.
		 */
+		PSCHED_GET_TIME(cb->time_to_send);
 		q->counter = 0;
-		ret = netem_delay(sch, skb);
-		netem_run(sch);
+		ret = q->qdisc->ops->requeue(skb, q->qdisc);
 	}
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
@@ -296,22 +242,33 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
-	int pending;
-
-	pending = netem_run(sch);
 
 	skb = q->qdisc->dequeue(q->qdisc);
 	if (skb) {
-		pr_debug("netem_dequeue: return skb=%p\n", skb);
-		sch->q.qlen--;
-		sch->flags &= ~TCQ_F_THROTTLED;
-	}
-	else if (pending) {
-		pr_debug("netem_dequeue: throttling\n");
-		sch->flags |= TCQ_F_THROTTLED;
-	}
+		const struct netem_skb_cb *cb
+			= (const struct netem_skb_cb *)skb->cb;
+		psched_time_t now;
+		long delay;
 
-	return skb;
+		/* if more time remaining? */
+		PSCHED_GET_TIME(now);
+		delay = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
+		pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
+		if (delay <= 0) {
+			pr_debug("netem_dequeue: return skb=%p\n", skb);
+			sch->q.qlen--;
+			sch->flags &= ~TCQ_F_THROTTLED;
+			return skb;
+		}
+
+		mod_timer(&q->timer, jiffies + delay);
+		sch->flags |= TCQ_F_THROTTLED;
+
+		if (q->qdisc->ops->requeue(skb, q->qdisc) != 0)
+			sch->qstats.drops++;
+	}
+
+	return NULL;
 }
 
 static void netem_watchdog(unsigned long arg)
@@ -328,8 +285,6 @@ static void netem_reset(struct Qdisc *sch)
 	struct netem_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
-	skb_queue_purge(&q->delayed);
-
 	sch->q.qlen = 0;
 	sch->flags &= ~TCQ_F_THROTTLED;
 	del_timer_sync(&q->timer);
@@ -397,6 +352,19 @@ static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
 	return 0;
 }
 
+static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	const struct tc_netem_reorder *r = RTA_DATA(attr);
+
+	if (RTA_PAYLOAD(attr) != sizeof(*r))
+		return -EINVAL;
+
+	q->reorder = r->probability;
+	init_crandom(&q->reorder_cor, r->correlation);
+	return 0;
+}
+
 static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -417,9 +385,15 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 	q->jitter = qopt->jitter;
 	q->limit = qopt->limit;
 	q->gap = qopt->gap;
+	q->counter = 0;
 	q->loss = qopt->loss;
 	q->duplicate = qopt->duplicate;
 
+	/* for compatiablity with earlier versions.
+	 * if gap is set, need to assume 100% probablity
+	 */
+	q->reorder = ~0;
+
 	/* Handle nested options after initial queue options.
 	 * Should have put all options in nested format but too late now.
 	 */
@@ -441,6 +415,11 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 			if (ret)
 				return ret;
 		}
+		if (tb[TCA_NETEM_REORDER-1]) {
+			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
+			if (ret)
+				return ret;
+		}
 	}
 
@@ -455,11 +434,9 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
 	if (!opt)
 		return -EINVAL;
 
-	skb_queue_head_init(&q->delayed);
 	init_timer(&q->timer);
 	q->timer.function = netem_watchdog;
 	q->timer.data = (unsigned long) sch;
-	q->counter = 0;
 
 	q->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
 	if (!q->qdisc) {
@@ -491,6 +468,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct rtattr *rta = (struct rtattr *) b;
 	struct tc_netem_qopt qopt;
 	struct tc_netem_corr cor;
+	struct tc_netem_reorder reorder;
 
 	qopt.latency = q->latency;
 	qopt.jitter = q->jitter;
@@ -504,6 +482,11 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	cor.loss_corr = q->loss_cor.rho;
 	cor.dup_corr = q->dup_cor.rho;
 	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);
+
+	reorder.probability = q->reorder;
+	reorder.correlation = q->reorder_cor.rho;
+	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
+
 	rta->rta_len = skb->tail - b;
 
 	return skb->len;
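
With the private delay queue gone, netem keeps everything in the inner qdisc and reordering falls out of the enqueue decision: a packet is delayed (enqueued with a future `time_to_send`) unless it is the one-in-`gap` packet or the `reorder` probability fires, in which case it is requeued at the head with `time_to_send = now`. The decision predicate restated standalone (`crandom` stands for netem's correlated random source; names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Probabilities use netem's scale: 0 => never, UINT32_MAX => always. */
static int delay_this_packet(uint32_t gap, uint32_t *counter,
			     uint32_t reorder, uint32_t crandom)
{
	if (gap == 0               /* not doing reordering */
	    || *counter < gap      /* inside the current reordering gap */
	    || reorder < crandom) {
		++*counter;        /* delayed, stays in order */
		return 1;
	}
	*counter = 0;              /* this one jumps to the front */
	return 0;
}

int main(void)
{
	uint32_t counter = 0;

	/* gap=3, reorder=100%: every 4th packet goes to the front. */
	for (int i = 0; i < 8; i++)
		printf("pkt %d: %s\n", i,
		       delay_this_packet(3, &counter, UINT32_MAX, 0) ?
		       "delayed" : "to front");
	return 0;
}
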
@@ -770,33 +770,12 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
 		if (err)
 			goto out_mknod_parent;
-		/*
-		 * Yucky last component or no last component at all?
-		 * (foo/., foo/.., /////)
-		 */
-		err = -EEXIST;
-		if (nd.last_type != LAST_NORM)
-			goto out_mknod;
-		/*
-		 * Lock the directory.
-		 */
-		down(&nd.dentry->d_inode->i_sem);
-		/*
-		 * Do the final lookup.
-		 */
-		dentry = lookup_hash(&nd.last, nd.dentry);
+
+		dentry = lookup_create(&nd, 0);
 		err = PTR_ERR(dentry);
 		if (IS_ERR(dentry))
 			goto out_mknod_unlock;
-		err = -ENOENT;
-		/*
-		 * Special case - lookup gave negative, but... we had foo/bar/
-		 * From the vfs_mknod() POV we just have a negative dentry -
-		 * all is fine. Let's be bastards - you had / on the end, you've
-		 * been asking for (non-existent) directory. -ENOENT for you.
-		 */
-		if (nd.last.name[nd.last.len] && !dentry->d_inode)
-			goto out_mknod_dput;
+
 		/*
 		 * All right, let's create it.
 		 */
@@ -845,7 +824,6 @@ out_mknod_dput:
 	dput(dentry);
 out_mknod_unlock:
 	up(&nd.dentry->d_inode->i_sem);
-out_mknod:
 	path_release(&nd);
 out_mknod_parent:
 	if (err==-EEXIST)
@@ -698,7 +698,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 			return -ENOMEM;
 
 		if (skb1->sk)
-			skb_set_owner_w(skb, skb1->sk);
+			skb_set_owner_w(skb2, skb1->sk);
 
 		/* Looking around. Are we still alive?
 		 * OK, link new skb, drop old one */
@@ -1136,7 +1136,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
 	struct xfrm_dst *last;
 	u32 mtu;
 
-	if (!dst_check(dst->path, 0) ||
+	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
 	    (dst->dev && !netif_running(dst->dev)))
 		return 0;
 
@@ -1156,7 +1156,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
 			xdst->child_mtu_cached = mtu;
 		}
 
-		if (!dst_check(xdst->route, 0))
+		if (!dst_check(xdst->route, xdst->route_cookie))
 			return 0;
 		mtu = dst_mtu(xdst->route);
 		if (xdst->route_mtu_cached != mtu) {
@@ -34,14 +34,21 @@ static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
 {
 	struct rtattr *rt = xfrma[type - 1];
 	struct xfrm_algo *algp;
+	int len;
 
 	if (!rt)
 		return 0;
 
-	if ((rt->rta_len - sizeof(*rt)) < sizeof(*algp))
+	len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp);
+	if (len < 0)
 		return -EINVAL;
 
 	algp = RTA_DATA(rt);
+
+	len -= (algp->alg_key_len + 7U) / 8;
+	if (len < 0)
+		return -EINVAL;
+
 	switch (type) {
 	case XFRMA_ALG_AUTH:
 		if (!algp->alg_key_len &&
@@ -162,6 +169,7 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
 	struct rtattr *rta = u_arg;
 	struct xfrm_algo *p, *ualg;
 	struct xfrm_algo_desc *algo;
+	int len;
 
 	if (!rta)
 		return 0;
@@ -173,11 +181,12 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
 		return -ENOSYS;
 	*props = algo->desc.sadb_alg_id;
 
-	p = kmalloc(sizeof(*ualg) + ualg->alg_key_len, GFP_KERNEL);
+	len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8;
+	p = kmalloc(len, GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
-	memcpy(p, ualg, sizeof(*ualg) + ualg->alg_key_len);
+	memcpy(p, ualg, len);
 	*algpp = p;
 	return 0;
 }
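
Both xfrm_user fixes hinge on the same unit mismatch: `alg_key_len` is a bit count, so the number of key bytes that follow the header is `(alg_key_len + 7) / 8`, rounded up to whole bytes. Using the raw bit count both over-reads in attach_one_algo() and lets a malformed message claim a key longer than its attribute payload in verify_one_alg(). The conversion on its own:

#include <stdio.h>

int main(void)
{
	unsigned int key_bits[] = { 1, 8, 9, 128, 160 };

	for (unsigned int i = 0; i < sizeof(key_bits) / sizeof(key_bits[0]); i++)
		printf("%3u bits -> %2u bytes\n", key_bits[i],
		       (key_bits[i] + 7U) / 8);   /* round up to whole bytes */
	return 0;
}
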