Merge branch 'upstream-fixes'

Jeff Garzik
2005-10-03 22:06:19 -04:00
588 changed files with 22539 additions and 6378 deletions

View File

@@ -26,19 +26,18 @@
struct inet_hashinfo;
/* I have no idea if this is a good hash for v6 or not. -DaveM */
static inline int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport,
const struct in6_addr *faddr, const u16 fport,
const int ehash_size)
static inline unsigned int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport,
const struct in6_addr *faddr, const u16 fport)
{
int hashent = (lport ^ fport);
unsigned int hashent = (lport ^ fport);
hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
hashent ^= hashent >> 16;
hashent ^= hashent >> 8;
return (hashent & (ehash_size - 1));
return hashent;
}
static inline int inet6_sk_ehashfn(const struct sock *sk, const int ehash_size)
static inline int inet6_sk_ehashfn(const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -46,7 +45,7 @@ static inline int inet6_sk_ehashfn(const struct sock *sk, const int ehash_size)
const struct in6_addr *faddr = &np->daddr;
const __u16 lport = inet->num;
const __u16 fport = inet->dport;
return inet6_ehashfn(laddr, lport, faddr, fport, ehash_size);
return inet6_ehashfn(laddr, lport, faddr, fport);
}
/*
@@ -69,14 +68,14 @@ static inline struct sock *
/* Optimize here for direct hit, only listening connections can
* have wildcards anyways.
*/
const int hash = inet6_ehashfn(daddr, hnum, saddr, sport,
hashinfo->ehash_size);
struct inet_ehash_bucket *head = &hashinfo->ehash[hash];
unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport);
struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
prefetch(head->chain.first);
read_lock(&head->lock);
sk_for_each(sk, node, &head->chain) {
/* For IPV6 do the cheaper port and family tests first. */
if (INET6_MATCH(sk, saddr, daddr, ports, dif))
if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif))
goto hit; /* You sunk my battleship! */
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
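The INET6_MATCH change above makes the lookup compare the cached full hash before it touches the two 128-bit addresses, so almost every non-matching socket on the chain is rejected with a single integer comparison. A minimal userspace sketch of that idea (all names below are made up for illustration, not kernel code):

#include <string.h>

struct ex_addr6 { unsigned int s6_addr32[4]; };

struct ex_sock {
	unsigned int	hash;		/* cached full hash, cf. sk_hash */
	struct ex_addr6	laddr, faddr;
	unsigned short	lport, fport;
};

static int ex_match(const struct ex_sock *s, unsigned int hash,
		    const struct ex_addr6 *laddr, const struct ex_addr6 *faddr,
		    unsigned short lport, unsigned short fport)
{
	/* cheap integer tests first, expensive address compares last */
	return s->hash == hash &&
	       s->lport == lport && s->fport == fport &&
	       !memcmp(&s->laddr, laddr, sizeof(*laddr)) &&
	       !memcmp(&s->faddr, faddr, sizeof(*faddr));
}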

View File

@@ -108,7 +108,7 @@ struct inet_hashinfo {
struct inet_bind_hashbucket *bhash;
int bhash_size;
int ehash_size;
unsigned int ehash_size;
/* All sockets in TCP_LISTEN state will be in here. This is the only
* table where wildcard'd TCP sockets can exist. Hash function here
@@ -130,17 +130,16 @@ struct inet_hashinfo {
int port_rover;
};
static inline int inet_ehashfn(const __u32 laddr, const __u16 lport,
const __u32 faddr, const __u16 fport,
const int ehash_size)
static inline unsigned int inet_ehashfn(const __u32 laddr, const __u16 lport,
const __u32 faddr, const __u16 fport)
{
int h = (laddr ^ lport) ^ (faddr ^ fport);
unsigned int h = (laddr ^ lport) ^ (faddr ^ fport);
h ^= h >> 16;
h ^= h >> 8;
return h & (ehash_size - 1);
return h;
}
static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size)
static inline int inet_sk_ehashfn(const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const __u32 laddr = inet->rcv_saddr;
@@ -148,7 +147,14 @@ static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size)
const __u32 faddr = inet->daddr;
const __u16 fport = inet->dport;
return inet_ehashfn(laddr, lport, faddr, fport, ehash_size);
return inet_ehashfn(laddr, lport, faddr, fport);
}
static inline struct inet_ehash_bucket *inet_ehash_bucket(
struct inet_hashinfo *hashinfo,
unsigned int hash)
{
return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}
extern struct inet_bind_bucket *
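The pattern above splits hashing from indexing: inet_ehashfn() now returns the full 32-bit value, and the new inet_ehash_bucket() helper masks it down to a power-of-two table index only at lookup time, which is what lets the same full hash be stored in sk_hash and reused. A rough userspace sketch under those assumptions (names and sizes are illustrative):

struct ex_bucket { void *chain; };

struct ex_hashinfo {
	struct ex_bucket *ehash;
	unsigned int	  ehash_size;	/* assumed to be a power of two */
};

static unsigned int ex_ehashfn(unsigned int laddr, unsigned short lport,
			       unsigned int faddr, unsigned short fport)
{
	unsigned int h = (laddr ^ lport) ^ (faddr ^ fport);

	h ^= h >> 16;
	h ^= h >> 8;
	return h;			/* full hash, no masking here */
}

static struct ex_bucket *ex_ehash_bucket(struct ex_hashinfo *hi, unsigned int hash)
{
	return &hi->ehash[hash & (hi->ehash_size - 1)];	/* mask only when indexing */
}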
@@ -235,9 +241,11 @@ static inline void __inet_hash(struct inet_hashinfo *hashinfo,
lock = &hashinfo->lhash_lock;
inet_listen_wlock(hashinfo);
} else {
sk->sk_hashent = inet_sk_ehashfn(sk, hashinfo->ehash_size);
list = &hashinfo->ehash[sk->sk_hashent].chain;
lock = &hashinfo->ehash[sk->sk_hashent].lock;
struct inet_ehash_bucket *head;
sk->sk_hash = inet_sk_ehashfn(sk);
head = inet_ehash_bucket(hashinfo, sk->sk_hash);
list = &head->chain;
lock = &head->lock;
write_lock(lock);
}
__sk_add_node(sk, list);
@@ -268,9 +276,8 @@ static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
inet_listen_wlock(hashinfo);
lock = &hashinfo->lhash_lock;
} else {
struct inet_ehash_bucket *head = &hashinfo->ehash[sk->sk_hashent];
lock = &head->lock;
write_lock_bh(&head->lock);
lock = &inet_ehash_bucket(hashinfo, sk->sk_hash)->lock;
write_lock_bh(lock);
}
if (__sk_del_node_init(sk))
@@ -337,23 +344,27 @@ sherry_cache:
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
(((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
(((__sk)->sk_hash == (__hash)) && \
((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \
((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
(((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
(((__sk)->sk_hash == (__hash)) && \
((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \
((inet_sk(__sk)->daddr == (__saddr)) && \
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
(((__sk)->sk_hash == (__hash)) && \
(inet_sk(__sk)->daddr == (__saddr)) && \
(inet_sk(__sk)->rcv_saddr == (__daddr)) && \
((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \
((inet_twsk(__sk)->tw_daddr == (__saddr)) && \
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
(((__sk)->sk_hash == (__hash)) && \
(inet_twsk(__sk)->tw_daddr == (__saddr)) && \
(inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \
((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
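Besides the new sk_hash test, the 64-bit branch keeps the address-cookie trick: the two 32-bit IPv4 addresses are packed into one 64-bit value so both compare in a single operation, with the pack order chosen per endianness to match how the fields sit next to each other in struct inet_sock. A hedged illustration of the packing, not the kernel macro:

#include <stdint.h>

/* Illustrative only: pack two 32-bit addresses into one 64-bit cookie so a
 * single 64-bit compare covers both. */
static inline uint64_t ex_addr_cookie(uint32_t a, uint32_t b)
{
	return ((uint64_t)a << 32) | b;
}

static inline int ex_addrs_match(uint64_t cookie, uint32_t a, uint32_t b)
{
	return cookie == ex_addr_cookie(a, b);
}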
@@ -378,18 +389,19 @@ static inline struct sock *
/* Optimize here for direct hit, only listening connections can
* have wildcards anyways.
*/
const int hash = inet_ehashfn(daddr, hnum, saddr, sport, hashinfo->ehash_size);
struct inet_ehash_bucket *head = &hashinfo->ehash[hash];
unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
prefetch(head->chain.first);
read_lock(&head->lock);
sk_for_each(sk, node, &head->chain) {
if (INET_MATCH(sk, acookie, saddr, daddr, ports, dif))
if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
goto hit; /* You sunk my battleship! */
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
if (INET_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
goto hit;
}
sk = NULL;
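The established lookup above now computes the hash once and threads it through bucket selection and both match macros; the TIME_WAIT sockets live in a second set of ehash_size buckets placed directly after the established ones, which is what "head + hashinfo->ehash_size" addresses. A simplified, purely illustrative sketch of that flow (placeholder types, no locking):

struct ex_node   { struct ex_node *next; unsigned int hash; };
struct ex_bucket { struct ex_node *chain; };

static struct ex_node *ex_lookup(struct ex_bucket *ehash, unsigned int ehash_size,
				 unsigned int hash)
{
	struct ex_bucket *head = &ehash[hash & (ehash_size - 1)];
	struct ex_node *n;

	for (n = head->chain; n; n = n->next)			/* established chain */
		if (n->hash == hash)
			return n;
	for (n = (head + ehash_size)->chain; n; n = n->next)	/* TIME_WAIT chain */
		if (n->hash == hash)
			return n;
	return NULL;
}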

View File

@@ -112,6 +112,7 @@ struct inet_timewait_sock {
#define tw_node __tw_common.skc_node
#define tw_bind_node __tw_common.skc_bind_node
#define tw_refcnt __tw_common.skc_refcnt
#define tw_hash __tw_common.skc_hash
#define tw_prot __tw_common.skc_prot
volatile unsigned char tw_substate;
/* 3 bits hole, try to pack */
@@ -126,7 +127,6 @@ struct inet_timewait_sock {
/* And these are ours. */
__u8 tw_ipv6only:1;
/* 31 bits hole, try to pack */
int tw_hashent;
int tw_timeout;
unsigned long tw_ttd;
struct inet_bind_bucket *tw_tb;

View File

@@ -17,6 +17,8 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
struct net_device;
struct packet_type;
struct sk_buff;
@@ -44,6 +46,7 @@ struct llc_sap {
unsigned char state;
unsigned char p_bit;
unsigned char f_bit;
atomic_t refcnt;
int (*rcv_func)(struct sk_buff *skb,
struct net_device *dev,
struct packet_type *pt,
@@ -81,13 +84,27 @@ extern struct llc_sap *llc_sap_open(unsigned char lsap,
struct net_device *dev,
struct packet_type *pt,
struct net_device *orig_dev));
static inline void llc_sap_hold(struct llc_sap *sap)
{
atomic_inc(&sap->refcnt);
}
extern void llc_sap_close(struct llc_sap *sap);
static inline void llc_sap_put(struct llc_sap *sap)
{
if (atomic_dec_and_test(&sap->refcnt))
llc_sap_close(sap);
}
extern struct llc_sap *llc_sap_find(unsigned char sap_value);
extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
unsigned char *dmac, unsigned char dsap);
extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
extern int llc_station_init(void);
extern void llc_station_exit(void);
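struct llc_sap gains a reference count above, with llc_sap_hold()/llc_sap_put() and the SAP being closed by whoever drops the last reference. A userspace sketch of the same hold/put pattern (types and the destroy step are placeholders):

#include <stdatomic.h>
#include <stdlib.h>

struct ex_sap {
	atomic_int refcnt;
	/* ... protocol state ... */
};

static void ex_sap_destroy(struct ex_sap *sap)
{
	free(sap);
}

static void ex_sap_hold(struct ex_sap *sap)
{
	atomic_fetch_add(&sap->refcnt, 1);
}

static void ex_sap_put(struct ex_sap *sap)
{
	/* mirror atomic_dec_and_test(): release on the 1 -> 0 transition */
	if (atomic_fetch_sub(&sap->refcnt, 1) == 1)
		ex_sap_destroy(sap);
}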
@@ -98,4 +115,17 @@ extern void llc_proc_exit(void);
#define llc_proc_init() (0)
#define llc_proc_exit() do { } while(0)
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SYSCTL
extern int llc_sysctl_init(void);
extern void llc_sysctl_exit(void);
extern int sysctl_llc2_ack_timeout;
extern int sysctl_llc2_busy_timeout;
extern int sysctl_llc2_p_timeout;
extern int sysctl_llc2_rej_timeout;
extern int sysctl_llc_station_ack_timeout;
#else
#define llc_sysctl_init() (0)
#define llc_sysctl_exit() do { } while(0)
#endif /* CONFIG_SYSCTL */
#endif /* LLC_H */
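The CONFIG_SYSCTL block above follows the usual compile-out convention: when sysctl support is disabled, the init/exit entry points collapse to no-op macros so callers need no #ifdefs of their own. A generic sketch of the pattern with a made-up config symbol:

#ifdef CONFIG_FEATURE_X
extern int  feature_x_init(void);
extern void feature_x_exit(void);
#else
#define feature_x_init()  (0)
#define feature_x_exit()  do { } while (0)
#endif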

View File

@@ -19,14 +19,14 @@
#define LLC_EVENT 1
#define LLC_PACKET 2
#define LLC_P_TIME 2
#define LLC_ACK_TIME 1
#define LLC_REJ_TIME 3
#define LLC_BUSY_TIME 3
#define LLC2_P_TIME 2
#define LLC2_ACK_TIME 1
#define LLC2_REJ_TIME 3
#define LLC2_BUSY_TIME 3
struct llc_timer {
struct timer_list timer;
u16 expire; /* timer expire time */
unsigned long expire; /* timer expire time */
};
struct llc_sock {
@@ -38,6 +38,7 @@ struct llc_sock {
struct llc_addr laddr; /* lsap/mac pair */
struct llc_addr daddr; /* dsap/mac pair */
struct net_device *dev; /* device to send to remote */
u32 copied_seq; /* head of yet unread data */
u8 retry_count; /* number of retries */
u8 ack_must_be_send;
u8 first_pdu_Ns;
@@ -92,7 +93,8 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
return skb->cb[sizeof(skb->cb) - 1];
}
extern struct sock *llc_sk_alloc(int family, int priority, struct proto *prot);
extern struct sock *llc_sk_alloc(int family, unsigned int __nocast priority,
struct proto *prot);
extern void llc_sk_free(struct sock *sk);
extern void llc_sk_reset(struct sock *sk);
@@ -115,5 +117,4 @@ extern void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
extern u8 llc_data_accept_state(u8 state);
extern void llc_build_offset_table(void);
extern int llc_release_sockets(struct llc_sap *sap);
#endif /* LLC_CONN_H */
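Earlier in this file's diff, llc_timer.expire is widened from u16 to unsigned long (the type used for jiffies), presumably so that configurable timeout values cannot silently truncate. A trivial userspace demonstration of the truncation a 16-bit field would cause (values chosen arbitrarily):

#include <stdio.h>

int main(void)
{
	unsigned long ticks = 70000UL;			/* e.g. 70 s worth of ticks at HZ=1000 */
	unsigned short narrow = (unsigned short)ticks;	/* truncates modulo 65536 */

	printf("%lu stored in a u16 becomes %u\n", ticks, (unsigned)narrow);
	return 0;	/* prints: 70000 stored in a u16 becomes 4464 */
}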

View File

@@ -12,11 +12,15 @@
* See the GNU General Public License for more details.
*/
struct llc_sap;
struct net_device;
struct sk_buff;
struct sock;
extern void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
extern void llc_save_primitive(struct sk_buff* skb, unsigned char prim);
extern struct sk_buff *llc_alloc_frame(void);
extern void llc_save_primitive(struct sock *sk, struct sk_buff* skb,
unsigned char prim);
extern struct sk_buff *llc_alloc_frame(struct sock *sk,
struct net_device *dev);
extern void llc_build_and_send_test_pkt(struct llc_sap *sap,
struct sk_buff *skb,

View File

@@ -99,6 +99,7 @@ struct proto;
* @skc_node: main hash linkage for various protocol lookup tables
* @skc_bind_node: bind hash linkage for various protocol lookup tables
* @skc_refcnt: reference count
* @skc_hash: hash value used with various protocol lookup tables
* @skc_prot: protocol handlers inside a network family
*
* This is the minimal network layer representation of sockets, the header
@@ -112,6 +113,7 @@ struct sock_common {
struct hlist_node skc_node;
struct hlist_node skc_bind_node;
atomic_t skc_refcnt;
unsigned int skc_hash;
struct proto *skc_prot;
};
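sock_common above is the shared front end of every socket; struct sock embeds it as its first member and re-exports the shared fields under sk_* names through #defines (sk_hash, added further down, is exactly such an alias for skc_hash). A tiny sketch of that aliasing pattern with invented names:

struct ex_common {
	unsigned int	 cmn_hash;	/* plays the role of skc_hash */
};

struct ex_sock {
	struct ex_common cmn;		/* shared part, kept as the first member */
#define ex_hash cmn.cmn_hash		/* alias, like sk_hash above */
	int		 other_state;
};

/* usage: given "struct ex_sock *sk", sk->ex_hash expands to sk->cmn.cmn_hash */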
@@ -139,7 +141,6 @@ struct sock_common {
* @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
* @sk_lingertime: %SO_LINGER l_linger setting
* @sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash)
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct
* @sk_error_queue: rarely used
@@ -186,6 +187,7 @@ struct sock {
#define sk_node __sk_common.skc_node
#define sk_bind_node __sk_common.skc_bind_node
#define sk_refcnt __sk_common.skc_refcnt
#define sk_hash __sk_common.skc_hash
#define sk_prot __sk_common.skc_prot
unsigned char sk_shutdown : 2,
sk_no_check : 2,
@@ -208,7 +210,6 @@ struct sock {
unsigned int sk_allocation;
int sk_sndbuf;
int sk_route_caps;
int sk_hashent;
unsigned long sk_flags;
unsigned long sk_lingertime;
/*