Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (24 commits)
  e100: do not go D3 in shutdown unless system is powering off
  netfilter: revised locking for x_tables
  Bluetooth: Fix connection establishment with low security requirement
  Bluetooth: Add different pairing timeout for Legacy Pairing
  Bluetooth: Ensure that HCI sysfs add/del is preempt safe
  net: Avoid extra wakeups of threads blocked in wait_for_packet()
  net: Fix typo in net_device_ops description.
  ipv4: Limit size of route cache hash table
  Add reference to CAPI 2.0 standard
  Documentation/isdn/INTERFACE.CAPI
  update Documentation/isdn/00-INDEX
  ixgbe: Fix WoL functionality for 82599 KX4 devices
  veth: prevent oops caused by netdev destructor
  xfrm: wrong hash value for temporary SA
  forcedeth: tx timeout fix
  net: Fix LL_MAX_HEADER for CONFIG_TR_MODULE
  mlx4_en: Handle page allocation failure during receive
  mlx4_en: Fix cleanup flow on cq activation
  vlan: update vlan carrier state for admin up/down
  netfilter: xt_recent: fix stack overread in compat code
  ...
Author: Linus Torvalds
Date:   2009-04-29 07:55:45 -07:00

33 changed files with 795 additions and 447 deletions

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h

@@ -104,7 +104,7 @@ struct wireless_dev;
 # else
 #  define LL_MAX_HEADER 96
 # endif
-#elif defined(CONFIG_TR)
+#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
 # define LL_MAX_HEADER 48
 #else
 # define LL_MAX_HEADER 32
@@ -500,7 +500,7 @@ struct netdev_queue {
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
- *	needs to be changed. If not this interface is not defined, the
+ *	needs to be changed. If this interface is not defined, the
 *	mac address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
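
(Aside: a minimal sketch of a driver-side ndo_set_mac_address handler matching the description above. The "foo" driver and its ops table are hypothetical; only the usual validate-then-copy pattern is the point, and a real driver would also program the new address into its hardware.)

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/string.h>

/* Hedged sketch only: "foo" is a made-up driver. */
static int foo_set_mac_address(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        /* A real driver would write the new address to the NIC here. */
        return 0;
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = foo_set_mac_address,
};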

--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h

@@ -100,6 +100,7 @@ enum ctattr_protoinfo_tcp {
 enum ctattr_protoinfo_dccp {
 	CTA_PROTOINFO_DCCP_UNSPEC,
 	CTA_PROTOINFO_DCCP_STATE,
+	CTA_PROTOINFO_DCCP_ROLE,
 	__CTA_PROTOINFO_DCCP_MAX,
 };
 #define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)

--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h

@@ -354,9 +354,6 @@ struct xt_table
 	/* What hooks you will enter on */
 	unsigned int valid_hooks;
 
-	/* Lock for the curtain */
-	struct mutex lock;
-
 	/* Man behind the curtain... */
 	struct xt_table_info *private;
@@ -434,8 +431,74 @@ extern void xt_proto_fini(struct net *net, u_int8_t af);
 extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
 extern void xt_free_table_info(struct xt_table_info *info);
-extern void xt_table_entry_swap_rcu(struct xt_table_info *old,
-				    struct xt_table_info *new);
+
+/*
+ * Per-CPU spinlock associated with per-cpu table entries, and
+ * with a counter for the "reading" side that allows a recursive
+ * reader to avoid taking the lock and deadlocking.
+ *
+ * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
+ * It needs to ensure that the rules are not being changed while the packet
+ * is being processed. In some cases, the read lock will be acquired
+ * twice on the same CPU; this is okay because of the count.
+ *
+ * "writing" is used when reading counters.
+ * During replace any readers that are using the old tables have to complete
+ * before freeing the old table. This is handled by the write locking
+ * necessary for reading the counters.
+ */
+struct xt_info_lock {
+	spinlock_t lock;
+	unsigned char readers;
+};
+DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
+
+/*
+ * Note: we need to ensure that preemption is disabled before acquiring
+ * the per-cpu-variable, so we do it as a two step process rather than
+ * using "spin_lock_bh()".
+ *
+ * We _also_ need to disable bottom half processing before updating our
+ * nesting count, to make sure that the only kind of re-entrancy is this
+ * code being called by itself: since the count+lock is not an atomic
+ * operation, we can allow no races.
+ *
+ * _Only_ that special combination of being per-cpu and never getting
+ * re-entered asynchronously means that the count is safe.
+ */
+static inline void xt_info_rdlock_bh(void)
+{
+	struct xt_info_lock *lock;
+
+	local_bh_disable();
+	lock = &__get_cpu_var(xt_info_locks);
+	if (!lock->readers++)
+		spin_lock(&lock->lock);
+}
+
+static inline void xt_info_rdunlock_bh(void)
+{
+	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
+
+	if (!--lock->readers)
+		spin_unlock(&lock->lock);
+	local_bh_enable();
+}
+
+/*
+ * The "writer" side needs to get exclusive access to the lock,
+ * regardless of readers. This must be called with bottom half
+ * processing (and thus also preemption) disabled.
+ */
+static inline void xt_info_wrlock(unsigned int cpu)
+{
+	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+}
+
+static inline void xt_info_wrunlock(unsigned int cpu)
+{
+	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+}
+
 /*
  * This helper is performance critical and must be inlined
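
(Aside: a hedged usage sketch of the new lock, modeled on how the rule-evaluation and counter-reading paths in this series use it; the function names below are illustrative, not the kernel's. The per-packet path takes the cheap recursive reader side on its own CPU, while summing counters takes the writer side on each CPU in turn with bottom halves disabled.)

#include <linux/cpumask.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/skbuff.h>

/* Illustrative reader side: per-packet rule traversal on the local cpu. */
static unsigned int example_do_table(struct sk_buff *skb, struct xt_table *table)
{
        const struct xt_table_info *private;
        unsigned int verdict = NF_ACCEPT;       /* placeholder verdict */

        xt_info_rdlock_bh();                    /* disable BH, take this cpu's lock once */
        private = table->private;
        /* ... walk the rules in 'private', bump per-cpu packet/byte counters ... */
        xt_info_rdunlock_bh();

        return verdict;
}

/* Illustrative writer side: sum per-cpu counters while excluding the rule
 * walker on each cpu one at a time. */
static void example_sum_counters(const struct xt_table_info *t)
{
        unsigned int cpu;

        local_bh_disable();                     /* xt_info_wrlock() requires BH off */
        for_each_possible_cpu(cpu) {
                xt_info_wrlock(cpu);
                /* ... add that cpu's entry counters into the running totals ... */
                xt_info_wrunlock(cpu);
        }
        local_bh_enable();
}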

--- a/include/linux/wait.h
+++ b/include/linux/wait.h

@@ -440,13 +440,15 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 
-#define DEFINE_WAIT(name)						\
+#define DEFINE_WAIT_FUNC(name, function)				\
 	wait_queue_t name = {						\
 		.private	= current,				\
-		.func		= autoremove_wake_function,		\
+		.func		= function,				\
 		.task_list	= LIST_HEAD_INIT((name).task_list),	\
 	}
 
+#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
+
 #define DEFINE_WAIT_BIT(name, word, bit)				\
 	struct wait_bit_queue name = {					\
 		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
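
(Aside: DEFINE_WAIT_FUNC is what allows wait_for_packet(), per the "Avoid extra wakeups of threads blocked in wait_for_packet()" commit above, to wait with a custom wake function instead of autoremove_wake_function. A hedged sketch of that pattern follows; the filtering wake function and the waiter below are illustrative, modeled on the datagram change in this merge.)

#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Sketch of a filtering wake function: skip the wakeup unless the key
 * carries poll bits a blocked receiver actually cares about. */
static int receiver_wake_function(wait_queue_t *wait, unsigned mode,
                                  int sync, void *key)
{
        unsigned long bits = (unsigned long)key;

        if (bits && !(bits & (POLLIN | POLLERR)))
                return 0;               /* not interesting: no wakeup */
        return autoremove_wake_function(wait, mode, sync, key);
}

static void example_wait_for_data(wait_queue_head_t *sleep)
{
        DEFINE_WAIT_FUNC(wait, receiver_wake_function);

        prepare_to_wait_exclusive(sleep, &wait, TASK_INTERRUPTIBLE);
        schedule();                     /* sleep until a relevant wakeup or a signal */
        finish_wait(sleep, &wait);
}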