Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/geneve.c

Here we had an overlapping change, where in 'net' the extraneous stats
bump was being removed whilst in 'net-next' the final argument to
udp_tunnel6_xmit_skb() was being changed.

Signed-off-by: David S. Miller <davem@davemloft.net>
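To make the resolution concrete, here is a schematic of that kind of conflict with placeholder arguments. The real geneve.c hunk is not reproduced in this view; old_final_arg, new_final_arg, and stats are illustrative stand-ins, not the actual code:

	<<<<<<< HEAD (net-next: final argument changed)
		stats->tx_packets++;	/* extraneous bump, still present here */
		udp_tunnel6_xmit_skb(..., new_final_arg);
	=======
		udp_tunnel6_xmit_skb(..., old_final_arg);
	>>>>>>> net (extraneous stats bump removed)

The resolution keeps both intents: the stats bump stays deleted and the call takes the new final argument, so only "udp_tunnel6_xmit_skb(..., new_final_arg);" survives in the merged tree.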
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -5,7 +5,7 @@
  *
  * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
  * Bits and pieces stolen from Peter Zijlstra's code, which is
- * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright 2007, Red Hat Inc. Peter Zijlstra
  * GPLv2
  *
  * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1464,7 +1464,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 	entry->type      = dma_debug_coherent;
 	entry->dev       = dev;
 	entry->pfn	 = page_to_pfn(virt_to_page(virt));
-	entry->offset	 = (size_t) virt & PAGE_MASK;
+	entry->offset	 = (size_t) virt & ~PAGE_MASK;
 	entry->size      = size;
 	entry->dev_addr  = dma_addr;
 	entry->direction = DMA_BIDIRECTIONAL;
@@ -1480,7 +1480,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 		.type           = dma_debug_coherent,
 		.dev            = dev,
 		.pfn		= page_to_pfn(virt_to_page(virt)),
-		.offset		= (size_t) virt & PAGE_MASK,
+		.offset		= (size_t) virt & ~PAGE_MASK,
 		.dev_addr       = addr,
 		.size           = size,
 		.direction      = DMA_BIDIRECTIONAL,
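Aside: the one-character fix above is easy to misread. PAGE_MASK clears the low (in-page) bits, so "virt & PAGE_MASK" yields the page-aligned base address, while "virt & ~PAGE_MASK" yields the offset within the page, which is what entry->offset is supposed to record. A minimal userspace sketch, assuming 4 KiB pages, with PAGE_SIZE/PAGE_MASK redefined locally to mirror the kernel's definitions:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))	/* mirrors the kernel definition */

	int main(void)
	{
		uintptr_t virt = 0x12345678;

		/* page-aligned base: prints 0x12345000 */
		printf("virt & PAGE_MASK  = %#lx\n", (unsigned long)(virt & PAGE_MASK));
		/* offset inside the page: prints 0x678, the value dma-debug wants */
		printf("virt & ~PAGE_MASK = %#lx\n", (unsigned long)(virt & ~PAGE_MASK));
		return 0;
	}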
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -1,7 +1,7 @@
 /*
  * Floating proportions
  *
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * Description:
  *
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -386,33 +386,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht,
 	return false;
 }
 
-int rhashtable_insert_rehash(struct rhashtable *ht)
+int rhashtable_insert_rehash(struct rhashtable *ht,
+			     struct bucket_table *tbl)
 {
 	struct bucket_table *old_tbl;
 	struct bucket_table *new_tbl;
-	struct bucket_table *tbl;
 	unsigned int size;
 	int err;
 
 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
-	tbl = rhashtable_last_table(ht, old_tbl);
 
 	size = tbl->size;
 
+	err = -EBUSY;
+
 	if (rht_grow_above_75(ht, tbl))
 		size *= 2;
 	/* Do not schedule more than one rehash */
 	else if (old_tbl != tbl)
-		return -EBUSY;
+		goto fail;
+
+	err = -ENOMEM;
 
 	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
-	if (new_tbl == NULL) {
-		/* Schedule async resize/rehash to try allocation
-		 * non-atomic context.
-		 */
-		schedule_work(&ht->run_work);
-		return -ENOMEM;
-	}
+	if (new_tbl == NULL)
+		goto fail;
 
 	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
 	if (err) {
@@ -423,12 +421,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
 		schedule_work(&ht->run_work);
 
 	return err;
+
+fail:
+	/* Do not fail the insert if someone else did a rehash. */
+	if (likely(rcu_dereference_raw(tbl->future_tbl)))
+		return 0;
+
+	/* Schedule async rehash to retry allocation in process context. */
+	if (err == -ENOMEM)
+		schedule_work(&ht->run_work);
+
+	return err;
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
 
-int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
-			   struct rhash_head *obj,
-			   struct bucket_table *tbl)
+struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
+					    const void *key,
+					    struct rhash_head *obj,
+					    struct bucket_table *tbl)
 {
 	struct rhash_head *head;
 	unsigned int hash;
@@ -464,7 +474,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 exit:
 	spin_unlock(rht_bucket_lock(tbl, hash));
 
-	return err;
+	if (err == 0)
+		return NULL;
+	else if (err == -EAGAIN)
+		return tbl;
+	else
+		return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
 
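The hunk above flips rhashtable_insert_slow() from returning an errno to returning a table pointer: NULL for success, the table itself for the old -EAGAIN case (a rehash is needed before retrying), and an ERR_PTR()-encoded errno for hard failures. A hedged caller-side sketch of unpacking that convention; insert_slow_to_errno is illustrative, not code from this patch:

	#include <linux/rhashtable.h>

	/* Illustrative only, not from this patch: unpack the new return
	 * convention of rhashtable_insert_slow() back into an errno.
	 */
	static int insert_slow_to_errno(struct rhashtable *ht, const void *key,
					struct rhash_head *obj,
					struct bucket_table *tbl)
	{
		tbl = rhashtable_insert_slow(ht, key, obj, tbl);
		if (IS_ERR(tbl))
			return PTR_ERR(tbl);	/* hard failure, e.g. -EEXIST */
		if (tbl)			/* old -EAGAIN case: rehash, then retry */
			return rhashtable_insert_rehash(ht, tbl) ?: -EAGAIN;
		return 0;			/* NULL means the insert succeeded */
	}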
@@ -500,10 +515,10 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
 	if (!iter->walker)
 		return -ENOMEM;
 
-	mutex_lock(&ht->mutex);
+	spin_lock(&ht->lock);
 	iter->walker->tbl = rht_dereference(ht->tbl, ht);
 	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
-	mutex_unlock(&ht->mutex);
+	spin_unlock(&ht->lock);
 
 	return 0;
 }
@@ -517,10 +532,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
  */
 void rhashtable_walk_exit(struct rhashtable_iter *iter)
 {
-	mutex_lock(&iter->ht->mutex);
+	spin_lock(&iter->ht->lock);
 	if (iter->walker->tbl)
 		list_del(&iter->walker->list);
-	mutex_unlock(&iter->ht->mutex);
+	spin_unlock(&iter->ht->lock);
 	kfree(iter->walker);
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
@@ -544,14 +559,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
 {
 	struct rhashtable *ht = iter->ht;
 
-	mutex_lock(&ht->mutex);
-
-	if (iter->walker->tbl)
-		list_del(&iter->walker->list);
-
 	rcu_read_lock();
 
-	mutex_unlock(&ht->mutex);
+	spin_lock(&ht->lock);
+	if (iter->walker->tbl)
+		list_del(&iter->walker->list);
+	spin_unlock(&ht->lock);
 
 	if (!iter->walker->tbl) {
 		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
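The three walker hunks above replace ht->mutex with the new ht->lock spinlock around walker-list manipulation, so walkers can be attached and detached without sleeping. For reference, a hedged usage sketch of the walker API these functions implement, as it existed in this era (walk_all is illustrative, not from the patch; error handling trimmed):

	#include <linux/rhashtable.h>

	/* Hedged usage sketch of the walker API touched above. */
	static void walk_all(struct rhashtable *ht)
	{
		struct rhashtable_iter iter;
		void *obj;
		int err;

		err = rhashtable_walk_init(ht, &iter);
		if (err)
			return;

		err = rhashtable_walk_start(&iter);
		if (err && err != -EAGAIN)	/* -EAGAIN: resized; walk still valid */
			goto exit;

		while ((obj = rhashtable_walk_next(&iter)) != NULL) {
			if (IS_ERR(obj)) {
				if (PTR_ERR(obj) == -EAGAIN)
					continue;	/* resize mid-walk; keep going */
				break;
			}
			/* process obj here */
		}

		rhashtable_walk_stop(&iter);
	exit:
		rhashtable_walk_exit(&iter);
	}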
@@ -720,9 +733,6 @@ int rhashtable_init(struct rhashtable *ht,
 	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
 		return -EINVAL;
 
-	if (params->nelem_hint)
-		size = rounded_hashtable_size(params);
-
 	memset(ht, 0, sizeof(*ht));
 	mutex_init(&ht->mutex);
 	spin_lock_init(&ht->lock);
@@ -742,6 +752,9 @@ int rhashtable_init(struct rhashtable *ht,
 
 	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
 
+	if (params->nelem_hint)
+		size = rounded_hashtable_size(&ht->p);
+
 	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
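The two rhashtable_init() hunks move the nelem_hint sizing below the min_size clamp, and base it on the sanitized ht->p rather than the caller's raw params. The ordering matters because the rounding helper consults min_size; as a reference point, the helper reads roughly like this elsewhere in lib/rhashtable.c (quoted from memory, so treat as approximate):

	static size_t rounded_hashtable_size(const struct rhashtable_params *params)
	{
		/* 4/3 head-room over the hint, rounded up to a power of two,
		 * but never below the (now already clamped) minimum size.
		 */
		return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			   (unsigned long)params->min_size);
	}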