Merge commit 'linus/master' into HEAD

Conflicts:
	MAINTAINERS

Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
This commit is contained in:
Vegard Nossum
2009-06-15 15:50:49 +02:00
Current commit 722f2a6c87
2130 changed files with 172928 additions and 37398 deletions

View file

@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
  * Safely write to address @dst from the buffer at @src. If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-long probe_kernel_write(void *dst, void *src, size_t size)
+long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
 {
 	long ret;
 	mm_segment_t old_fs = get_fs();

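Two things change in this signature: notrace keeps the function out of ftrace instrumentation (it can be called from tracing internals), and __weak turns it into a weak symbol so an architecture can link in its own stronger definition. Below is a minimal userspace sketch of the weak-symbol mechanism only; backend_write() is an invented name, not a kernel function.

/* weak_demo.c -- minimal sketch of the weak-symbol override used by the
 * __weak annotation above. backend_write() is an invented name. */
#include <stdio.h>

/* Generic fallback: used only if no strong definition exists elsewhere. */
__attribute__((weak)) int backend_write(void)
{
	printf("generic backend_write\n");
	return 0;
}

int main(void)
{
	/* Another object file defining a plain (strong) backend_write()
	 * would replace this weak one at link time, no #ifdef needed. */
	return backend_write();
}

Compiled alone, this runs the weak fallback; linking in another object with a non-weak backend_write() silently overrides it, which is how an arch-specific probe_kernel_write() takes precedence over the generic one.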
View file

@@ -47,8 +47,6 @@ static int __init alloc_node_page_cgroup(int nid)
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	unsigned long start_pfn, nr_pages, index;
-	struct page *page;
-	unsigned int order;
 
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -57,13 +55,11 @@ static int __init alloc_node_page_cgroup(int nid)
 		return 0;
 
 	table_size = sizeof(struct page_cgroup) * nr_pages;
-	order = get_order(table_size);
-	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
-		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
+
+	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	if (!base)
 		return -ENOMEM;
-	base = page_address(page);
 	for (index = 0; index < nr_pages; index++) {
 		pc = base + index;
 		__init_page_cgroup(pc, start_pfn + index);
@@ -73,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
 	return 0;
 }
 
-void __init page_cgroup_init(void)
+void __init page_cgroup_init_flatmem(void)
 {
 
 	int nid, fail;
@@ -117,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
 	if (!section->page_cgroup) {
 		nid = page_to_nid(pfn_to_page(pfn));
 		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-		if (slab_is_available()) {
-			base = kmalloc_node(table_size,
-					GFP_KERNEL | __GFP_NOWARN, nid);
-			if (!base)
-				base = vmalloc_node(table_size, nid);
-		} else {
-			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-				table_size,
-				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-		}
+		VM_BUG_ON(!slab_is_available());
+		base = kmalloc_node(table_size,
+				GFP_KERNEL | __GFP_NOWARN, nid);
+		if (!base)
+			base = vmalloc_node(table_size, nid);
 	} else {
 		/*
 		 * We don't have to allocate page_cgroup again, but

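The flatmem path above gives up on alloc_pages_node() entirely: page_cgroup_init_flatmem() now runs so early that only the bootmem allocator is guaranteed to work, while the sparsemem path below it asserts the opposite invariant with VM_BUG_ON(!slab_is_available()). The table layout itself is unchanged: one struct page_cgroup per page frame, indexed by pfn offset. A small userspace sketch of that sizing and indexing, with a stand-in struct and made-up numbers:

/* table_sketch.c -- userspace sketch of the per-pfn table built by
 * alloc_node_page_cgroup(): one entry per page frame, sized as
 * sizeof(entry) * nr_pages and indexed by pfn offset. The struct and
 * the numbers are stand-ins, not the kernel's. */
#include <stdio.h>
#include <stdlib.h>

struct page_cgroup_demo {	/* stand-in for struct page_cgroup */
	unsigned long flags;
	void *mem_cgroup;
};

int main(void)
{
	unsigned long start_pfn = 0x100;  /* node's first page frame */
	unsigned long nr_pages = 1024;    /* pages spanned by the node */
	size_t table_size = sizeof(struct page_cgroup_demo) * nr_pages;
	struct page_cgroup_demo *base = calloc(nr_pages, sizeof(*base));
	unsigned long index;

	if (!base)
		return 1;	/* mirrors the -ENOMEM path above */
	for (index = 0; index < nr_pages; index++)
		base[index].flags = start_pfn + index;	/* ~__init_page_cgroup() */
	printf("table: %zu bytes for %lu pfns\n", table_size, nr_pages);
	free(base);
	return 0;
}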
View file

@@ -304,6 +304,12 @@ struct kmem_list3 {
 	int free_touched;		/* updated without locking */
 };
 
+/*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
 /*
  * Need this for bootstrapping a per node allocator.
  */
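SLAB_GFP_BOOT_MASK is the set of gfp bits that are safe while interrupts are still disabled; in particular it excludes __GFP_WAIT, so an early allocation can never sleep and re-enable interrupts behind the boot code's back. The hunks further down apply flags &= slab_gfp_mask at every allocation entry point. A self-contained sketch of the idiom, with invented bit values rather than the kernel's:

/* gfp_mask_sketch.c -- the boot-time gfp masking added above: until
 * interrupts are enabled, sleep-capable flags are silently dropped from
 * every allocation request. Bit values are invented for the demo; the
 * kernel's SLAB_GFP_BOOT_MASK and __GFP_BITS_MASK differ. */
#include <stdio.h>

#define DEMO_GFP_WAIT	0x1u	/* allocation may sleep */
#define DEMO_GFP_IO	0x2u
#define DEMO_GFP_FS	0x4u
#define DEMO_GFP_ALL	0x7u
#define DEMO_BOOT_MASK	(DEMO_GFP_ALL & ~DEMO_GFP_WAIT)	/* no sleeping early */

static unsigned int slab_gfp_mask = DEMO_BOOT_MASK;	/* boot default */

static void alloc_demo(unsigned int flags)
{
	flags &= slab_gfp_mask;		/* same idiom as the hunks below */
	printf("effective flags %#x, may sleep: %s\n",
	       flags, (flags & DEMO_GFP_WAIT) ? "yes" : "no");
}

int main(void)
{
	alloc_demo(DEMO_GFP_WAIT | DEMO_GFP_IO);  /* early: WAIT stripped */
	slab_gfp_mask = DEMO_GFP_ALL;             /* ~kmem_cache_init_late() */
	alloc_demo(DEMO_GFP_WAIT | DEMO_GFP_IO);  /* now allowed to sleep */
	return 0;
}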
@@ -673,6 +679,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;
 
@@ -681,7 +688,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
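The comparison flips from == FULL to >= EARLY because the enumerators are declared in boot order, so every state from EARLY onwards means the allocator can satisfy requests. A tiny sketch of this ordered-state idiom:

/* state_order_sketch.c -- the ordered-enum idiom behind
 * "g_cpucache_up >= EARLY": states are declared in boot order, so a
 * later state implies every earlier one. Mirrors the enum above. */
#include <stdio.h>

enum boot_state { NONE, PARTIAL_AC, PARTIAL_L3, EARLY, FULL };

static enum boot_state g_state = NONE;

static int allocator_usable(void)
{
	return g_state >= EARLY;	/* true for EARLY and FULL alike */
}

int main(void)
{
	g_state = PARTIAL_L3;
	printf("usable: %d\n", allocator_usable());	/* prints 0 */
	g_state = EARLY;
	printf("usable: %d\n", allocator_usable());	/* prints 1 */
	return 0;
}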
@@ -1545,19 +1552,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
-
 	/* Done! */
 	g_cpucache_up = FULL;
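The net effect is a two-phase bring-up: kmem_cache_init() leaves the allocator usable (EARLY) while interrupts are still off, and the head-array resizing moves into the new kmem_cache_init_late(), to be called once interrupts are enabled (the call site in generic boot code is not part of this excerpt). A sketch of the pattern, with hypothetical stand-in names:

/* two_phase_sketch.c -- the split-initialization pattern introduced by
 * this hunk: phase one runs with interrupts (modelled as a flag) still
 * off, phase two after they are enabled. Names are hypothetical. */
#include <stdio.h>

static int irqs_enabled;	/* stand-in for the real IRQ state */
static int cache_state;		/* 0 = down, 1 = EARLY, 2 = FULL */

static void cache_init(void)		/* ~kmem_cache_init() */
{
	cache_state = 1;		/* usable, head arrays not tuned yet */
	printf("early init: irqs=%d state=%d\n", irqs_enabled, cache_state);
}

static void cache_init_late(void)	/* ~kmem_cache_init_late() */
{
	cache_state = 2;		/* resize head arrays, full service */
	printf("late init:  irqs=%d state=%d\n", irqs_enabled, cache_state);
}

int main(void)
{
	cache_init();		/* early in boot, interrupts still off */
	irqs_enabled = 1;	/* interrupts come on somewhere in between */
	cache_init_late();	/* allocations may sleep from here on */
	return 0;
}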
@@ -2034,7 +2049,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		for_each_online_node(node) {
 			cachep->nodelists[node] =
 			    kmalloc_node(sizeof(struct kmem_list3),
-					GFP_KERNEL, node);
+					gfp, node);
 			BUG_ON(!cachep->nodelists[node]);
 			kmem_list3_init(cachep->nodelists[node]);
 		}
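Here setup_cpu_cache() stops hardcoding GFP_KERNEL and threads the caller's gfp through, so the same helper works during boot (GFP_NOWAIT, must not sleep) and at runtime (GFP_KERNEL, may sleep). A hedged sketch of that parameter-threading; all names and values are illustrative:

/* gfp_threading_sketch.c -- sketch of why setup_cpu_cache() now takes
 * the caller's gfp instead of hardcoding GFP_KERNEL: one helper has to
 * serve both the boot path (must not sleep) and runtime cache creation
 * (may sleep). Names and values are illustrative. */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_demo_t;
#define DEMO_GFP_NOWAIT	0x0u	/* early boot: atomic context */
#define DEMO_GFP_KERNEL	0x1u	/* runtime: sleeping allowed */

static void *setup_node_list(gfp_demo_t gfp, size_t size)
{
	/* a real allocator would only sleep when the context allows it;
	 * the demo just reports which context the caller passed down */
	printf("alloc %zu bytes, %s context\n", size,
	       (gfp & DEMO_GFP_KERNEL) ? "sleepable" : "atomic");
	return malloc(size);
}

int main(void)
{
	void *boot = setup_node_list(DEMO_GFP_NOWAIT, 64);  /* boot path */
	void *late = setup_node_list(DEMO_GFP_KERNEL, 64);  /* runtime path */
	free(boot);
	free(late);
	return 0;
}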
@@ -3286,6 +3301,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
@@ -3369,6 +3386,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))

View file

@@ -179,6 +179,12 @@ static enum {
 	SYSFS		/* Sysfs up */
 } slab_state = DOWN;
 
+/*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
@@ -1618,6 +1624,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	gfpflags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
@@ -3132,6 +3140,14 @@ void __init kmem_cache_init(void)
 		nr_cpu_ids, nr_node_ids);
 }
 
+void __init kmem_cache_init_late(void)
+{
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+}
+
 /*
  * Find a mergeable slab cache
  */
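SLUB's kmem_cache_init_late() only lifts the boot-time mask, but it must exist: both allocators export the same hook so generic boot code can call it without caring which one was configured in. A userspace analogy of that build-time selection; every name here is invented:

/* common_hook_sketch.c -- SLAB and SLUB each define
 * kmem_cache_init_late(), so generic boot code calls one agreed-upon
 * hook regardless of which allocator was built in. All names invented. */
#include <stdio.h>

#define DEMO_USE_SLUB 1		/* stand-in for the CONFIG_SLUB choice */

#if DEMO_USE_SLUB
static void kmem_cache_init_late_demo(void)
{
	printf("slub: lift the boot-time gfp mask\n");	/* all SLUB needs */
}
#else
static void kmem_cache_init_late_demo(void)
{
	printf("slab: lift mask and resize head arrays\n");
}
#endif

int main(void)
{
	kmem_cache_init_late_demo();	/* caller is allocator-agnostic */
	return 0;
}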

View file

@@ -2056,7 +2056,7 @@ unsigned long global_lru_pages(void)
 		+ global_page_state(NR_INACTIVE_FILE);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_HIBERNATION
 /*
  * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
  * from LRU lists system-wide, for given pass and priority.
@@ -2196,7 +2196,7 @@ out:
 	return sc.nr_reclaimed;
 }
 
-#endif
+#endif /* CONFIG_HIBERNATION */
 
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness. So if the last cpu in a node goes
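Narrowing the guard from CONFIG_PM to CONFIG_HIBERNATION is safe because HIBERNATION depends on PM in Kconfig, and shrink_all_memory() has no caller outside the hibernation path; kernels built with PM but without hibernation simply stop carrying dead code. A compile-out sketch with stand-in macros (the real symbols come from Kconfig, not #defines):

/* hibernation_guard_sketch.c -- the effect of guarding code with
 * CONFIG_HIBERNATION instead of CONFIG_PM: a PM-only build compiles the
 * hibernation-only helper out entirely. Macros are stand-ins here. */
#include <stdio.h>

#define CONFIG_PM 1
/* #define CONFIG_HIBERNATION 1 */	/* not set: PM without hibernation */

#ifdef CONFIG_HIBERNATION
static long shrink_all_memory_demo(long nr_pages)
{
	return nr_pages;	/* whole-system reclaim, hibernation only */
}
#endif

int main(void)
{
#ifdef CONFIG_HIBERNATION
	printf("reclaimed %ld pages\n", shrink_all_memory_demo(128));
#else
	printf("shrink_all_memory() compiled out\n");
#endif
	return 0;
}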