Merge commit 'v2.6.28-rc7' into tracing/core
mm/memory_hotplug.c
@@ -189,7 +189,7 @@ static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
                                         pgdat->node_start_pfn;
 }
 
-static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
+static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
         struct pglist_data *pgdat = zone->zone_pgdat;
         int nr_pages = PAGES_PER_SECTION;
@@ -216,7 +216,7 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
         return 0;
 }
 
-static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
+static int __meminit __add_section(struct zone *zone, unsigned long phys_start_pfn)
 {
         int nr_pages = PAGES_PER_SECTION;
         int ret;
@@ -273,7 +273,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms)
  * call this function after deciding the zone to which to
  * add the new pages.
  */
-int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
+int __ref __add_pages(struct zone *zone, unsigned long phys_start_pfn,
                 unsigned long nr_pages)
 {
         unsigned long i;
@@ -470,7 +470,8 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 }
 
 
-int add_memory(int nid, u64 start, u64 size)
+/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+int __ref add_memory(int nid, u64 start, u64 size)
 {
         pg_data_t *pgdat = NULL;
         int new_pgdat = 0;
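The memory_hotplug.c hunks above are annotation-only: code that runs at boot or during memory hot-add is marked __meminit, and runtime entry points that legitimately call into it are marked __ref so modpost does not report the cross-section reference as a section mismatch. A minimal sketch of the pattern, using hypothetical example_* names rather than the functions touched here:

#include <linux/init.h>

/* may be discarded after boot when CONFIG_MEMORY_HOTPLUG is not set */
static int __meminit example_add_zone(unsigned long phys_start_pfn)
{
        return 0;
}

/* runtime caller; __ref documents that the reference into __meminit
 * text is intentional and safe in this configuration */
int __ref example_add_memory(unsigned long phys_start_pfn)
{
        return example_add_zone(phys_start_pfn);
}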
mm/page_cgroup.c
@@ -21,7 +21,7 @@ static unsigned long total_usage;
 #if !defined(CONFIG_SPARSEMEM)
 
 
-void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
+void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
 {
         pgdat->node_page_cgroup = NULL;
 }
@@ -97,7 +97,8 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
         return section->page_cgroup + pfn;
 }
 
-int __meminit init_section_page_cgroup(unsigned long pfn)
+/* __alloc_bootmem...() is protected by !slab_available() */
+int __init_refok init_section_page_cgroup(unsigned long pfn)
 {
         struct mem_section *section;
         struct page_cgroup *base, *pc;
@@ -106,19 +107,29 @@ int __meminit init_section_page_cgroup(unsigned long pfn)
 
         section = __pfn_to_section(pfn);
 
-        if (section->page_cgroup)
-                return 0;
-
-        nid = page_to_nid(pfn_to_page(pfn));
-
-        table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-        if (slab_is_available()) {
-                base = kmalloc_node(table_size, GFP_KERNEL, nid);
-                if (!base)
-                        base = vmalloc_node(table_size, nid);
-        } else {
-                base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size,
+        if (!section->page_cgroup) {
+                nid = page_to_nid(pfn_to_page(pfn));
+                table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+                if (slab_is_available()) {
+                        base = kmalloc_node(table_size, GFP_KERNEL, nid);
+                        if (!base)
+                                base = vmalloc_node(table_size, nid);
+                } else {
+                        base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+                                        table_size,
                                         PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-        }
+                }
+        } else {
+                /*
+                 * We don't have to allocate page_cgroup again, but
+                 * address of memmap may be changed. So, we have to initialize
+                 * again.
+                 */
+                base = section->page_cgroup + pfn;
+                table_size = 0;
+                /* check address of memmap is changed or not. */
+                if (base->page == pfn_to_page(pfn))
+                        return 0;
+        }
 
         if (!base) {
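Two separate concerns show up in the init_section_page_cgroup() hunks above. The function becomes __init_refok because one allocation path uses the boot-time-only __alloc_bootmem_node_nopanic(); that path is reachable only while !slab_is_available(), which is what the added comment records. And when a section is onlined again after a hot-remove, the existing page_cgroup array is reused, but the memmap may have been reallocated at a different address, so the entries are re-initialized (table_size = 0 skips allocation) whenever base->page no longer matches pfn_to_page(pfn). A condensed sketch of the allocation-path choice, under a hypothetical example_* name:

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>

static void *__init_refok example_alloc_table(int nid, size_t table_size)
{
        void *base;

        if (slab_is_available()) {
                /* normal runtime path, used for memory hotplug */
                base = kmalloc_node(table_size, GFP_KERNEL, nid);
                if (!base)
                        base = vmalloc_node(table_size, nid);
        } else {
                /* early boot: slab is not up yet, fall back to bootmem */
                base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size,
                                PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
        }
        return base;
}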
@@ -158,7 +169,7 @@ void __free_page_cgroup(unsigned long pfn)
         }
 }
 
-int online_page_cgroup(unsigned long start_pfn,
+int __meminit online_page_cgroup(unsigned long start_pfn,
                         unsigned long nr_pages,
                         int nid)
 {
@@ -183,7 +194,7 @@ int online_page_cgroup(unsigned long start_pfn,
         return -ENOMEM;
 }
 
-int offline_page_cgroup(unsigned long start_pfn,
+int __meminit offline_page_cgroup(unsigned long start_pfn,
                 unsigned long nr_pages, int nid)
 {
         unsigned long start, end, pfn;
@@ -197,7 +208,7 @@ int offline_page_cgroup(unsigned long start_pfn,
 
 }
 
-static int page_cgroup_callback(struct notifier_block *self,
+static int __meminit page_cgroup_callback(struct notifier_block *self,
                                unsigned long action, void *arg)
 {
         struct memory_notify *mn = arg;
@@ -207,18 +218,23 @@ static int page_cgroup_callback(struct notifier_block *self,
                 ret = online_page_cgroup(mn->start_pfn,
                                    mn->nr_pages, mn->status_change_nid);
                 break;
-        case MEM_CANCEL_ONLINE:
         case MEM_OFFLINE:
                 offline_page_cgroup(mn->start_pfn,
                                 mn->nr_pages, mn->status_change_nid);
                 break;
+        case MEM_CANCEL_ONLINE:
         case MEM_GOING_OFFLINE:
                 break;
         case MEM_ONLINE:
         case MEM_CANCEL_OFFLINE:
                 break;
         }
-        ret = notifier_from_errno(ret);
+
+        if (ret)
+                ret = notifier_from_errno(ret);
+        else
+                ret = NOTIFY_OK;
+
         return ret;
 }
 
@@ -248,7 +264,7 @@ void __init page_cgroup_init(void)
         " want\n");
 }
 
-void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
+void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
 {
         return;
 }
mm/slub.c
@@ -2931,8 +2931,10 @@ static int slab_memory_callback(struct notifier_block *self,
         case MEM_CANCEL_OFFLINE:
                 break;
         }
-
-        ret = notifier_from_errno(ret);
+        if (ret)
+                ret = notifier_from_errno(ret);
+        else
+                ret = NOTIFY_OK;
         return ret;
 }
 
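page_cgroup_callback() and slab_memory_callback() above get the same return-value fix: the callback result goes through notifier_from_errno() only when it is an actual error, and NOTIFY_OK is returned for success. At this point in the kernel's history notifier_from_errno() did not special-case a zero argument, so converting a success value through it could wrongly carry NOTIFY_STOP_MASK and stop the notifier chain; the explicit branch avoids that. The repeated idiom, written out as a hypothetical helper:

#include <linux/notifier.h>

static int example_errno_to_notify(int err)
{
        if (err)
                return notifier_from_errno(err); /* error: convert, stop the chain */
        return NOTIFY_OK;                        /* success must not stop the chain */
}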
mm/sparse.c
@@ -570,7 +570,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
  * set. If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                            int nr_pages)
 {
         unsigned long section_nr = pfn_to_section_nr(start_pfn);
mm/vmalloc.c
@@ -77,7 +77,6 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
 
         BUG_ON(addr >= end);
         pgd = pgd_offset_k(addr);
-        flush_cache_vunmap(addr, end);
         do {
                 next = pgd_addr_end(addr, end);
                 if (pgd_none_or_clear_bad(pgd))
@@ -543,9 +542,10 @@ static void purge_vmap_area_lazy(void)
 }
 
 /*
- * Free and unmap a vmap area
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
  */
-static void free_unmap_vmap_area(struct vmap_area *va)
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 {
         va->flags |= VM_LAZY_FREE;
         atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
@@ -553,6 +553,15 @@ static void free_unmap_vmap_area(struct vmap_area *va)
         try_purge_vmap_area_lazy();
 }
 
+/*
+ * Free and unmap a vmap area
+ */
+static void free_unmap_vmap_area(struct vmap_area *va)
+{
+        flush_cache_vunmap(va->va_start, va->va_end);
+        free_unmap_vmap_area_noflush(va);
+}
+
 static struct vmap_area *find_vmap_area(unsigned long addr)
 {
         struct vmap_area *va;
@@ -734,7 +743,7 @@ static void free_vmap_block(struct vmap_block *vb)
         spin_unlock(&vmap_block_tree_lock);
         BUG_ON(tmp != vb);
 
-        free_unmap_vmap_area(vb->va);
+        free_unmap_vmap_area_noflush(vb->va);
         call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
@@ -796,6 +805,9 @@ static void vb_free(const void *addr, unsigned long size)
 
         BUG_ON(size & ~PAGE_MASK);
         BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+
+        flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
+
         order = get_order(size);
 
         offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
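The vmalloc.c hunks move cache flushing out of the generic page-table walk and up to callers that know the exact virtual range being torn down: vunmap_page_range() no longer flushes, free_unmap_vmap_area_noflush() only queues the lazy unmap, the re-added free_unmap_vmap_area() wrapper preserves the old flush-then-free behaviour, and vb_free() flushes just the sub-range it is releasing before using the _noflush variant. The resulting calling convention, sketched with a hypothetical in-file caller:

static void example_release_area(struct vmap_area *va)
{
        /* flush the precise range while it is still mapped ... */
        flush_cache_vunmap(va->va_start, va->va_end);
        /* ... then hand the area to the lazy unmapper, with no second flush */
        free_unmap_vmap_area_noflush(va);
}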
mm/vmscan.c
@@ -1248,6 +1248,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 list_add(&page->lru, &l_inactive);
         }
 
+        spin_lock_irq(&zone->lru_lock);
         /*
          * Count referenced pages from currently used mappings as
          * rotated, even though they are moved to the inactive list.
@@ -1263,7 +1264,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
         pgmoved = 0;
         lru = LRU_BASE + file * LRU_FILE;
-        spin_lock_irq(&zone->lru_lock);
         while (!list_empty(&l_inactive)) {
                 page = lru_to_page(&l_inactive);
                 prefetchw_prev_lru_page(page, &l_inactive, flags);