Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - a few hotfixes

 - various misc updates

 - ocfs2 updates

 - most of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (108 commits)
  mm, memory_hotplug: move movable_node to the hotplug proper
  mm, memory_hotplug: drop CONFIG_MOVABLE_NODE
  mm, memory_hotplug: drop artificial restriction on online/offline
  mm: memcontrol: account slab stats per lruvec
  mm: memcontrol: per-lruvec stats infrastructure
  mm: memcontrol: use generic mod_memcg_page_state for kmem pages
  mm: memcontrol: use the node-native slab memory counters
  mm: vmstat: move slab statistics from zone to node counters
  mm/zswap.c: delete an error message for a failed memory allocation in zswap_dstmem_prepare()
  mm/zswap.c: improve a size determination in zswap_frontswap_init()
  mm/zswap.c: delete an error message for a failed memory allocation in zswap_pool_create()
  mm/swapfile.c: sort swap entries before free
  mm/oom_kill: count global and memory cgroup oom kills
  mm: per-cgroup memory reclaim stats
  mm: kmemleak: treat vm_struct as alternative reference to vmalloc'ed objects
  mm: kmemleak: factor object reference updating out of scan_block()
  mm: kmemleak: slightly reduce the size of some structures on 64-bit architectures
  mm, mempolicy: don't check cpuset seqlock where it doesn't matter
  mm, cpuset: always use seqlock when changing task's nodemask
  mm, mempolicy: simplify rebinding mempolicies when updating cpusets
  ...
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -128,6 +128,9 @@ static ssize_t show_mem_removable(struct device *dev,
         int ret = 1;
         struct memory_block *mem = to_memory_block(dev);
 
+        if (mem->state != MEM_ONLINE)
+                goto out;
+
         for (i = 0; i < sections_per_block; i++) {
                 if (!present_section_nr(mem->start_section_nr + i))
                         continue;
@@ -135,6 +138,7 @@ static ssize_t show_mem_removable(struct device *dev,
                 ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
         }
 
+out:
         return sprintf(buf, "%d\n", ret);
 }
 
@@ -388,39 +392,43 @@ static ssize_t show_valid_zones(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
         struct memory_block *mem = to_memory_block(dev);
-        unsigned long start_pfn, end_pfn;
-        unsigned long valid_start, valid_end, valid_pages;
+        unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
         unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-        struct zone *zone;
-        int zone_shift = 0;
+        unsigned long valid_start_pfn, valid_end_pfn;
+        bool append = false;
+        int nid;
 
-        start_pfn = section_nr_to_pfn(mem->start_section_nr);
-        end_pfn = start_pfn + nr_pages;
-
-        /* The block contains more than one zone can not be offlined. */
-        if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
+        /*
+         * The block contains more than one zone can not be offlined.
+         * This can happen e.g. for ZONE_DMA and ZONE_DMA32
+         */
+        if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
                 return sprintf(buf, "none\n");
 
-        zone = page_zone(pfn_to_page(valid_start));
-        valid_pages = valid_end - valid_start;
+        start_pfn = valid_start_pfn;
+        nr_pages = valid_end_pfn - start_pfn;
 
-        /* MMOP_ONLINE_KEEP */
-        sprintf(buf, "%s", zone->name);
-
-        /* MMOP_ONLINE_KERNEL */
-        zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
-        if (zone_shift) {
-                strcat(buf, " ");
-                strcat(buf, (zone + zone_shift)->name);
+        /*
+         * Check the existing zone. Make sure that we do that only on the
+         * online nodes otherwise the page_zone is not reliable
+         */
+        if (mem->state == MEM_ONLINE) {
+                strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
+                goto out;
         }
 
-        /* MMOP_ONLINE_MOVABLE */
-        zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
-        if (zone_shift) {
-                strcat(buf, " ");
-                strcat(buf, (zone + zone_shift)->name);
+        nid = pfn_to_nid(start_pfn);
+        if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL)) {
+                strcat(buf, default_zone_for_pfn(nid, start_pfn, nr_pages)->name);
+                append = true;
         }
 
+        if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE)) {
+                if (append)
+                        strcat(buf, " ");
+                strcat(buf, NODE_DATA(nid)->node_zones[ZONE_MOVABLE].name);
+        }
+out:
         strcat(buf, "\n");
 
         return strlen(buf);
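Note: with the rework above, valid_zones reports the block's current zone when it is online, and the candidate onlining zones otherwise. A minimal userspace sketch of reading the attribute (the block number memory32 is a made-up example, and this reader is illustrative, not part of the patch):

        #include <stdio.h>

        int main(void)
        {
                char buf[128];
                /* memory32 is an arbitrary example block number */
                FILE *f = fopen("/sys/devices/system/memory/memory32/valid_zones", "r");

                if (!f)
                        return 1;
                /* prints e.g. "Normal Movable" or "none", per show_valid_zones() */
                if (fgets(buf, sizeof(buf), f))
                        fputs(buf, stdout);
                fclose(f);
                return 0;
        }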
@@ -685,14 +693,6 @@ static int add_memory_block(int base_section_nr)
         return 0;
 }
 
-static bool is_zone_device_section(struct mem_section *ms)
-{
-        struct page *page;
-
-        page = sparse_decode_mem_map(ms->section_mem_map, __section_nr(ms));
-        return is_zone_device_page(page);
-}
-
 /*
  * need an interface for the VM to add new memory regions,
  * but without onlining it.
@@ -702,9 +702,6 @@ int register_new_memory(int nid, struct mem_section *section)
         int ret = 0;
         struct memory_block *mem;
 
-        if (is_zone_device_section(section))
-                return 0;
-
         mutex_lock(&mem_sysfs_mutex);
 
         mem = find_memory_block(section);
@@ -741,11 +738,16 @@ static int remove_memory_section(unsigned long node_id,
 {
         struct memory_block *mem;
 
-        if (is_zone_device_section(section))
-                return 0;
-
         mutex_lock(&mem_sysfs_mutex);
+
+        /*
+         * Some users of the memory hotplug do not want/need memblock to
+         * track all sections. Skip over those.
+         */
         mem = find_memory_block(section);
+        if (!mem)
+                goto out_unlock;
+
         unregister_mem_sect_under_nodes(mem, __section_nr(section));
 
         mem->section_count--;
@@ -754,6 +756,7 @@ static int remove_memory_section(unsigned long node_id,
         else
                 put_device(&mem->dev);
 
+out_unlock:
         mutex_unlock(&mem_sysfs_mutex);
         return 0;
 }
@@ -820,6 +823,10 @@ int __init memory_dev_init(void)
          */
         mutex_lock(&mem_sysfs_mutex);
         for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) {
+                /* Don't iterate over sections we know are !present: */
+                if (i > __highest_present_section_nr)
+                        break;
+
                 err = add_memory_block(i);
                 if (!ret)
                         ret = err;
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -129,11 +129,11 @@ static ssize_t node_read_meminfo(struct device *dev,
                        nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
                        nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
                        nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
-                       nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) +
-                              sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
-                       nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)),
+                       nid, K(node_page_state(pgdat, NR_SLAB_RECLAIMABLE) +
+                              node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE)),
+                       nid, K(node_page_state(pgdat, NR_SLAB_RECLAIMABLE)),
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                       nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
+                       nid, K(node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE)),
                        nid, K(node_page_state(pgdat, NR_ANON_THPS) *
                               HPAGE_PMD_NR),
                        nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
@@ -141,7 +141,7 @@ static ssize_t node_read_meminfo(struct device *dev,
                        nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
                               HPAGE_PMD_NR));
 #else
-                       nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
+                       nid, K(node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE)));
 #endif
         n += hugetlb_report_node_meminfo(nid, buf + n);
         return n;
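Note: the meminfo changes above switch the slab counters from zone-based summation to node-native counters (see "mm: vmstat: move slab statistics from zone to node counters" in the commit list). For orientation, a simplified sketch of what the zone-summing helper does (illustrative only; the real sum_zone_node_page_state() lives in mm/vmstat.c):

        static unsigned long sum_zone_node_page_state_sketch(int node,
                                                enum zone_stat_item item)
        {
                struct zone *zones = NODE_DATA(node)->node_zones;
                unsigned long count = 0;
                int i;

                /* walk every zone of the node and accumulate its per-zone counter */
                for (i = 0; i < MAX_NR_ZONES; i++)
                        count += zone_page_state(zones + i, item);

                return count;
        }

A node-native node_page_state() read avoids this walk and agrees with how slab pages are actually accounted after the series.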
@@ -368,21 +368,14 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
-#define page_initialized(page)  (page->lru.next)
-
 static int __ref get_nid_for_pfn(unsigned long pfn)
 {
-        struct page *page;
-
         if (!pfn_valid_within(pfn))
                 return -1;
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
         if (system_state < SYSTEM_RUNNING)
                 return early_pfn_to_nid(pfn);
 #endif
-        page = pfn_to_page(pfn);
-        if (!page_initialized(page))
-                return -1;
         return pfn_to_nid(pfn);
 }
 
@@ -468,10 +461,9 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
         return 0;
 }
 
-static int link_mem_sections(int nid)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
 {
-        unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
-        unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages;
+        unsigned long end_pfn = start_pfn + nr_pages;
         unsigned long pfn;
         struct memory_block *mem_blk = NULL;
         int err = 0;
@@ -559,10 +551,7 @@ static int node_memory_callback(struct notifier_block *self,
         return NOTIFY_OK;
 }
 #endif  /* CONFIG_HUGETLBFS */
-#else   /* !CONFIG_MEMORY_HOTPLUG_SPARSE */
-
-static int link_mem_sections(int nid) { return 0; }
-#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 
 #if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
                 !defined(CONFIG_HUGETLBFS)
@@ -576,39 +565,32 @@ static void init_node_hugetlb_work(int nid) { }
 
 #endif
 
-int register_one_node(int nid)
+int __register_one_node(int nid)
 {
-        int error = 0;
+        int p_node = parent_node(nid);
+        struct node *parent = NULL;
+        int error;
         int cpu;
 
-        if (node_online(nid)) {
-                int p_node = parent_node(nid);
-                struct node *parent = NULL;
-
-                if (p_node != nid)
-                        parent = node_devices[p_node];
-
-                node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
-                if (!node_devices[nid])
-                        return -ENOMEM;
-
-                error = register_node(node_devices[nid], nid, parent);
-
-                /* link cpu under this node */
-                for_each_present_cpu(cpu) {
-                        if (cpu_to_node(cpu) == nid)
-                                register_cpu_under_node(cpu, nid);
-                }
+        if (p_node != nid)
+                parent = node_devices[p_node];
 
-                /* link memory sections under this node */
-                error = link_mem_sections(nid);
+        node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
+        if (!node_devices[nid])
+                return -ENOMEM;
 
-                /* initialize work queue for memory hot plug */
-                init_node_hugetlb_work(nid);
-        }
+        error = register_node(node_devices[nid], nid, parent);
+
+        /* link cpu under this node */
+        for_each_present_cpu(cpu) {
+                if (cpu_to_node(cpu) == nid)
+                        register_cpu_under_node(cpu, nid);
+        }
+
+        /* initialize work queue for memory hot plug */
+        init_node_hugetlb_work(nid);
 
         return error;
 }
 
 void unregister_one_node(int nid)
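Note: after the split above, __register_one_node() only creates the sysfs node device and links its CPUs; linking memory sections for a given pfn range is now the caller's job via the exported link_mem_sections(). A hedged sketch of how a caller might combine the two pieces (the function name and error handling here are illustrative, not from the patch):

        /* illustrative caller; mirrors what a register_one_node() wrapper might do */
        static int example_register_and_link(int nid, unsigned long start_pfn,
                                             unsigned long nr_pages)
        {
                int error = __register_one_node(nid);

                if (error)
                        return error;

                /* link only the memory sections backing this pfn range */
                return link_mem_sections(nid, start_pfn, nr_pages);
        }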
@@ -657,9 +639,7 @@ static struct node_attr node_state_attr[] = {
 #ifdef CONFIG_HIGHMEM
         [N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
 #endif
-#ifdef CONFIG_MOVABLE_NODE
         [N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
-#endif
         [N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
 };
 
@@ -670,9 +650,7 @@ static struct attribute *node_state_attrs[] = {
 #ifdef CONFIG_HIGHMEM
         &node_state_attr[N_HIGH_MEMORY].attr.attr,
 #endif
-#ifdef CONFIG_MOVABLE_NODE
         &node_state_attr[N_MEMORY].attr.attr,
-#endif
         &node_state_attr[N_CPU].attr.attr,
         NULL
 };