mm, page_alloc: consider dirtyable memory in terms of nodes

Historically, dirty pages were spread among zones, but now that the LRUs are
per-node it is more appropriate to consider dirty pages in a node.

Link: http://lkml.kernel.org/r/1467970510-21195-17-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 281e37265f (parent 1e6b10857f)
Author:    Mel Gorman <mgorman@techsingularity.net>
Date:      2016-07-28 15:46:11 -07:00
Committed: Linus Torvalds <torvalds@linux-foundation.org>

4 changed files with 79 additions and 52 deletions
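Only the page-writeback.c hunks are reproduced below; the allocator-side callers (the commit title points at mm/page_alloc.c) are among the other changed files. As a reading aid, here is a small userspace sketch of the aggregation the new node_dirty_ok() performs: dirty, unstable-NFS and writeback pages are summed across a node's populated zones and compared against the node-wide limit. All struct names and page counts here are hypothetical; this is not kernel code.

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_NR_ZONES 4	/* hypothetical zone count for the sketch */

/* Toy stand-in for struct zone's vmstat counters */
struct zone_sim {
	bool populated;
	unsigned long nr_file_dirty;
	unsigned long nr_unstable_nfs;
	unsigned long nr_writeback;
};

/* Toy stand-in for pglist_data: a node is just its zones here */
struct node_sim {
	struct zone_sim zones[MAX_NR_ZONES];
};

/* Mirrors the shape of node_dirty_ok(): sum the dirty-state counters
 * over populated zones, then compare against the node-wide limit. */
static bool node_dirty_ok_sim(const struct node_sim *node, unsigned long limit)
{
	unsigned long nr_pages = 0;

	for (int z = 0; z < MAX_NR_ZONES; z++) {
		const struct zone_sim *zone = &node->zones[z];

		if (!zone->populated)
			continue;

		nr_pages += zone->nr_file_dirty;
		nr_pages += zone->nr_unstable_nfs;
		nr_pages += zone->nr_writeback;
	}

	return nr_pages <= limit;
}

int main(void)
{
	struct node_sim node = {
		.zones = {
			{ .populated = true, .nr_file_dirty = 300,
			  .nr_unstable_nfs = 0, .nr_writeback = 100 },
			{ .populated = false },	/* skipped, like !populated_zone() */
			{ .populated = true, .nr_file_dirty = 700,
			  .nr_unstable_nfs = 50, .nr_writeback = 0 },
		},
	};

	/* 1150 dirty-state pages in total across the node's zones */
	printf("limit 2000: %s\n", node_dirty_ok_sim(&node, 2000) ? "ok" : "over");
	printf("limit 1000: %s\n", node_dirty_ok_sim(&node, 1000) ? "ok" : "over");
	return 0;
}
```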

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c

@@ -267,26 +267,35 @@ static void wb_min_max_ratio(struct bdi_writeback *wb,
  */

 /**
- * zone_dirtyable_memory - number of dirtyable pages in a zone
- * @zone: the zone
+ * node_dirtyable_memory - number of dirtyable pages in a node
+ * @pgdat: the node
  *
- * Returns the zone's number of pages potentially available for dirty
- * page cache. This is the base value for the per-zone dirty limits.
+ * Returns the node's number of pages potentially available for dirty
+ * page cache. This is the base value for the per-node dirty limits.
  */
-static unsigned long zone_dirtyable_memory(struct zone *zone)
+static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
 {
-	unsigned long nr_pages;
+	unsigned long nr_pages = 0;
+	int z;
+
+	for (z = 0; z < MAX_NR_ZONES; z++) {
+		struct zone *zone = pgdat->node_zones + z;
+
+		if (!populated_zone(zone))
+			continue;
+
+		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
+	}

-	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
 	/*
 	 * Pages reserved for the kernel should not be considered
 	 * dirtyable, to prevent a situation where reclaim has to
 	 * clean pages in order to balance the zones.
 	 */
-	nr_pages -= min(nr_pages, zone->totalreserve_pages);
+	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);

-	nr_pages += node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE);
-	nr_pages += node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE);
+	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
+	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);

 	return nr_pages;
 }
@@ -299,13 +308,24 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 	int i;

 	for_each_node_state(node, N_HIGH_MEMORY) {
-		for (i = 0; i < MAX_NR_ZONES; i++) {
-			struct zone *z = &NODE_DATA(node)->node_zones[i];
+		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
+			struct zone *z;
+			unsigned long dirtyable;
+
+			if (!is_highmem_idx(i))
+				continue;
+
+			z = &NODE_DATA(node)->node_zones[i];
+			dirtyable = zone_page_state(z, NR_FREE_PAGES) +
+				zone_page_state(z, NR_ZONE_LRU_FILE);

-			if (is_highmem(z))
-				x += zone_dirtyable_memory(z);
+			/* watch for underflows */
+			dirtyable -= min(dirtyable, high_wmark_pages(z));
+
+			x += dirtyable;
 		}
 	}

 	/*
 	 * Unreclaimable memory (kernel memory or anonymous memory
 	 * without swap) can bring down the dirtyable pages below
@@ -445,23 +465,23 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 }

 /**
- * zone_dirty_limit - maximum number of dirty pages allowed in a zone
- * @zone: the zone
+ * node_dirty_limit - maximum number of dirty pages allowed in a node
+ * @pgdat: the node
  *
- * Returns the maximum number of dirty pages allowed in a zone, based
- * on the zone's dirtyable memory.
+ * Returns the maximum number of dirty pages allowed in a node, based
+ * on the node's dirtyable memory.
  */
-static unsigned long zone_dirty_limit(struct zone *zone)
+static unsigned long node_dirty_limit(struct pglist_data *pgdat)
 {
-	unsigned long zone_memory = zone_dirtyable_memory(zone);
+	unsigned long node_memory = node_dirtyable_memory(pgdat);
 	struct task_struct *tsk = current;
 	unsigned long dirty;

 	if (vm_dirty_bytes)
 		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
-			zone_memory / global_dirtyable_memory();
+			node_memory / global_dirtyable_memory();
 	else
-		dirty = vm_dirty_ratio * zone_memory / 100;
+		dirty = vm_dirty_ratio * node_memory / 100;

 	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
 		dirty += dirty / 4;
@@ -470,19 +490,30 @@ static unsigned long zone_dirty_limit(struct zone *zone)
 }

 /**
- * zone_dirty_ok - tells whether a zone is within its dirty limits
- * @zone: the zone to check
+ * node_dirty_ok - tells whether a node is within its dirty limits
+ * @pgdat: the node to check
  *
- * Returns %true when the dirty pages in @zone are within the zone's
+ * Returns %true when the dirty pages in @pgdat are within the node's
  * dirty limit, %false if the limit is exceeded.
  */
-bool zone_dirty_ok(struct zone *zone)
+bool node_dirty_ok(struct pglist_data *pgdat)
 {
-	unsigned long limit = zone_dirty_limit(zone);
+	int z;
+	unsigned long limit = node_dirty_limit(pgdat);
+	unsigned long nr_pages = 0;

-	return zone_page_state(zone, NR_FILE_DIRTY) +
-	       zone_page_state(zone, NR_UNSTABLE_NFS) +
-	       zone_page_state(zone, NR_WRITEBACK) <= limit;
+	for (z = 0; z < MAX_NR_ZONES; z++) {
+		struct zone *zone = pgdat->node_zones + z;
+
+		if (!populated_zone(zone))
+			continue;
+
+		nr_pages += zone_page_state(zone, NR_FILE_DIRTY);
+		nr_pages += zone_page_state(zone, NR_UNSTABLE_NFS);
+		nr_pages += zone_page_state(zone, NR_WRITEBACK);
+	}
+
+	return nr_pages <= limit;
 }

 int dirty_background_ratio_handler(struct ctl_table *table, int write,
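The per-node limit that the sum above is compared against scales the global dirty settings by the node's share of dirtyable memory. Below is a minimal userspace sketch of that arithmetic, mirroring the vm_dirty_bytes and vm_dirty_ratio branches of node_dirty_limit(); the page counts are hypothetical and a 4 KiB page size is assumed.

```c
#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumed 4 KiB pages for the sketch */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * Mirrors the two branches of node_dirty_limit(): either vm_dirty_bytes
 * is distributed by the node's share of globally dirtyable memory, or
 * vm_dirty_ratio is applied to the node's own dirtyable memory. The
 * kernel additionally adds 25% for PF_LESS_THROTTLE and realtime
 * tasks, which is omitted here.
 */
static unsigned long node_dirty_limit_sim(unsigned long node_memory,
					  unsigned long global_memory,
					  unsigned long vm_dirty_bytes,
					  unsigned long vm_dirty_ratio)
{
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			node_memory / global_memory;
	else
		dirty = vm_dirty_ratio * node_memory / 100;

	return dirty;
}

int main(void)
{
	/* a node holding 1M of the 4M globally dirtyable pages */
	unsigned long node = 1UL << 20, global = 4UL << 20;

	printf("ratio path (20%%):  %lu pages\n",
	       node_dirty_limit_sim(node, global, 0, 20));
	printf("bytes path (1GiB): %lu pages\n",
	       node_dirty_limit_sim(node, global, 1UL << 30, 0));
	return 0;
}
```

Either branch yields the same proportional split: a node holding a quarter of the globally dirtyable pages ends up with a quarter of the global dirty budget.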