Merge branch 'akpm' (patches from Andrew Morton)
Merge more patches from Andrew Morton:
 "The rest of MM.  Plus one misc cleanup"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (35 commits)
  mm/Kconfig: add MMU dependency for MIGRATION.
  kernel: replace strict_strto*() with kstrto*()
  mm, thp: count thp_fault_fallback anytime thp fault fails
  thp: consolidate code between handle_mm_fault() and do_huge_pmd_anonymous_page()
  thp: do_huge_pmd_anonymous_page() cleanup
  thp: move maybe_pmd_mkwrite() out of mk_huge_pmd()
  mm: cleanup add_to_page_cache_locked()
  thp: account anon transparent huge pages into NR_ANON_PAGES
  truncate: drop 'oldsize' truncate_pagecache() parameter
  mm: make lru_add_drain_all() selective
  memcg: document cgroup dirty/writeback memory statistics
  memcg: add per cgroup writeback pages accounting
  memcg: check for proper lock held in mem_cgroup_update_page_stat
  memcg: remove MEMCG_NR_FILE_MAPPED
  memcg: reduce function dereference
  memcg: avoid overflow caused by PAGE_ALIGN
  memcg: rename RESOURCE_MAX to RES_COUNTER_MAX
  memcg: correct RESOURCE_MAX to ULLONG_MAX
  mm: memcg: do not trap chargers with full callstack on OOM
  mm: memcg: rework and document OOM waiting and wakeup
  ...
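One recurring item in this batch is the strict_strto*() -> kstrto*() conversion. As a point of reference, a minimal sketch of what such a call site looks like; the store handler below is hypothetical and not taken from this series, only the kstrtoul() call itself is the real kernel API (strict_strtoul() was a deprecated alias with the same argument shape):

    #include <linux/kernel.h>   /* kstrtoul() */
    #include <linux/kobject.h>  /* struct kobj_attribute */
    #include <linux/types.h>    /* ssize_t */

    /* Hypothetical sysfs store handler, shown only to illustrate the
     * tree-wide conversion: same (buf, base, &val) arguments, but
     * kstrtoul() is the supported helper. */
    static ssize_t threshold_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count)
    {
            unsigned long val;
            int err;

            err = kstrtoul(buf, 10, &val);  /* was: strict_strtoul(buf, 10, &val) */
            if (err)
                    return err;
            /* ... apply val ... */
            return count;
    }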
 mm/vmscan.c | 83 changed lines
diff --git a/mm/vmscan.c b/mm/vmscan.c
@@ -139,11 +139,23 @@ static bool global_reclaim(struct scan_control *sc)
 {
     return !sc->target_mem_cgroup;
 }
+
+static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
+{
+    struct mem_cgroup *root = sc->target_mem_cgroup;
+    return !mem_cgroup_disabled() &&
+        mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE;
+}
 #else
 static bool global_reclaim(struct scan_control *sc)
 {
     return true;
 }
+
+static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
+{
+    return false;
+}
 #endif
 
 unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -2164,9 +2176,11 @@ static inline bool should_continue_reclaim(struct zone *zone,
     }
 }
 
-static void shrink_zone(struct zone *zone, struct scan_control *sc)
+static int
+__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 {
     unsigned long nr_reclaimed, nr_scanned;
+    int groups_scanned = 0;
 
     do {
         struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2174,15 +2188,17 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
             .zone = zone,
             .priority = sc->priority,
         };
-        struct mem_cgroup *memcg;
+        struct mem_cgroup *memcg = NULL;
+        mem_cgroup_iter_filter filter = (soft_reclaim) ?
+            mem_cgroup_soft_reclaim_eligible : NULL;
 
         nr_reclaimed = sc->nr_reclaimed;
         nr_scanned = sc->nr_scanned;
 
-        memcg = mem_cgroup_iter(root, NULL, &reclaim);
-        do {
+        while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
             struct lruvec *lruvec;
 
+            groups_scanned++;
             lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
             shrink_lruvec(lruvec, sc);
@@ -2202,8 +2218,7 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
                 mem_cgroup_iter_break(root, memcg);
                 break;
             }
-            memcg = mem_cgroup_iter(root, memcg, &reclaim);
-        } while (memcg);
+        }
 
         vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
                sc->nr_scanned - nr_scanned,
@@ -2211,6 +2226,37 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
 
     } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
                      sc->nr_scanned - nr_scanned, sc));
+
+    return groups_scanned;
 }
+
+
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
+{
+    bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
+    unsigned long nr_scanned = sc->nr_scanned;
+    int scanned_groups;
+
+    scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
+    /*
+     * memcg iterator might race with other reclaimer or start from
+     * a incomplete tree walk so the tree walk in __shrink_zone
+     * might have missed groups that are above the soft limit. Try
+     * another loop to catch up with others. Do it just once to
+     * prevent from reclaim latencies when other reclaimers always
+     * preempt this one.
+     */
+    if (do_soft_reclaim && !scanned_groups)
+        __shrink_zone(zone, sc, do_soft_reclaim);
+
+    /*
+     * No group is over the soft limit or those that are do not have
+     * pages in the zone we are reclaiming so we have to reclaim everybody
+     */
+    if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
+        __shrink_zone(zone, sc, false);
+        return;
+    }
+}
 
 /* Returns true if compaction should go ahead for a high-order request */
@@ -2274,8 +2320,6 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
     struct zoneref *z;
     struct zone *zone;
-    unsigned long nr_soft_reclaimed;
-    unsigned long nr_soft_scanned;
     bool aborted_reclaim = false;
 
     /*
@@ -2315,18 +2359,6 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                 continue;
             }
         }
-        /*
-         * This steals pages from memory cgroups over softlimit
-         * and returns the number of reclaimed pages and
-         * scanned pages. This works for global memory pressure
-         * and balancing, not for a memcg's limit.
-         */
-        nr_soft_scanned = 0;
-        nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
-                    sc->order, sc->gfp_mask,
-                    &nr_soft_scanned);
-        sc->nr_reclaimed += nr_soft_reclaimed;
-        sc->nr_scanned += nr_soft_scanned;
-        /* need some check for avoid more shrink_zone() */
     }
 
@@ -2920,8 +2952,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 {
     int i;
     int end_zone = 0;   /* Inclusive.  0 = ZONE_DMA */
-    unsigned long nr_soft_reclaimed;
-    unsigned long nr_soft_scanned;
     struct scan_control sc = {
         .gfp_mask = GFP_KERNEL,
         .priority = DEF_PRIORITY,
@@ -3036,15 +3066,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 
             sc.nr_scanned = 0;
 
-            nr_soft_scanned = 0;
-            /*
-             * Call soft limit reclaim before calling shrink_zone.
-             */
-            nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
-                            order, sc.gfp_mask,
-                            &nr_soft_scanned);
-            sc.nr_reclaimed += nr_soft_reclaimed;
-
             /*
              * There should be no need to raise the scanning
              * priority if enough pages are already being scanned
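To make the control flow of the reworked shrink_zone() easier to follow outside the kernel, here is a minimal userspace sketch of its decision logic: one filtered pass over soft-limit-eligible groups, one retry if a racing reclaimer left the tree walk empty, and a fallback unfiltered pass when nothing was scanned. All names ending in _sim are stand-ins invented for illustration, not kernel APIs, and the scan accounting is arbitrary:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for struct scan_control; only the fields the flow needs. */
    struct scan_control {
            unsigned long nr_scanned;
            unsigned long nr_reclaimed;
    };

    /* Stand-in for __shrink_zone(): a soft-limit pass only touches groups
     * over their soft limit; an unfiltered pass always scans something. */
    static int __shrink_zone_sim(struct scan_control *sc, bool soft_reclaim,
                                 int groups_over_limit)
    {
            int groups_scanned = soft_reclaim ? groups_over_limit : 1;

            sc->nr_scanned += (unsigned long)groups_scanned * 32;
            return groups_scanned;
    }

    /* Mirrors the decision flow of the new shrink_zone() above. */
    static void shrink_zone_sim(struct scan_control *sc, bool do_soft_reclaim,
                                int groups_over_limit)
    {
            unsigned long nr_scanned = sc->nr_scanned;
            int scanned_groups;

            /* Pass 1: filtered walk when soft reclaim is eligible. */
            scanned_groups = __shrink_zone_sim(sc, do_soft_reclaim,
                                               groups_over_limit);

            /* Retry once if a racing reclaimer left us an empty tree walk. */
            if (do_soft_reclaim && !scanned_groups)
                    __shrink_zone_sim(sc, do_soft_reclaim, groups_over_limit);

            /* Still nothing scanned: fall back to reclaiming everybody. */
            if (do_soft_reclaim && sc->nr_scanned == nr_scanned)
                    __shrink_zone_sim(sc, false, groups_over_limit);
    }

    int main(void)
    {
            struct scan_control sc = { 0, 0 };

            shrink_zone_sim(&sc, true, 0);  /* no group over its soft limit */
            printf("nr_scanned after fallback pass: %lu\n", sc.nr_scanned);
            return 0;
    }

The fallback pass is what lets this rework delete the explicit mem_cgroup_soft_limit_reclaim() calls from shrink_zones() and balance_pgdat() in the hunks above: when no group exceeds its soft limit in the zone, reclaim degrades to the old scan-everybody behaviour instead of stalling.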