mm, oom: rename zonelist locking functions
try_set_zonelist_oom() and clear_zonelist_oom() are not named properly
to imply that they require locking semantics to avoid out_of_memory()
being reordered.

zone_scan_lock is required for both functions to ensure that there is
proper locking synchronization.

Rename try_set_zonelist_oom() to oom_zonelist_trylock() and rename
clear_zonelist_oom() to oom_zonelist_unlock() to imply there is proper
locking semantics.

At the same time, convert oom_zonelist_trylock() to return bool instead
of int since only success and failure are tested.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit e972a070e2
parent 8d060bf490
committed by Linus Torvalds
@@ -559,28 +559,25 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
  * if a parallel OOM killing is already taking place that includes a zone in
  * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
  */
-int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
+bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
 {
 	struct zoneref *z;
 	struct zone *zone;
-	int ret = 1;
+	bool ret = true;
 
 	spin_lock(&zone_scan_lock);
-	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
 		if (zone_is_oom_locked(zone)) {
-			ret = 0;
+			ret = false;
 			goto out;
 		}
-	}
-
-	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
-		/*
-		 * Lock each zone in the zonelist under zone_scan_lock so a
-		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
-		 * when it shouldn't.
-		 */
+	/*
+	 * Lock each zone in the zonelist under zone_scan_lock so a parallel
+	 * call to oom_zonelist_trylock() doesn't succeed when it shouldn't.
+	 */
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
 		zone_set_flag(zone, ZONE_OOM_LOCKED);
-	}
-
 out:
 	spin_unlock(&zone_scan_lock);
@@ -592,15 +589,14 @@ out:
  * allocation attempts with zonelists containing them may now recall the OOM
  * killer, if necessary.
  */
-void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
+void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
 {
 	struct zoneref *z;
 	struct zone *zone;
 
 	spin_lock(&zone_scan_lock);
-	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
 		zone_clear_flag(zone, ZONE_OOM_LOCKED);
-	}
 	spin_unlock(&zone_scan_lock);
 }
@@ -695,8 +691,8 @@ void pagefault_out_of_memory(void)
 		return;
 
 	zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
-	if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
+	if (oom_zonelist_trylock(zonelist, GFP_KERNEL)) {
 		out_of_memory(NULL, 0, 0, NULL, false);
-		clear_zonelist_oom(zonelist, GFP_KERNEL);
+		oom_zonelist_unlock(zonelist, GFP_KERNEL);
 	}
 }
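The pagefault_out_of_memory() hunk above shows the calling convention the rename is meant to make obvious: attempt the trylock, do the exclusive work only on success, and pair that success path with the unlock. For readers outside the kernel tree, here is a minimal standalone C sketch of the same trylock/unlock discipline; the names region_trylock() and region_unlock(), the oom_in_progress flag, and the pthread mutex are illustrative stand-ins, not kernel code.

/*
 * Minimal userspace sketch (not kernel code) of the trylock/unlock
 * pattern this commit makes explicit: the trylock returns bool, and
 * the caller only does the exclusive work -- and only unlocks --
 * when the trylock succeeded.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for zone_scan_lock: serializes the lock/unlock operations. */
static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
/* Stand-in for ZONE_OOM_LOCKED: set while an "OOM kill" is in flight. */
static bool oom_in_progress;

/* Analogue of oom_zonelist_trylock(): fails if a parallel kill is running. */
static bool region_trylock(void)
{
	bool ret = true;

	pthread_mutex_lock(&scan_lock);
	if (oom_in_progress)
		ret = false;
	else
		oom_in_progress = true;
	pthread_mutex_unlock(&scan_lock);
	return ret;
}

/* Analogue of oom_zonelist_unlock(): lets the next caller proceed. */
static void region_unlock(void)
{
	pthread_mutex_lock(&scan_lock);
	oom_in_progress = false;
	pthread_mutex_unlock(&scan_lock);
}

int main(void)
{
	/* Mirrors the caller pattern in pagefault_out_of_memory(). */
	if (region_trylock()) {
		puts("exclusive work (out_of_memory() in the kernel)");
		region_unlock();
	}
	return 0;
}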