mm: memcontrol: rewrite uncharge API
The memcg uncharging code that is involved towards the end of a page's lifetime - truncation, reclaim, swapout, migration - is impressively complicated and fragile.

Because anonymous and file pages were always charged before they had their page->mapping established, uncharges had to happen when the page type could still be known from the context; as in unmap for anonymous, page cache removal for file and shmem pages, and swap cache truncation for swap pages. However, these operations happen well before the page is actually freed, and so a lot of synchronization is necessary:

- Charging, uncharging, page migration, and charge migration all need to take a per-page bit spinlock as they could race with uncharging.

- Swap cache truncation happens during both swap-in and swap-out, and possibly repeatedly before the page is actually freed. This means that the memcg swapout code is called from many contexts that make no sense and it has to figure out the direction from page state to make sure memory and memory+swap are always correctly charged.

- On page migration, the old page might be unmapped but then reused, so memcg code has to prevent untimely uncharging in that case. Because this code - which should be a simple charge transfer - is so special-cased, it is not reusable for replace_page_cache().

But now that charged pages always have a page->mapping, introduce mem_cgroup_uncharge(), which is called after the final put_page(), when we know for sure that nobody is looking at the page anymore.

For page migration, introduce mem_cgroup_migrate(), which is called after the migration is successful and the new page is fully rmapped. Because the old page is no longer uncharged after migration, prevent double charges by decoupling the page's memcg association (PCG_USED and pc->mem_cgroup) from the page holding an actual charge. The new bits PCG_MEM and PCG_MEMSW represent the respective charges and are transferred to the new page during migration.

mem_cgroup_migrate() is suitable for replace_page_cache() as well, which gets rid of mem_cgroup_replace_page_cache(). However, care needs to be taken because both the source and the target page can already be charged and on the LRU when fuse is splicing: grab the page lock on the charge moving side to prevent changing pc->mem_cgroup of a page under migration. Also, the lruvecs of both pages change as we uncharge the old and charge the new during migration, and putback may race with us, so grab the lru lock and isolate the pages iff on LRU to prevent races and ensure the pages are on the right lruvec afterward.

Swap accounting is massively simplified: because the page is no longer uncharged as early as swap cache deletion, a new mem_cgroup_swapout() can transfer the page's memory+swap charge (PCG_MEMSW) to the swap entry before the final put_page() in page reclaim.

Finally, page_cgroup changes are now protected by whatever protection the page itself offers: anonymous pages are charged under the page table lock, whereas page cache insertions, swapin, and migration hold the page lock. Uncharging happens under full exclusion with no outstanding references. Charging and uncharging also ensure that the page is off-LRU, which serializes against charge migration. Remove the very costly page_cgroup lock and set pc->flags non-atomically.
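For orientation, here is a minimal sketch of how the reworked entry points are meant to be called at the end of a page's lifetime. The wrapper functions are invented for illustration and are not part of this patch; the memcg calls and the single-argument swapcache_free() mirror the call sites visible in the diff below (release_pages(), __remove_mapping(), move_to_new_page()), and the name of mem_cgroup_migrate()'s third argument is inferred from the LRU-isolation behaviour described above.

/* Illustrative sketch only - the real call sites are in the diff below. */

/* Freeing: the last reference is gone, nobody can see the page anymore. */
static void sketch_free_page(struct page *page)
{
        /* The page is already unmapped, out of the page cache and off the LRU. */
        mem_cgroup_uncharge(page);      /* drop the memory (and memsw) charge */
        /* ... hand the page back to the page allocator ... */
}

/* Reclaim of a swap cache page: hand the memory+swap charge to the entry. */
static void sketch_reclaim_swapcache(struct address_space *mapping,
                                     struct page *page)
{
        swp_entry_t swap = { .val = page_private(page) };

        /* mapping->tree_lock is held by the caller here, as in __remove_mapping() */
        mem_cgroup_swapout(page, swap);         /* PCG_MEMSW -> swap entry */
        __delete_from_swap_cache(page);
        spin_unlock_irq(&mapping->tree_lock);
        swapcache_free(swap);                   /* no struct page argument anymore */
}

/* Migration: transfer the charge only once the copy is known to succeed. */
static void sketch_finish_migration(struct page *old, struct page *new, bool lrucare)
{
        /* lrucare: both pages may already be charged and on the LRU (fuse splice) */
        mem_cgroup_migrate(old, new, lrucare);
}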
[mhocko@suse.cz: mem_cgroup_charge_statistics needs preempt_disable]
[vdavydov@parallels.com: fix flags definition]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Tested-by: Jet Chen <jet.chen@intel.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Tested-by: Felipe Balbi <balbi@ti.com>
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 0a31bc97c8
parent 00501b531c
committed by Linus Torvalds
mm/filemap.c
@@ -234,7 +234,6 @@ void delete_from_page_cache(struct page *page)
         spin_lock_irq(&mapping->tree_lock);
         __delete_from_page_cache(page, NULL);
         spin_unlock_irq(&mapping->tree_lock);
-        mem_cgroup_uncharge_cache_page(page);
 
         if (freepage)
                 freepage(page);
@@ -490,8 +489,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                 if (PageSwapBacked(new))
                         __inc_zone_page_state(new, NR_SHMEM);
                 spin_unlock_irq(&mapping->tree_lock);
-                /* mem_cgroup codes must not be called under tree_lock */
-                mem_cgroup_replace_page_cache(old, new);
+                mem_cgroup_migrate(old, new, true);
                 radix_tree_preload_end();
                 if (freepage)
                         freepage(old);
mm/memcontrol.c (828 changed lines)
File diff suppressed because it is too large
mm/memory.c
@@ -1292,7 +1292,6 @@ static void unmap_page_range(struct mmu_gather *tlb,
                 details = NULL;
 
         BUG_ON(addr >= end);
-        mem_cgroup_uncharge_start();
         tlb_start_vma(tlb, vma);
         pgd = pgd_offset(vma->vm_mm, addr);
         do {
@@ -1302,7 +1301,6 @@ static void unmap_page_range(struct mmu_gather *tlb,
                 next = zap_pud_range(tlb, vma, pgd, addr, next, details);
         } while (pgd++, addr = next, addr != end);
         tlb_end_vma(tlb, vma);
-        mem_cgroup_uncharge_end();
 }
 
 
mm/migrate.c (38 changed lines)
@@ -780,6 +780,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
         if (rc != MIGRATEPAGE_SUCCESS) {
                 newpage->mapping = NULL;
         } else {
+                mem_cgroup_migrate(page, newpage, false);
                 if (remap_swapcache)
                         remove_migration_ptes(page, newpage);
                 page->mapping = NULL;
@@ -795,7 +796,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 {
         int rc = -EAGAIN;
         int remap_swapcache = 1;
-        struct mem_cgroup *mem;
         struct anon_vma *anon_vma = NULL;
 
         if (!trylock_page(page)) {
@@ -821,9 +821,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                 lock_page(page);
         }
 
-        /* charge against new page */
-        mem_cgroup_prepare_migration(page, newpage, &mem);
-
         if (PageWriteback(page)) {
                 /*
                  * Only in the case of a full synchronous migration is it
@@ -833,10 +830,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                  */
                 if (mode != MIGRATE_SYNC) {
                         rc = -EBUSY;
-                        goto uncharge;
+                        goto out_unlock;
                 }
                 if (!force)
-                        goto uncharge;
+                        goto out_unlock;
                 wait_on_page_writeback(page);
         }
         /*
@@ -872,7 +869,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                          */
                         remap_swapcache = 0;
                 } else {
-                        goto uncharge;
+                        goto out_unlock;
                 }
         }
 
@@ -885,7 +882,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                  * the page migration right away (proteced by page lock).
                  */
                 rc = balloon_page_migrate(newpage, page, mode);
-                goto uncharge;
+                goto out_unlock;
         }
 
         /*
@@ -904,7 +901,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                 VM_BUG_ON_PAGE(PageAnon(page), page);
                 if (page_has_private(page)) {
                         try_to_free_buffers(page);
-                        goto uncharge;
+                        goto out_unlock;
                 }
                 goto skip_unmap;
         }
@@ -923,10 +920,7 @@ skip_unmap:
         if (anon_vma)
                 put_anon_vma(anon_vma);
 
-uncharge:
-        mem_cgroup_end_migration(mem, page, newpage,
-                                 (rc == MIGRATEPAGE_SUCCESS ||
-                                  rc == MIGRATEPAGE_BALLOON_SUCCESS));
+out_unlock:
         unlock_page(page);
 out:
         return rc;
@@ -1786,7 +1780,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
         pg_data_t *pgdat = NODE_DATA(node);
         int isolated = 0;
         struct page *new_page = NULL;
-        struct mem_cgroup *memcg = NULL;
         int page_lru = page_is_file_cache(page);
         unsigned long mmun_start = address & HPAGE_PMD_MASK;
         unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
@@ -1852,15 +1845,6 @@ fail_putback:
                 goto out_unlock;
         }
 
-        /*
-         * Traditional migration needs to prepare the memcg charge
-         * transaction early to prevent the old page from being
-         * uncharged when installing migration entries. Here we can
-         * save the potential rollback and start the charge transfer
-         * only when migration is already known to end successfully.
-         */
-        mem_cgroup_prepare_migration(page, new_page, &memcg);
-
         orig_entry = *pmd;
         entry = mk_pmd(new_page, vma->vm_page_prot);
         entry = pmd_mkhuge(entry);
@@ -1888,14 +1872,10 @@ fail_putback:
                 goto fail_putback;
         }
 
+        mem_cgroup_migrate(page, new_page, false);
+
         page_remove_rmap(page);
 
-        /*
-         * Finish the charge transaction under the page table lock to
-         * prevent split_huge_page() from dividing up the charge
-         * before it's fully transferred to the new page.
-         */
-        mem_cgroup_end_migration(memcg, page, new_page, true);
         spin_unlock(ptl);
         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
mm/rmap.c
@@ -1089,7 +1089,6 @@ void page_remove_rmap(struct page *page)
         if (unlikely(PageHuge(page)))
                 goto out;
         if (anon) {
-                mem_cgroup_uncharge_page(page);
                 if (PageTransHuge(page))
                         __dec_zone_page_state(page,
                                               NR_ANON_TRANSPARENT_HUGEPAGES);
mm/shmem.c
@@ -419,7 +419,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                 pvec.pages, indices);
                 if (!pvec.nr)
                         break;
-                mem_cgroup_uncharge_start();
                 for (i = 0; i < pagevec_count(&pvec); i++) {
                         struct page *page = pvec.pages[i];
 
@@ -447,7 +446,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                 }
                 pagevec_remove_exceptionals(&pvec);
                 pagevec_release(&pvec);
-                mem_cgroup_uncharge_end();
                 cond_resched();
                 index++;
         }
@@ -495,7 +493,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                         index = start;
                         continue;
                 }
-                mem_cgroup_uncharge_start();
                 for (i = 0; i < pagevec_count(&pvec); i++) {
                         struct page *page = pvec.pages[i];
 
@@ -531,7 +528,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                 }
                 pagevec_remove_exceptionals(&pvec);
                 pagevec_release(&pvec);
-                mem_cgroup_uncharge_end();
                 index++;
         }
 
@@ -835,7 +831,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
         }
 
         mutex_unlock(&shmem_swaplist_mutex);
-        swapcache_free(swap, NULL);
+        swapcache_free(swap);
 redirty:
         set_page_dirty(page);
         if (wbc->for_reclaim)
@@ -1008,7 +1004,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                  */
                 oldpage = newpage;
         } else {
-                mem_cgroup_replace_page_cache(oldpage, newpage);
+                mem_cgroup_migrate(oldpage, newpage, false);
                 lru_cache_add_anon(newpage);
                 *pagep = newpage;
         }
mm/swap.c
@@ -62,6 +62,7 @@ static void __page_cache_release(struct page *page)
                 del_page_from_lru_list(page, lruvec, page_off_lru(page));
                 spin_unlock_irqrestore(&zone->lru_lock, flags);
         }
+        mem_cgroup_uncharge(page);
 }
 
 static void __put_single_page(struct page *page)
@@ -907,6 +908,8 @@ void release_pages(struct page **pages, int nr, bool cold)
         struct lruvec *lruvec;
         unsigned long uninitialized_var(flags);
 
+        mem_cgroup_uncharge_start();
+
         for (i = 0; i < nr; i++) {
                 struct page *page = pages[i];
 
@@ -938,6 +941,7 @@ void release_pages(struct page **pages, int nr, bool cold)
                         __ClearPageLRU(page);
                         del_page_from_lru_list(page, lruvec, page_off_lru(page));
                 }
+                mem_cgroup_uncharge(page);
 
                 /* Clear Active bit in case of parallel mark_page_accessed */
                 __ClearPageActive(page);
@@ -947,6 +951,8 @@ void release_pages(struct page **pages, int nr, bool cold)
         if (zone)
                 spin_unlock_irqrestore(&zone->lru_lock, flags);
 
+        mem_cgroup_uncharge_end();
+
         free_hot_cold_page_list(&pages_to_free, cold);
 }
 EXPORT_SYMBOL(release_pages);
mm/swap_state.c
@@ -176,7 +176,7 @@ int add_to_swap(struct page *page, struct list_head *list)
 
         if (unlikely(PageTransHuge(page)))
                 if (unlikely(split_huge_page_to_list(page, list))) {
-                        swapcache_free(entry, NULL);
+                        swapcache_free(entry);
                         return 0;
                 }
 
@@ -202,7 +202,7 @@ int add_to_swap(struct page *page, struct list_head *list)
                  * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                  * clear SWAP_HAS_CACHE flag.
                  */
-                swapcache_free(entry, NULL);
+                swapcache_free(entry);
                 return 0;
         }
 }
@@ -225,7 +225,7 @@ void delete_from_swap_cache(struct page *page)
         __delete_from_swap_cache(page);
         spin_unlock_irq(&address_space->tree_lock);
 
-        swapcache_free(entry, page);
+        swapcache_free(entry);
         page_cache_release(page);
 }
 
@@ -386,7 +386,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                  * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                  * clear SWAP_HAS_CACHE flag.
                  */
-                swapcache_free(entry, NULL);
+                swapcache_free(entry);
         } while (err != -ENOMEM);
 
         if (new_page)
mm/swapfile.c
@@ -843,16 +843,13 @@ void swap_free(swp_entry_t entry)
 /*
  * Called after dropping swapcache to decrease refcnt to swap entries.
  */
-void swapcache_free(swp_entry_t entry, struct page *page)
+void swapcache_free(swp_entry_t entry)
 {
         struct swap_info_struct *p;
-        unsigned char count;
 
         p = swap_info_get(entry);
         if (p) {
-                count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
-                if (page)
-                        mem_cgroup_uncharge_swapcache(page, entry, count != 0);
+                swap_entry_free(p, entry, SWAP_HAS_CACHE);
                 spin_unlock(&p->lock);
         }
 }
mm/truncate.c
@@ -281,7 +281,6 @@ void truncate_inode_pages_range(struct address_space *mapping,
         while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
                         min(end - index, (pgoff_t)PAGEVEC_SIZE),
                         indices)) {
-                mem_cgroup_uncharge_start();
                 for (i = 0; i < pagevec_count(&pvec); i++) {
                         struct page *page = pvec.pages[i];
 
@@ -307,7 +306,6 @@ void truncate_inode_pages_range(struct address_space *mapping,
                 }
                 pagevec_remove_exceptionals(&pvec);
                 pagevec_release(&pvec);
-                mem_cgroup_uncharge_end();
                 cond_resched();
                 index++;
         }
@@ -369,7 +367,6 @@ void truncate_inode_pages_range(struct address_space *mapping,
                         pagevec_release(&pvec);
                         break;
                 }
-                mem_cgroup_uncharge_start();
                 for (i = 0; i < pagevec_count(&pvec); i++) {
                         struct page *page = pvec.pages[i];
 
@@ -394,7 +391,6 @@ void truncate_inode_pages_range(struct address_space *mapping,
                 }
                 pagevec_remove_exceptionals(&pvec);
                 pagevec_release(&pvec);
-                mem_cgroup_uncharge_end();
                 index++;
         }
         cleancache_invalidate_inode(mapping);
@@ -493,7 +489,6 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
         while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
                         indices)) {
-                mem_cgroup_uncharge_start();
                 for (i = 0; i < pagevec_count(&pvec); i++) {
                         struct page *page = pvec.pages[i];
 
@@ -522,7 +517,6 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                 }
                 pagevec_remove_exceptionals(&pvec);
                 pagevec_release(&pvec);
-                mem_cgroup_uncharge_end();
                 cond_resched();
                 index++;
         }
@@ -553,7 +547,6 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
         BUG_ON(page_has_private(page));
         __delete_from_page_cache(page, NULL);
         spin_unlock_irq(&mapping->tree_lock);
-        mem_cgroup_uncharge_cache_page(page);
 
         if (mapping->a_ops->freepage)
                 mapping->a_ops->freepage(page);
@@ -602,7 +595,6 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
         while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
                         indices)) {
-                mem_cgroup_uncharge_start();
                 for (i = 0; i < pagevec_count(&pvec); i++) {
                         struct page *page = pvec.pages[i];
 
@@ -655,7 +647,6 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                 }
                 pagevec_remove_exceptionals(&pvec);
                 pagevec_release(&pvec);
-                mem_cgroup_uncharge_end();
                 cond_resched();
                 index++;
         }
mm/vmscan.c (12 changed lines)
@@ -577,9 +577,10 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 
         if (PageSwapCache(page)) {
                 swp_entry_t swap = { .val = page_private(page) };
+                mem_cgroup_swapout(page, swap);
                 __delete_from_swap_cache(page);
                 spin_unlock_irq(&mapping->tree_lock);
-                swapcache_free(swap, page);
+                swapcache_free(swap);
         } else {
                 void (*freepage)(struct page *);
                 void *shadow = NULL;
@@ -600,7 +601,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                         shadow = workingset_eviction(mapping, page);
                 __delete_from_page_cache(page, shadow);
                 spin_unlock_irq(&mapping->tree_lock);
-                mem_cgroup_uncharge_cache_page(page);
 
                 if (freepage != NULL)
                         freepage(page);
@@ -1103,6 +1103,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                  */
                 __clear_page_locked(page);
 free_it:
+                mem_cgroup_uncharge(page);
                 nr_reclaimed++;
 
                 /*
@@ -1132,12 +1133,13 @@ keep:
                 list_add(&page->lru, &ret_pages);
                 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
         }
+        mem_cgroup_uncharge_end();
 
         free_hot_cold_page_list(&free_pages, true);
 
         list_splice(&ret_pages, page_list);
         count_vm_events(PGACTIVATE, pgactivate);
-        mem_cgroup_uncharge_end();
+
         *ret_nr_dirty += nr_dirty;
         *ret_nr_congested += nr_congested;
         *ret_nr_unqueued_dirty += nr_unqueued_dirty;
@@ -1435,6 +1437,8 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
                         __ClearPageActive(page);
                         del_page_from_lru_list(page, lruvec, lru);
 
+                        mem_cgroup_uncharge(page);
+
                         if (unlikely(PageCompound(page))) {
                                 spin_unlock_irq(&zone->lru_lock);
                                 (*get_compound_page_dtor(page))(page);
@@ -1656,6 +1660,8 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
                         __ClearPageActive(page);
                         del_page_from_lru_list(page, lruvec, lru);
 
+                        mem_cgroup_uncharge(page);
+
                         if (unlikely(PageCompound(page))) {
                                 spin_unlock_irq(&zone->lru_lock);
                                 (*get_compound_page_dtor(page))(page);
mm/zswap.c
@@ -507,7 +507,7 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
                  * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                  * clear SWAP_HAS_CACHE flag.
                  */
-                swapcache_free(entry, NULL);
+                swapcache_free(entry);
         } while (err != -ENOMEM);
 
         if (new_page)