// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

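/*
 * The per-VMA readahead state is packed into a single long:
 * bits [0, SWAP_RA_WIN_SHIFT) hold the hit count, bits
 * [SWAP_RA_WIN_SHIFT, PAGE_SHIFT) hold the window size, and the
 * remaining high bits hold the page-aligned fault address.  With 4KB
 * pages (PAGE_SHIFT == 12, SWAP_RA_WIN_SHIFT == 6), for example, hits
 * occupy bits 0-5 and the window bits 6-11, so both saturate at 63.
 */
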
/* The initial readahead hit count is 4, so we start with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

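/*
 * Look up the workingset shadow left at @entry's slot, if any.  Returns
 * the shadow value, or NULL if the slot is empty or holds a real page.
 */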
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(folio_page(folio, i), entry.val + i);
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		set_page_private(folio_page(folio, i), 0);
		xas_next(&xas);
	}
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * PTE should be dirty. A special case is an MADV_FREE folio:
	 * its PTE may have had the dirty bit cleared while the folio's
	 * SwapBacked flag is still set, because the dirty bit and the
	 * SwapBacked flag are cleared without lock protection. For such
	 * a folio, unmap will not set the dirty bit, so folio reclaim
	 * will not write the folio out. This can cause data corruption
	 * when the folio is swapped in later. Always setting the dirty
	 * flag for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list;
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio_swap_entry(folio);
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}

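/*
 * Drop all workingset shadow entries of swap device @type in the offset
 * range [begin, end].  The range may span several swap address spaces,
 * so after scrubbing one space the offset is rounded up to the next
 * SWAP_ADDRESS_SPACE_PAGES boundary and the walk continues there.
 */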
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swap cache space until we pass end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 *					- Marcelo
 */
void free_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

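/*
 * VMA-based readahead is used unless it has been disabled via sysfs or
 * some swap device is rotational (nr_rotate_swap != 0); otherwise the
 * physical, cluster-based heuristic is used instead.
 */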
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	if (folio) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return folio;
}

/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct page *page = pagecache_get_page(mapping, index,
						FGP_ENTRY | FGP_HEAD, 0);

	if (!page)
		return page;
	if (!xa_is_value(page))
		return find_subpage(page, index);
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(page);
	/* There might be swapin error entries in the shmem mapping. */
	if (non_swap_entry(swp))
		return NULL;
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(swp), swp_offset(swp));
	put_swap_device(si);
	return page;
}

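/*
 * Look up the swap cache for @entry, allocating a new page and inserting
 * it when there is no hit.  On return, *@new_page_allocated tells the
 * caller whether it got a fresh, locked page and must start the I/O
 * itself.  Returns NULL if the entry was freed, or if the page or XArray
 * node allocation failed.
 */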
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct folio *folio;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		folio = filemap_get_folio(swap_address_space(entry),
						swp_offset(entry));
		put_swap_device(si);
		if (folio)
			return folio_file_page(folio, swp_offset(entry));

		/*
		 * Just skip read-ahead for unused swap slots.
		 * During swapoff, when swap_slot_cache is disabled, we
		 * have to handle the race between putting a swap entry
		 * into the swap cache and marking the swap slot
		 * SWAP_HAS_CACHE.  That is handled later in this
		 * function; returning NULL here would abort swapoff.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
		if (!folio)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		folio_put(folio);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in.  Prepare the new page.
	 */

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(folio, shadow);

	/* Caller will initiate read into locked folio */
	folio_add_lru(folio);
	*new_page_allocated = true;
	return &folio->page;

fail_unlock:
	put_swap_folio(folio, entry);
	folio_unlock(folio);
	folio_put(folio);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				   struct vm_area_struct *vma,
				   unsigned long addr, struct swap_iocb **plug)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, false, plug);

	return retpage;
}

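/*
 * Size the next readahead window from the recent hit count: grow roughly
 * exponentially (rounded up to a power of two) while hits keep coming,
 * collapse to a single page when there were no hits and the access is
 * not adjacent to the previous one, and never shrink below half of the
 * previous window in one step.
 */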
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

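/*
 * Unlike the VMA-based variant, cluster readahead keeps its state in
 * globals: the previous faulting offset, the hit count accumulated by
 * swap_cache_get_folio(), and the size of the last window.
 */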
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(entry, gfp_mask, vma, addr, NULL);
}

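/*
 * Set up the swap cache for a newly enabled swap device: one struct
 * address_space per SWAP_ADDRESS_SPACE_PAGES slots, splitting the cache
 * so that a large device is not served by a single, contended XArray
 * lock.
 */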
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

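/*
 * Tear down the swap cache spaces at swapoff time.  All pages and shadow
 * entries must already be gone; warn once per space that is not empty.
 */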
void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

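/*
 * Clamp the readahead PFN range [lpfn, rpfn) to the VMA and to the PMD
 * containing the fault address, so the PTE walk that follows never
 * crosses a page-table boundary.
 */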
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

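/*
 * Decide the VMA readahead window for this fault and record the PTEs
 * covering it in @ra_info.  The window is placed ahead of, behind, or
 * around the fault address, depending on whether the previous fault
 * address suggests a forward scan, a backward scan, or no clear
 * direction.
 */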
static void swap_ra_info(struct vm_fault *vmf,
			 struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration it reads ahead either cluster-based (i.e. by physical
 * disk offset) or VMA-based (i.e. by virtual addresses around the
 * faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			      struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
		swap_vma_readahead(entry, gfp_mask, vmf) :
		swap_cluster_readahead(entry, gfp_mask, vmf);
}

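/*
 * The VMA readahead toggle is exposed as
 * /sys/kernel/mm/swap/vma_ra_enabled.  Writing "false" (or "0") makes
 * every swapin fault fall back to the cluster-based heuristic.
 */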
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}

static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}

static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif