Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "More mm/ work, plenty more to come

  Subsystems affected by this patch series: slub, memcg, gup, kasan,
  pagealloc, hugetlb, vmscan, tools, mempolicy, memblock, hugetlbfs,
  thp, mmap, kconfig"

* akpm: (131 commits)
  arm64: mm: use ARCH_HAS_DEBUG_WX instead of arch defined
  x86: mm: use ARCH_HAS_DEBUG_WX instead of arch defined
  riscv: support DEBUG_WX
  mm: add DEBUG_WX support
  drivers/base/memory.c: cache memory blocks in xarray to accelerate lookup
  mm/thp: rename pmd_mknotpresent() as pmd_mkinvalid()
  powerpc/mm: drop platform defined pmd_mknotpresent()
  mm: thp: don't need to drain lru cache when splitting and mlocking THP
  hugetlbfs: get unmapped area below TASK_UNMAPPED_BASE for hugetlbfs
  sparc32: register memory occupied by kernel as memblock.memory
  include/linux/memblock.h: fix minor typo and unclear comment
  mm, mempolicy: fix up gup usage in lookup_node
  tools/vm/page_owner_sort.c: filter out unneeded line
  mm: swap: memcg: fix memcg stats for huge pages
  mm: swap: fix vmstats for huge pages
  mm: vmscan: limit the range of LRU type balancing
  mm: vmscan: reclaim writepage is IO cost
  mm: vmscan: determine anon/file pressure balance at the reclaim root
  mm: balance LRU lists based on relative thrashing
  mm: only count actual rotations as LRU reclaim cost
  ...
 mm/hugetlb.c | 210 changed lines

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -59,8 +59,8 @@ __initdata LIST_HEAD(huge_boot_pages);
 /* for command line parsing */
 static struct hstate * __initdata parsed_hstate;
 static unsigned long __initdata default_hstate_max_huge_pages;
-static unsigned long __initdata default_hstate_size;
 static bool __initdata parsed_valid_hugepagesz = true;
+static bool __initdata parsed_default_hugepagesz;
 
 /*
  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
@@ -3060,7 +3060,7 @@ static void __init hugetlb_sysfs_init(void)
                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
                                                hstate_kobjs, &hstate_attr_group);
                 if (err)
-                        pr_err("Hugetlb: Unable to add hstate %s", h->name);
+                        pr_err("HugeTLB: Unable to add hstate %s", h->name);
         }
 }
 
@@ -3164,7 +3164,7 @@ static void hugetlb_register_node(struct node *node)
                                                 nhs->hstate_kobjs,
                                                 &per_node_hstate_attr_group);
                 if (err) {
-                        pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
+                        pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
                                 h->name, node->dev.id);
                         hugetlb_unregister_node(node);
                         break;
@@ -3212,23 +3212,41 @@ static int __init hugetlb_init(void)
 {
         int i;
 
-        if (!hugepages_supported())
+        if (!hugepages_supported()) {
+                if (hugetlb_max_hstate || default_hstate_max_huge_pages)
+                        pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
                 return 0;
-
-        if (!size_to_hstate(default_hstate_size)) {
-                if (default_hstate_size != 0) {
-                        pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
-                               default_hstate_size, HPAGE_SIZE);
-                }
-
-                default_hstate_size = HPAGE_SIZE;
-                if (!size_to_hstate(default_hstate_size))
-                        hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
         }
-        default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
-        if (default_hstate_max_huge_pages) {
-                if (!default_hstate.max_huge_pages)
-                        default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+
+        /*
+         * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
+         * architectures depend on setup being done here.
+         */
+        hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
+        if (!parsed_default_hugepagesz) {
+                /*
+                 * If we did not parse a default huge page size, set
+                 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
+                 * number of huge pages for this default size was implicitly
+                 * specified, set that here as well.
+                 * Note that the implicit setting will overwrite an explicit
+                 * setting.  A warning will be printed in this case.
+                 */
+                default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
+                if (default_hstate_max_huge_pages) {
+                        if (default_hstate.max_huge_pages) {
+                                char buf[32];
+
+                                string_get_size(huge_page_size(&default_hstate),
+                                        1, STRING_UNITS_2, buf, 32);
+                                pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
+                                        default_hstate.max_huge_pages, buf);
+                                pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
+                                        default_hstate_max_huge_pages);
+                        }
+                        default_hstate.max_huge_pages =
+                                default_hstate_max_huge_pages;
+                }
         }
 
         hugetlb_cma_check();
@@ -3256,10 +3274,10 @@ static int __init hugetlb_init(void)
 }
 subsys_initcall(hugetlb_init);
 
-/* Should be called on processing a hugepagesz=... option */
-void __init hugetlb_bad_size(void)
+/* Overwritten by architectures with more huge page sizes */
+bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
 {
-        parsed_valid_hugepagesz = false;
+        return size == HPAGE_SIZE;
 }
 
 void __init hugetlb_add_hstate(unsigned int order)
@@ -3268,7 +3286,6 @@ void __init hugetlb_add_hstate(unsigned int order)
         unsigned long i;
 
         if (size_to_hstate(PAGE_SIZE << order)) {
-                pr_warn("hugepagesz= specified twice, ignoring\n");
                 return;
         }
         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
@@ -3289,20 +3306,29 @@ void __init hugetlb_add_hstate(unsigned int order)
         parsed_hstate = h;
 }
 
-static int __init hugetlb_nrpages_setup(char *s)
+/*
+ * hugepages command line processing
+ * hugepages normally follows a valid hugepagsz or default_hugepagsz
+ * specification. If not, ignore the hugepages value. hugepages can also
+ * be the first huge page command line option in which case it implicitly
+ * specifies the number of huge pages for the default size.
+ */
+static int __init hugepages_setup(char *s)
 {
         unsigned long *mhp;
         static unsigned long *last_mhp;
 
         if (!parsed_valid_hugepagesz) {
-                pr_warn("hugepages = %s preceded by "
-                        "an unsupported hugepagesz, ignoring\n", s);
+                pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
                 parsed_valid_hugepagesz = true;
-                return 1;
+                return 0;
         }
+
         /*
-         * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
-         * so this hugepages= parameter goes to the "default hstate".
+         * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
+         * yet, so this hugepages= parameter goes to the "default hstate".
+         * Otherwise, it goes with the previously parsed hugepagesz or
+         * default_hugepagesz.
          */
         else if (!hugetlb_max_hstate)
                 mhp = &default_hstate_max_huge_pages;
@@ -3310,8 +3336,8 @@ static int __init hugetlb_nrpages_setup(char *s)
                 mhp = &parsed_hstate->max_huge_pages;
 
         if (mhp == last_mhp) {
-                pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
-                return 1;
+                pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
+                return 0;
         }
 
         if (sscanf(s, "%lu", mhp) <= 0)
@@ -3329,14 +3355,102 @@ static int __init hugetlb_nrpages_setup(char *s)
 
         return 1;
 }
-__setup("hugepages=", hugetlb_nrpages_setup);
+__setup("hugepages=", hugepages_setup);
 
-static int __init hugetlb_default_setup(char *s)
+/*
+ * hugepagesz command line processing
+ * A specific huge page size can only be specified once with hugepagesz.
+ * hugepagesz is followed by hugepages on the command line. The global
+ * variable 'parsed_valid_hugepagesz' is used to determine if prior
+ * hugepagesz argument was valid.
+ */
+static int __init hugepagesz_setup(char *s)
 {
-        default_hstate_size = memparse(s, &s);
+        unsigned long size;
+        struct hstate *h;
+
+        parsed_valid_hugepagesz = false;
+        size = (unsigned long)memparse(s, NULL);
+
+        if (!arch_hugetlb_valid_size(size)) {
+                pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
+                return 0;
+        }
+
+        h = size_to_hstate(size);
+        if (h) {
+                /*
+                 * hstate for this size already exists. This is normally
+                 * an error, but is allowed if the existing hstate is the
+                 * default hstate. More specifically, it is only allowed if
+                 * the number of huge pages for the default hstate was not
+                 * previously specified.
+                 */
+                if (!parsed_default_hugepagesz || h != &default_hstate ||
+                    default_hstate.max_huge_pages) {
+                        pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
+                        return 0;
+                }
+
+                /*
+                 * No need to call hugetlb_add_hstate() as hstate already
+                 * exists. But, do set parsed_hstate so that a following
+                 * hugepages= parameter will be applied to this hstate.
+                 */
+                parsed_hstate = h;
+                parsed_valid_hugepagesz = true;
+                return 1;
+        }
+
+        hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
+        parsed_valid_hugepagesz = true;
         return 1;
 }
-__setup("default_hugepagesz=", hugetlb_default_setup);
+__setup("hugepagesz=", hugepagesz_setup);
+
+/*
+ * default_hugepagesz command line input
+ * Only one instance of default_hugepagesz allowed on command line.
+ */
+static int __init default_hugepagesz_setup(char *s)
+{
+        unsigned long size;
+
+        parsed_valid_hugepagesz = false;
+        if (parsed_default_hugepagesz) {
+                pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
+                return 0;
+        }
+
+        size = (unsigned long)memparse(s, NULL);
+
+        if (!arch_hugetlb_valid_size(size)) {
+                pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
+                return 0;
+        }
+
+        hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
+        parsed_valid_hugepagesz = true;
+        parsed_default_hugepagesz = true;
+        default_hstate_idx = hstate_index(size_to_hstate(size));
+
+        /*
+         * The number of default huge pages (for this size) could have been
+         * specified as the first hugetlb parameter: hugepages=X. If so,
+         * then default_hstate_max_huge_pages is set. If the default huge
+         * page size is gigantic (>= MAX_ORDER), then the pages must be
+         * allocated here from bootmem allocator.
+         */
+        if (default_hstate_max_huge_pages) {
+                default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+                if (hstate_is_gigantic(&default_hstate))
+                        hugetlb_hstate_alloc_pages(&default_hstate);
+                default_hstate_max_huge_pages = 0;
+        }
+
+        return 1;
+}
+__setup("default_hugepagesz=", default_hugepagesz_setup);
 
 static unsigned int cpuset_mems_nr(unsigned int *array)
 {
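
The comments added in this hunk spell out the ordering rules for the hugepages=, hugepagesz= and default_hugepagesz= boot parameters. As an illustration only (not part of the commit, and not kernel code), the following self-contained userspace C sketch mimics two of those rules: hugepages= applies to the most recently parsed size, and a hugepages= value that follows an invalid hugepagesz= is dropped. The helper names and the hard-coded 2M/1G check are stand-ins for the kernel's memparse() and arch_hugetlb_valid_size() logic.

/*
 * Illustration only -- a simplified userspace sketch of the command line
 * ordering rules; sizes are plain byte counts here.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define SZ_2M (2UL << 20)
#define SZ_1G (1UL << 30)

static bool parsed_valid_hugepagesz = true;
static unsigned long parsed_size = SZ_2M;      /* stand-in for the default hstate */

static void hugepagesz_arg(const char *s)
{
        unsigned long size = strtoul(s, NULL, 0);

        parsed_valid_hugepagesz = false;
        if (size != SZ_2M && size != SZ_1G) {  /* arch_hugetlb_valid_size() stand-in */
                printf("unsupported hugepagesz=%s\n", s);
                return;
        }
        parsed_size = size;
        parsed_valid_hugepagesz = true;
}

static void hugepages_arg(const char *s)
{
        if (!parsed_valid_hugepagesz) {
                printf("hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
                parsed_valid_hugepagesz = true;
                return;
        }
        printf("%s pages of %lu bytes requested\n", s, parsed_size);
}

int main(void)
{
        /* Boot args: hugepagesz=2097152 hugepages=512 hugepagesz=4096 hugepages=4 */
        hugepagesz_arg("2097152");     /* 2M: accepted */
        hugepages_arg("512");          /* 512 x 2M requested */
        hugepagesz_arg("4096");        /* 4K: rejected by the size check */
        hugepages_arg("4");            /* ignored: follows an invalid size */
        return 0;
}

Run as-is, the sketch reports the 512-page request against the 2M size, then prints the "unsupported hugepagesz" and "does not follow a valid hugepagesz" diagnostics, mirroring the pr_err()/pr_warn() paths added above.
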
@@ -5354,8 +5468,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
  * huge_pte_offset() - Walk the page table to resolve the hugepage
  * entry at address @addr
  *
- * Return: Pointer to page table or swap entry (PUD or PMD) for
- * address @addr, or NULL if a p*d_none() entry is encountered and the
+ * Return: Pointer to page table entry (PUD or PMD) for
+ * address @addr, or NULL if a !p*d_present() entry is encountered and the
  * size @sz doesn't match the hugepage size at this level of the page
  * table.
  */
@@ -5364,8 +5478,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 {
         pgd_t *pgd;
         p4d_t *p4d;
-        pud_t *pud, pud_entry;
-        pmd_t *pmd, pmd_entry;
+        pud_t *pud;
+        pmd_t *pmd;
 
         pgd = pgd_offset(mm, addr);
         if (!pgd_present(*pgd))
@@ -5375,22 +5489,16 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
                 return NULL;
 
         pud = pud_offset(p4d, addr);
-        pud_entry = READ_ONCE(*pud);
-        if (sz != PUD_SIZE && pud_none(pud_entry))
-                return NULL;
-        /* hugepage or swap? */
-        if (pud_huge(pud_entry) || !pud_present(pud_entry))
+        if (sz == PUD_SIZE)
+                /* must be pud huge, non-present or none */
                 return (pte_t *)pud;
+        if (!pud_present(*pud))
+                return NULL;
+        /* must have a valid entry and size to go further */
 
         pmd = pmd_offset(pud, addr);
-        pmd_entry = READ_ONCE(*pmd);
-        if (sz != PMD_SIZE && pmd_none(pmd_entry))
-                return NULL;
-        /* hugepage or swap? */
-        if (pmd_huge(pmd_entry) || !pmd_present(pmd_entry))
-                return (pte_t *)pmd;
-
-        return NULL;
+        /* must be pmd huge, non-present or none */
+        return (pte_t *)pmd;
 }
 
 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */