Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm changes from Ingo Molnar:
 "The main changes are: continued PAT work by Toshi Kani, plus a new
  boot time warning about insecure RWX kernel mappings, by Stephen
  Smalley.

  The new CONFIG_DEBUG_WX=y warning is marked default-y if
  CONFIG_DEBUG_RODATA=y is already enabled, as a special exception, as
  these bugs are hard to notice and this check already found several
  live bugs"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Warn on W^X mappings
  x86/mm: Fix no-change case in try_preserve_large_page()
  x86/mm: Fix __split_large_page() to handle large PAT bit
  x86/mm: Fix try_preserve_large_page() to handle large PAT bit
  x86/mm: Fix gup_huge_p?d() to handle large PAT bit
  x86/mm: Fix slow_virt_to_phys() to handle large PAT bit
  x86/mm: Fix page table dump to show PAT bit
  x86/asm: Add pud_pgprot() and pmd_pgprot()
  x86/asm: Fix pud/pmd interfaces to handle large PAT bit
  x86/asm: Add pud/pmd mask interfaces to handle large PAT bit
  x86/asm: Move PUD_PAGE macros to page_types.h
  x86/vdso32: Define PGTABLE_LEVELS to 32bit VDSO
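The recurring "large PAT bit" problem in this series: in a 4K PTE the PAT selector is bit 7 (_PAGE_PAT), but in a 2M/1G entry bit 7 means PSE and the PAT selector moves up to bit 12 (_PAGE_PAT_LARGE), squarely inside what a 4K-style PFN mask treats as frame-number bits. Below is a minimal userspace sketch of that layout and of a pgprot_large_2_4k()-style conversion; the constants mirror the kernel's _PAGE_BIT_* definitions, but none of this is kernel code.

#include <stdint.h>
#include <stdio.h>

#define _PAGE_BIT_PAT		7	/* PAT selector in a 4K PTE */
#define _PAGE_BIT_PSE		7	/* the same bit means PSE in a 2M/1G entry */
#define _PAGE_BIT_PAT_LARGE	12	/* PAT selector in a 2M/1G entry */

#define _PAGE_PAT		(1ULL << _PAGE_BIT_PAT)
#define _PAGE_PSE		(1ULL << _PAGE_BIT_PSE)
#define _PAGE_PAT_LARGE		(1ULL << _PAGE_BIT_PAT_LARGE)

/*
 * Sketch of pgprot_large_2_4k(): clear both PAT positions (bit 7 is PSE
 * in the huge entry, so this also drops PSE), then re-derive bit 7 from
 * bit 12.
 */
static uint64_t large_2_4k(uint64_t prot)
{
	uint64_t val = prot & ~(_PAGE_PAT | _PAGE_PAT_LARGE);

	if (prot & _PAGE_PAT_LARGE)
		val |= _PAGE_PAT;
	return val;
}

int main(void)
{
	uint64_t huge = _PAGE_PSE | _PAGE_PAT_LARGE | 0x063; /* present, RW, A, D */

	printf("2M flags 0x%llx -> 4K flags 0x%llx\n",
	       (unsigned long long)huge, (unsigned long long)large_2_4k(huge));
	return 0;
}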
@@ -414,18 +414,28 @@ pmd_t *lookup_pmd_address(unsigned long address)
 phys_addr_t slow_virt_to_phys(void *__virt_addr)
 {
 	unsigned long virt_addr = (unsigned long)__virt_addr;
-	phys_addr_t phys_addr;
-	unsigned long offset;
+	unsigned long phys_addr, offset;
 	enum pg_level level;
-	unsigned long pmask;
 	pte_t *pte;
 
 	pte = lookup_address(virt_addr, &level);
 	BUG_ON(!pte);
-	pmask = page_level_mask(level);
-	offset = virt_addr & ~pmask;
-	phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
-	return (phys_addr | offset);
+
+	switch (level) {
+	case PG_LEVEL_1G:
+		phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+		offset = virt_addr & ~PUD_PAGE_MASK;
+		break;
+	case PG_LEVEL_2M:
+		phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+		offset = virt_addr & ~PMD_PAGE_MASK;
+		break;
+	default:
+		phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+		offset = virt_addr & ~PAGE_MASK;
+	}
+
+	return (phys_addr_t)(phys_addr | offset);
 }
 EXPORT_SYMBOL_GPL(slow_virt_to_phys);
 
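Why the switch on page level matters here: pte_pfn() masks with the 4K PTE_PFN_MASK, which keeps bit 12, so on a 2M entry with the large PAT bit set that bit leaked into the pfn and slow_virt_to_phys() returned an address 4K too high. A self-contained illustration with made-up numbers; the masks approximate PTE_PFN_MASK and a 2M-aligned pfn field, and are not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PTE_PFN_MASK	0x0000fffffffff000ULL	/* assumed 4K pfn field: bits 12 and up */
#define PMD_PFN_MASK	0x0000ffffffe00000ULL	/* assumed 2M pfn field: bits 21 and up */
#define _PAGE_PAT_LARGE	(1ULL << 12)

int main(void)
{
	/* hypothetical 2M entry: phys base 0x40000000, large PAT set, flags 0xe3 */
	uint64_t pmd = 0x40000000ULL | _PAGE_PAT_LARGE | 0x0e3;

	uint64_t bad  = (pmd & PTE_PFN_MASK) >> PAGE_SHIFT; /* old 4K-style read */
	uint64_t good = (pmd & PMD_PFN_MASK) >> PAGE_SHIFT; /* level-aware read */

	printf("pte_pfn-style pfn 0x%llx vs pmd_pfn 0x%llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;	/* 0x40001 vs 0x40000: the old path was off by 4K */
}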
@@ -458,7 +468,7 @@ static int
 try_preserve_large_page(pte_t *kpte, unsigned long address,
 			struct cpa_data *cpa)
 {
-	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
+	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn, old_pfn;
 	pte_t new_pte, old_pte, *tmp;
 	pgprot_t old_prot, new_prot, req_prot;
 	int i, do_split = 1;
@@ -478,17 +488,21 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 
 	switch (level) {
 	case PG_LEVEL_2M:
-#ifdef CONFIG_X86_64
+		old_prot = pmd_pgprot(*(pmd_t *)kpte);
+		old_pfn = pmd_pfn(*(pmd_t *)kpte);
+		break;
 	case PG_LEVEL_1G:
-#endif
-		psize = page_level_size(level);
-		pmask = page_level_mask(level);
+		old_prot = pud_pgprot(*(pud_t *)kpte);
+		old_pfn = pud_pfn(*(pud_t *)kpte);
 		break;
 	default:
 		do_split = -EINVAL;
 		goto out_unlock;
 	}
 
+	psize = page_level_size(level);
+	pmask = page_level_mask(level);
+
 	/*
 	 * Calculate the number of pages, which fit into this large
 	 * page starting at address:
@@ -504,7 +518,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * up accordingly.
 	 */
 	old_pte = *kpte;
-	old_prot = req_prot = pgprot_large_2_4k(pte_pgprot(old_pte));
+	req_prot = pgprot_large_2_4k(old_prot);
 
 	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
 	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
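The old_prot/old_pfn reads above lean on the pud_pgprot()/pmd_pgprot() and pud_pfn()/pmd_pfn() interfaces this series fixes: for a huge entry the pfn field starts at the level's own shift, and everything outside it (including bit 12) belongs to the flags. A rough stand-in for that idea; the real accessors in arch/x86/include/asm/pgtable.h pick the mask based on _PAGE_PSE, and the 48-bit physical mask here is an assumption.

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PHYS_MASK	0x0000ffffffffffffULL	/* assumed 48-bit physical space */

/* pfn field of a huge 2M entry: physical bits at and above PMD_SHIFT */
static uint64_t pmd_pfn_sketch(uint64_t pmd)
{
	return (pmd & PHYS_MASK & ~((1ULL << PMD_SHIFT) - 1)) >> PAGE_SHIFT;
}

/* flags: everything that is not pfn, so bit 12 stays with the flags */
static uint64_t pmd_pgprot_sketch(uint64_t pmd)
{
	return pmd & ~(PHYS_MASK & ~((1ULL << PMD_SHIFT) - 1));
}

int main(void)
{
	uint64_t pmd = 0x40000000ULL | (1ULL << 12) | 0x0e3;

	/* pfn and flags partition the entry; large PAT (bit 12) lands in flags */
	assert(pmd_pfn_sketch(pmd) == 0x40000);
	assert(pmd_pgprot_sketch(pmd) == ((1ULL << 12) | 0x0e3));
	return 0;
}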
@@ -530,10 +544,10 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	req_prot = canon_pgprot(req_prot);
 
 	/*
-	 * old_pte points to the large page base address. So we need
+	 * old_pfn points to the large page base pfn. So we need
 	 * to add the offset of the virtual address:
 	 */
-	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
+	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
 	cpa->pfn = pfn;
 
 	new_prot = static_protections(req_prot, address, pfn);
@@ -544,7 +558,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * the pages in the range we try to preserve:
 	 */
 	addr = address & pmask;
-	pfn = pte_pfn(old_pte);
+	pfn = old_pfn;
 	for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
 		pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
 
@@ -574,7 +588,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 		 * The address is aligned and the number of pages
 		 * covers the full page.
 		 */
-		new_pte = pfn_pte(pte_pfn(old_pte), new_prot);
+		new_pte = pfn_pte(old_pfn, new_prot);
 		__set_pmd_pte(kpte, address, new_pte);
 		cpa->flags |= CPA_FLUSHTLB;
 		do_split = 0;
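The pfn expression that old_pfn now feeds is worth a worked example: base pfn plus the 4K-page index of the address within the large page. With made-up numbers (a 2M page at pfn 0x40000, probed 0x3000 bytes in):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned int PAGE_SHIFT = 12;
	uint64_t psize   = 1ULL << 21;			/* 2M page size */
	uint64_t old_pfn = 0x40000;			/* large page base pfn */
	uint64_t address = 0xffff880000003000ULL;	/* 0x3000 into the page */

	/* same expression as in the kernel hunk above */
	uint64_t pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);

	assert(pfn == 0x40003);		/* three 4K pages past the base */
	return 0;
}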
@@ -591,7 +605,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 		       struct page *base)
 {
 	pte_t *pbase = (pte_t *)page_address(base);
-	unsigned long pfn, pfninc = 1;
+	unsigned long ref_pfn, pfn, pfninc = 1;
 	unsigned int i, level;
 	pte_t *tmp;
 	pgprot_t ref_prot;
@@ -608,26 +622,33 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 	}
 
 	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
-	ref_prot = pte_pgprot(pte_clrhuge(*kpte));
 
-	/* promote PAT bit to correct position */
-	if (level == PG_LEVEL_2M)
+	switch (level) {
+	case PG_LEVEL_2M:
+		ref_prot = pmd_pgprot(*(pmd_t *)kpte);
+		/* clear PSE and promote PAT bit to correct position */
 		ref_prot = pgprot_large_2_4k(ref_prot);
+		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
+		break;
 
-#ifdef CONFIG_X86_64
-	if (level == PG_LEVEL_1G) {
+	case PG_LEVEL_1G:
+		ref_prot = pud_pgprot(*(pud_t *)kpte);
+		ref_pfn = pud_pfn(*(pud_t *)kpte);
 		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
+
 		/*
-		 * Set the PSE flags only if the PRESENT flag is set
+		 * Clear the PSE flags if the PRESENT flag is not set
 		 * otherwise pmd_present/pmd_huge will return true
 		 * even on a non present pmd.
 		 */
-		if (pgprot_val(ref_prot) & _PAGE_PRESENT)
-			pgprot_val(ref_prot) |= _PAGE_PSE;
-		else
+		if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
 			pgprot_val(ref_prot) &= ~_PAGE_PSE;
+		break;
+
+	default:
+		spin_unlock(&pgd_lock);
+		return 1;
 	}
-#endif
 
 	/*
 	 * Set the GLOBAL flags only if the PRESENT flag is set
@@ -643,7 +664,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 	/*
 	 * Get the target pfn from the original entry:
 	 */
-	pfn = pte_pfn(*kpte);
+	pfn = ref_pfn;
 	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
 		set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));
 