Merge tag 'powerpc-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "Notable changes:

   - A large series to rewrite our SLB miss handling, replacing a lot
     of fairly complicated asm with much fewer lines of C.

   - Following on from that, we now maintain a cache of SLB entries for
     each process and preload them on context switch. Leading to a 27%
     speedup for our context switch benchmark on Power9.

   - Improvements to our handling of SLB multi-hit errors. We now print
     more debug information when they occur, and try to continue
     running by flushing the SLB and reloading, rather than treating
     them as fatal.

   - Enable THP migration on 64-bit Book3S machines (eg. Power7/8/9).

   - Add support for physical memory up to 2PB in the linear mapping on
     64-bit Book3S. We only support up to 512TB as regular system
     memory, otherwise the percpu allocator runs out of vmalloc space.

   - Add stack protector support for 32 and 64-bit, with a per-task
     canary.

   - Add support for PTRACE_SYSEMU and PTRACE_SYSEMU_SINGLESTEP.

   - Support recognising "big cores" on Power9, where two SMT4 cores
     are presented to us as a single SMT8 core.

   - A large series to cleanup some of our ioremap handling and PTE
     flags.

   - Add a driver for the PAPR SCM (storage class memory) interface,
     allowing guests to operate on SCM devices (acked by Dan).

   - Changes to our ftrace code to handle very large kernels, where we
     need to use a trampoline to get to ftrace_caller().

  And many other smaller enhancements and cleanups.

  Thanks to: Alan Modra, Alistair Popple, Aneesh Kumar K.V, Anton
  Blanchard, Aravinda Prasad, Bartlomiej Zolnierkiewicz, Benjamin
  Herrenschmidt, Breno Leitao, Cédric Le Goater, Christophe Leroy,
  Christophe Lombard, Dan Carpenter, Daniel Axtens, Finn Thain, Gautham
  R. Shenoy, Gustavo Romero, Haren Myneni, Hari Bathini, Jia Hongtao,
  Joel Stanley, John Allen, Laurent Dufour, Madhavan Srinivasan, Mahesh
  Salgaonkar, Mark Hairgrove, Masahiro Yamada, Michael Bringmann,
  Michael Neuling, Michal Suchanek, Murilo Opsfelder Araujo, Nathan
  Fontenot, Naveen N. Rao, Nicholas Piggin, Nick Desaulniers, Oliver
  O'Halloran, Paul Mackerras, Petr Vorel, Rashmica Gupta, Reza Arbab,
  Rob Herring, Sam Bobroff, Samuel Mendoza-Jonas, Scott Wood, Stan
  Johnson, Stephen Rothwell, Stewart Smith, Suraj Jitindar Singh, Tyrel
  Datwyler, Vaibhav Jain, Vasant Hegde, YueHaibing, zhong jiang"

* tag 'powerpc-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (221 commits)
  Revert "selftests/powerpc: Fix out-of-tree build errors"
  powerpc/msi: Fix compile error on mpc83xx
  powerpc: Fix stack protector crashes on CPU hotplug
  powerpc/traps: restore recoverability of machine_check interrupts
  powerpc/64/module: REL32 relocation range check
  powerpc/64s/radix: Fix radix__flush_tlb_collapsed_pmd double flushing pmd
  selftests/powerpc: Add a test of wild bctr
  powerpc/mm: Fix page table dump to work on Radix
  powerpc/mm/radix: Display if mappings are exec or not
  powerpc/mm/radix: Simplify split mapping logic
  powerpc/mm/radix: Remove the retry in the split mapping logic
  powerpc/mm/radix: Fix small page at boundary when splitting
  powerpc/mm/radix: Fix overuse of small pages in splitting logic
  powerpc/mm/radix: Fix off-by-one in split mapping logic
  powerpc/ftrace: Handle large kernel configs
  powerpc/mm: Fix WARN_ON with THP NUMA migration
  selftests/powerpc: Fix out-of-tree build errors
  powerpc/time: no steal_time when CONFIG_PPC_SPLPAR is not selected
  powerpc/time: Only set CONFIG_ARCH_HAS_SCALED_CPUTIME on PPC64
  powerpc/time: isolate scaled cputime accounting in dedicated functions.
  ...
@@ -8,7 +8,97 @@

 #include <asm/book3s/32/hash.h>

-/* And here we include common definitions */
-#include <asm/pte-common.h>
+#define _PAGE_KERNEL_RO		0
+#define _PAGE_KERNEL_ROX	0
+#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW)
+
+#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
+
+#ifndef __ASSEMBLY__
+
+static inline bool pte_user(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_USER;
+}
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Location of the PFN in the PTE. Most 32-bit platforms use the same
+ * as _PAGE_SHIFT here (ie, naturally aligned).
+ * Platforms that use a different value just pre-define it, so we don't
+ * override it here.
+ */
+#define PTE_RPN_SHIFT	(PAGE_SHIFT)
+
+/*
+ * The mask covered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs.
+ */
+#ifdef CONFIG_PTE_64BIT
+#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
+#else
+#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
+#endif
+
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes.
+ */
+#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
+			 _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
+
+/*
+ * Permission masks used to generate the __P and __S table.
+ *
+ * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now.
+ */
+#define PAGE_NONE	__pgprot(_PAGE_BASE)
+#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER)
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+/*
+ * Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
+	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
+#endif
+
+/* Make modules code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP		(PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
 #define PTE_INDEX_SIZE	PTE_SHIFT
 #define PMD_INDEX_SIZE	0
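The block above is mostly bit arithmetic; as a standalone illustration of the RPN mask computation (PAGE_SHIFT = 12 and the printed values are assumptions of this sketch, not taken from the kernel headers):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PTE_RPN_SHIFT	PAGE_SHIFT

int main(void)
{
	uint32_t mask32 = ~((1UL << PTE_RPN_SHIFT) - 1);   /* 32-bit PTE */
	uint64_t mask64 = ~((1ULL << PTE_RPN_SHIFT) - 1);  /* CONFIG_PTE_64BIT */

	/* The RPN occupies every bit above the page offset. */
	printf("32-bit RPN mask: 0x%08x\n", (unsigned)mask32);             /* 0xfffff000 */
	printf("64-bit RPN mask: 0x%016llx\n", (unsigned long long)mask64);
	return 0;
}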
@@ -219,7 +309,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
 {
-	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
+	pte_update(ptep, _PAGE_RW, 0);
 }
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
@@ -234,10 +324,9 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   int psize)
 {
 	unsigned long set = pte_val(entry) &
-		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-	unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);

-	pte_update(ptep, clr, set);
+	pte_update(ptep, 0, set);

 	flush_tlb_page(vma, address);
 }
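Both hunks above are written in terms of pte_update(ptep, clr, set). A minimal userspace model of that helper's contract (the kernel version performs the read-modify-write atomically, which this sketch does not):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pte_model;

/* Clear the bits in 'clr', then set the bits in 'set'; return the old
 * value. Models pte_update() for a 32-bit hash PTE. */
static pte_model pte_update_model(pte_model *ptep, uint32_t clr, uint32_t set)
{
	pte_model old = *ptep;

	*ptep = (old & ~clr) | set;
	return old;
}

int main(void)
{
	pte_model pte = 0x193;			/* arbitrary illustrative PTE */

	pte_update_model(&pte, 0x002, 0);	/* e.g. clear an RW bit */
	printf("pte now 0x%x\n", pte);
	return 0;
}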
@@ -292,7 +381,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

 /* Generic accessors to PTE bits */
 static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
@@ -301,13 +390,28 @@ static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY);
 static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
 static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
 static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+static inline bool pte_exec(pte_t pte)		{ return true; }

 static inline int pte_present(pte_t pte)
 {
 	return pte_val(pte) & _PAGE_PRESENT;
 }

+static inline bool pte_hw_valid(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_PRESENT;
+}
+
+static inline bool pte_hashpte(pte_t pte)
+{
+	return !!(pte_val(pte) & _PAGE_HASHPTE);
+}
+
+static inline bool pte_ci(pte_t pte)
+{
+	return !!(pte_val(pte) & _PAGE_NO_CACHE);
+}
+
 /*
  * We only find page table entries at the last level,
  * hence no need for other accessors.
@@ -315,17 +419,14 @@ static inline int pte_present(pte_t pte)
 #define pte_access_permitted pte_access_permitted
 static inline bool pte_access_permitted(pte_t pte, bool write)
 {
-	unsigned long pteval = pte_val(pte);
 	/*
 	 * A read-only access is controlled by _PAGE_USER bit.
 	 * We have _PAGE_READ set for WRITE and EXECUTE
 	 */
-	unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_USER;
-
-	if (write)
-		need_pte_bits |= _PAGE_WRITE;
-
-	if ((pteval & need_pte_bits) != need_pte_bits)
+	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

+	if (write && !pte_write(pte))
		return false;

 	return true;
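A standalone sketch of the simplified permission check, with made-up bit values rather than the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PRESENT 0x1
#define USER    0x2
#define READ    0x4
#define WRITE   0x8

static bool access_permitted(uint32_t pte, bool write)
{
	if (!(pte & PRESENT) || !(pte & USER) || !(pte & READ))
		return false;
	if (write && !(pte & WRITE))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", access_permitted(PRESENT | USER | READ, false));        /* 1 */
	printf("%d\n", access_permitted(PRESENT | USER | READ, true));         /* 0 */
	printf("%d\n", access_permitted(PRESENT | USER | READ | WRITE, true)); /* 1 */
	return 0;
}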
@@ -354,6 +455,11 @@ static inline pte_t pte_wrprotect(pte_t pte)
 	return __pte(pte_val(pte) & ~_PAGE_RW);
 }

+static inline pte_t pte_exprotect(pte_t pte)
+{
+	return pte;
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
 	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
@@ -364,6 +470,16 @@ static inline pte_t pte_mkold(pte_t pte)
 	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
 }

+static inline pte_t pte_mkexec(pte_t pte)
+{
+	return pte;
+}
+
+static inline pte_t pte_mkpte(pte_t pte)
+{
+	return pte;
+}
+
 static inline pte_t pte_mkwrite(pte_t pte)
 {
 	return __pte(pte_val(pte) | _PAGE_RW);
@@ -389,6 +505,16 @@ static inline pte_t pte_mkhuge(pte_t pte)
 	return pte;
 }

+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_USER);
+}
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_USER);
+}
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
@@ -66,7 +66,7 @@ static inline int hash__hugepd_ok(hugepd_t hpd)
	 * if it is not a pte and has the hugepd shift mask
	 * set, then it is a hugepd directory pointer
	 */
-	if (!(hpdval & _PAGE_PTE) &&
+	if (!(hpdval & _PAGE_PTE) && (hpdval & _PAGE_PRESENT) &&
	    ((hpdval & HUGEPD_SHIFT_MASK) != 0))
		return true;
	return false;
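A hedged standalone model of the strengthened check: an entry counts as a huge-page directory only if it is not a leaf PTE, is present, and carries a non-zero shift field. Bit positions here are assumptions of this sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_MARKER        (1ULL << 62)	/* assumed leaf-PTE marker */
#define PRESENT           (1ULL << 63)	/* assumed valid bit */
#define HUGEPD_SHIFT_MASK 0x3fULL	/* assumed shift field */

static bool hugepd_ok(uint64_t hpdval)
{
	return !(hpdval & PTE_MARKER) &&
	       (hpdval & PRESENT) &&
	       (hpdval & HUGEPD_SHIFT_MASK) != 0;
}

int main(void)
{
	printf("%d\n", hugepd_ok(PRESENT | 0x18));                /* 1: hugepd */
	printf("%d\n", hugepd_ok(0x18));                          /* 0: not present */
	printf("%d\n", hugepd_ok(PRESENT | PTE_MARKER | 0x18));   /* 0: leaf PTE */
	return 0;
}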
@@ -18,6 +18,11 @@
 #include <asm/book3s/64/hash-4k.h>
 #endif

+/* Bits to set in a PMD/PUD/PGD entry valid bit */
+#define HASH_PMD_VAL_BITS		(0x8000000000000000UL)
+#define HASH_PUD_VAL_BITS		(0x8000000000000000UL)
+#define HASH_PGD_VAL_BITS		(0x8000000000000000UL)
+
 /*
  * Size of EA range mapped by our pagetables.
  */
@@ -196,8 +201,7 @@ static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */


-extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
-				 unsigned long flags);
+int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
 extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
@@ -39,4 +39,7 @@ static inline bool gigantic_page_supported(void)
 }
 #endif

+/* hugepd entry valid bit */
+#define HUGEPD_VAL_BITS		(0x8000000000000000UL)
+
 #endif
@@ -30,7 +30,7 @@
  * SLB
  */

-#define SLB_NUM_BOLTED		3
+#define SLB_NUM_BOLTED		2
 #define SLB_CACHE_ENTRIES	8
 #define SLB_MIN_SIZE		32
@@ -499,6 +499,8 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
 extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
 extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

+extern void hash__setup_new_exec(void);
+
 #ifdef CONFIG_PPC_PSERIES
 void hpte_init_pseries(void);
 #else
@@ -507,11 +509,18 @@ static inline void hpte_init_pseries(void) { }

 extern void hpte_init_native(void);

+struct slb_entry {
+	u64	esid;
+	u64	vsid;
+};
+
 extern void slb_initialize(void);
-extern void slb_flush_and_rebolt(void);
+void slb_flush_and_restore_bolted(void);
 void slb_flush_all_realmode(void);
 void __slb_restore_bolted_realmode(void);
 void slb_restore_bolted_realmode(void);
+void slb_save_contents(struct slb_entry *slb_ptr);
+void slb_dump_contents(struct slb_entry *slb_ptr);

 extern void slb_vmalloc_update(void);
 extern void slb_set_size(u16 size);
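slb_save_contents()/slb_dump_contents() back the improved SLB multi-hit reporting mentioned in the pull message. A userspace model of the snapshot/dump flow (the entry values are fabricated; the 32-entry size assumes a POWER9 SLB, and the real helpers read the SLB with slbmfee/slbmfev):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct slb_entry {
	u64 esid;
	u64 vsid;
};

#define SLB_SIZE 32

static void slb_save_contents_model(struct slb_entry *p)
{
	for (int i = 0; i < SLB_SIZE; i++) {	/* fabricated entries */
		p[i].esid = 0xc000000008000000ULL | i;
		p[i].vsid = 0x400ea1b217000ULL + i;
	}
}

static void slb_dump_contents_model(struct slb_entry *p)
{
	for (int i = 0; i < SLB_SIZE; i++)
		printf("%02d: esid=%016llx vsid=%016llx\n", i,
		       (unsigned long long)p[i].esid,
		       (unsigned long long)p[i].vsid);
}

int main(void)
{
	struct slb_entry buf[SLB_SIZE];

	slb_save_contents_model(buf);
	slb_dump_contents_model(buf);
	return 0;
}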
@@ -524,13 +533,9 @@ extern void slb_set_size(u16 size);
  * from mmu context id and effective segment id of the address.
  *
  * For user processes max context id is limited to MAX_USER_CONTEXT.
- * For kernel space, we use context ids 1-4 to map addresses as below:
- * NOTE: each context only supports 64TB now.
- * 0x00001 -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
- * 0x00002 -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
- * 0x00003 -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
- * 0x00004 -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
+ * more details in get_user_context
+ *
+ * For kernel space, see get_kernel_context
  *
  * The proto-VSIDs are then scrambled into real VSIDs with the
  * multiplicative hash:
@@ -570,6 +575,21 @@ extern void slb_set_size(u16 size);
 #define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
 #define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)

+/*
+ * Some configs now support MAX_PHYSMEM of more than 512TB. Hence we will
+ * need to use more than one context for the linear mapping of the kernel.
+ * For vmalloc and memmap, we use just one context with 512TB. With 64 byte
+ * struct page size, we need only 32TB of memmap for 2PB (51 bits
+ * (MAX_PHYSMEM_BITS)).
+ */
+#if (MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
+#define MAX_KERNEL_CTX_CNT	(1UL << (MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
+#else
+#define MAX_KERNEL_CTX_CNT	1
+#endif
+
+#define MAX_VMALLOC_CTX_CNT	1
+#define MAX_MEMMAP_CTX_CNT	1
+
 /*
  * 256MB segment
  * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
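A worked check of the context-count arithmetic above, assuming MAX_PHYSMEM_BITS = 51 (2PB) and MAX_EA_BITS_PER_CONTEXT = 49 (512TB):

#include <stdio.h>

#define MAX_PHYSMEM_BITS	51
#define MAX_EA_BITS_PER_CONTEXT	49

int main(void)
{
	unsigned long ctx_cnt = 1UL << (MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT);

	/* 2PB of linear map / 512TB per context = 4 contexts */
	printf("MAX_KERNEL_CTX_CNT = %lu\n", ctx_cnt);
	return 0;
}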
@@ -580,12 +600,13 @@ extern void slb_set_size(u16 size);
  * We also need to avoid the last segment of the last context, because that
  * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
  * because of the modulo operation in vsid scramble.
  *
+ * We add one extra context to MIN_USER_CONTEXT so that we can map kernel
+ * context easily. The +1 is to map the unused 0xe region mapping.
  */
 #define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)
-#define MIN_USER_CONTEXT	(5)
-
-/* Would be nice to use KERNEL_REGION_ID here */
-#define KERNEL_REGION_CONTEXT_OFFSET	(0xc - 1)
+#define MIN_USER_CONTEXT	(MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
+				 MAX_MEMMAP_CTX_CNT + 2)

 /*
  * For platforms that support 65-bit VA we limit the context bits
@@ -745,6 +766,39 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
 	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
 }

+/*
+ * For kernel space, we use context ids as
+ * below. Range is 512TB per context.
+ *
+ * 0x00001 -  [ 0xc000000000000000 - 0xc001ffffffffffff ]
+ * 0x00002 -  [ 0xc002000000000000 - 0xc003ffffffffffff ]
+ * 0x00003 -  [ 0xc004000000000000 - 0xc005ffffffffffff ]
+ * 0x00004 -  [ 0xc006000000000000 - 0xc007ffffffffffff ]
+ *
+ * 0x00005 -  [ 0xd000000000000000 - 0xd001ffffffffffff ]
+ * 0x00006 -  Not used - Can map 0xe000000000000000 range.
+ * 0x00007 -  [ 0xf000000000000000 - 0xf001ffffffffffff ]
+ *
+ * So we can compute the context from the region (top nibble) by
+ * subtracting 11, or 0xc - 1.
+ */
+static inline unsigned long get_kernel_context(unsigned long ea)
+{
+	unsigned long region_id = REGION_ID(ea);
+	unsigned long ctx;
+	/*
+	 * For the linear mapping we do support multiple contexts
+	 */
+	if (region_id == KERNEL_REGION_ID) {
+		/*
+		 * We already verified ea to be not beyond the addr limit.
+		 */
+		ctx = 1 + ((ea & ~REGION_MASK) >> MAX_EA_BITS_PER_CONTEXT);
+	} else
+		ctx = (region_id - 0xc) + MAX_KERNEL_CTX_CNT;
+	return ctx;
+}
+
 /*
  * This is only valid for addresses >= PAGE_OFFSET
  */
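A standalone model of the get_kernel_context() mapping just added, under the same 512TB-per-context assumption (region handling simplified to the top nibble):

#include <stdio.h>

#define MAX_EA_BITS_PER_CONTEXT	49
#define MAX_KERNEL_CTX_CNT	4	/* 2PB linear map / 512TB */
#define REGION_MASK		0xf000000000000000UL

static unsigned long get_kernel_context_model(unsigned long ea)
{
	unsigned long region_id = ea >> 60;

	if (region_id == 0xc)	/* linear map: contexts 1..4 */
		return 1 + ((ea & ~REGION_MASK) >> MAX_EA_BITS_PER_CONTEXT);
	return (region_id - 0xc) + MAX_KERNEL_CTX_CNT;	/* 0xd -> 5, 0xf -> 7 */
}

int main(void)
{
	printf("%lu\n", get_kernel_context_model(0xc000000000000000UL)); /* 1 */
	printf("%lu\n", get_kernel_context_model(0xc002000000000000UL)); /* 2 */
	printf("%lu\n", get_kernel_context_model(0xd000000000000000UL)); /* 5 */
	printf("%lu\n", get_kernel_context_model(0xf000000000000000UL)); /* 7 */
	return 0;
}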
@@ -755,20 +809,7 @@ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
 	if (!is_kernel_addr(ea))
		return 0;

-	/*
-	 * For kernel space, we use context ids 1-4 to map the address space as
-	 * below:
-	 *
-	 * 0x00001 -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
-	 * 0x00002 -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
-	 * 0x00003 -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
-	 * 0x00004 -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
-	 *
-	 * So we can compute the context from the region (top nibble) by
-	 * subtracting 11, or 0xc - 1.
-	 */
-	context = (ea >> 60) - KERNEL_REGION_CONTEXT_OFFSET;
-
+	context = get_kernel_context(ea);
 	return get_vsid(context, ea, ssize);
 }
@@ -208,7 +208,7 @@ extern void radix_init_pseries(void);
 static inline void radix_init_pseries(void) { };
 #endif

-static inline int get_ea_context(mm_context_t *ctx, unsigned long ea)
+static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
 {
 	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

@@ -223,7 +223,7 @@ static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
 static inline unsigned long get_user_vsid(mm_context_t *ctx,
					  unsigned long ea, int ssize)
 {
-	unsigned long context = get_ea_context(ctx, ea);
+	unsigned long context = get_user_context(ctx, ea);

 	return get_vsid(context, ea, ssize);
 }
@@ -10,6 +10,9 @@
  *
  * Defined in such a way that we can optimize away code block at build time
  * if CONFIG_HUGETLB_PAGE=n.
+ *
+ * returns true for pmd migration entries, THP, devmap, hugetlb
+ * But compile time dependent on CONFIG_HUGETLB_PAGE
  */
 static inline int pmd_huge(pmd_t pmd)
 {
@@ -14,10 +14,6 @@
  */
 #define _PAGE_BIT_SWAP_TYPE	0

-#define _PAGE_NA		0
-#define _PAGE_RO		0
-#define _PAGE_USER		0
-
 #define _PAGE_EXEC		0x00001 /* execute permission */
 #define _PAGE_WRITE		0x00002 /* write access allowed */
 #define _PAGE_READ		0x00004 /* read access allowed */
@@ -122,10 +118,6 @@
 #define _PAGE_KERNEL_RO		(_PAGE_PRIVILEGED | _PAGE_READ)
 #define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY | \
				 _PAGE_RW | _PAGE_EXEC)
-/*
- * No page size encoding in the linux PTE
- */
-#define _PAGE_PSIZE		0
 /*
  * _PAGE_CHG_MASK masks of bits that are to be preserved across
  * pgprot changes
@@ -136,20 +128,13 @@

 #define H_PTE_PKEY  (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
		     H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
-/*
- * Mask of bits returned by pte_pgprot()
- */
-#define PAGE_PROT_BITS	(_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
-			 H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
-			 _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
-			 _PAGE_SOFT_DIRTY | H_PTE_PKEY)
 /*
  * We define 2 sets of base prot bits, one for basic pages (ie,
  * cacheable kernel and user pages) and one for non cacheable
  * pages. We always set _PAGE_COHERENT when SMP is enabled or
  * the processor might need it for DMA coherency.
  */
-#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
 #define _PAGE_BASE	(_PAGE_BASE_NC)

 /* Permission masks used to generate the __P and __S table,
@@ -159,8 +144,6 @@
  * Write permissions imply read permissions for now (we could make write-only
  * pages on BookE but we don't bother for now). Execute permission control is
  * possible on platforms that define _PAGE_EXEC
- *
- * Note due to the way vm flags are laid out, the bits are XWR
  */
 #define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
@@ -170,24 +153,6 @@
 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_READ)
 #define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)

-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY_X
-#define __P101	PAGE_READONLY_X
-#define __P110	PAGE_COPY_X
-#define __P111	PAGE_COPY_X
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY_X
-#define __S101	PAGE_READONLY_X
-#define __S110	PAGE_SHARED_X
-#define __S111	PAGE_SHARED_X
-
 /* Permission masks used for kernel mappings */
 #define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
 #define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
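The __P/__S tables above were moved to a common header by this series. For reference, a sketch of how generic mm code indexes them: the index bits are read/write/exec ("XWR"), and private writable mappings resolve to COPY (copy-on-write) while shared ones resolve to SHARED. The names below are symbolic stand-ins, not the powerpc pgprot values:

#include <stdio.h>

static const char *p_table[8] = {	/* private mappings: write => COW */
	"NONE", "READONLY", "COPY", "COPY",
	"READONLY_X", "READONLY_X", "COPY_X", "COPY_X",
};
static const char *s_table[8] = {	/* shared mappings: write => SHARED */
	"NONE", "READONLY", "SHARED", "SHARED",
	"READONLY_X", "READONLY_X", "SHARED_X", "SHARED_X",
};

int main(void)
{
	int rw = 0x3;	/* PROT_READ | PROT_WRITE as an XWR index: 011 */

	printf("private RW -> PAGE_%s, shared RW -> PAGE_%s\n",
	       p_table[rw], s_table[rw]);	/* COPY vs SHARED */
	return 0;
}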
@@ -519,7 +484,11 @@ static inline int pte_special(pte_t pte)
 	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
 }

-static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+static inline bool pte_exec(pte_t pte)
+{
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_EXEC));
+}

 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline bool pte_soft_dirty(pte_t pte)
@@ -529,12 +498,12 @@ static inline bool pte_soft_dirty(pte_t pte)

 static inline pte_t pte_mksoft_dirty(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SOFT_DIRTY));
 }

 static inline pte_t pte_clear_soft_dirty(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SOFT_DIRTY));
 }
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
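The pattern in this and the following hunks, pte_raw()/__pte_raw() plus cpu_to_be64() of a constant, avoids a runtime byte swap: the PTE is stored big-endian in memory, and byte-swapping a constant mask folds at compile time. A userspace model of the idea (the builtin and the bit value are assumptions of this sketch):

#include <stdint.h>
#include <stdio.h>

static inline uint64_t cpu_to_be64_model(uint64_t x)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return __builtin_bswap64(x);	/* constant-folded for literals */
#else
	return x;
#endif
}

int main(void)
{
	uint64_t raw = cpu_to_be64_model(0x8000000000000001ULL); /* stored PTE */
	uint64_t dirty = 0x80ULL;	/* illustrative _PAGE_DIRTY value */

	/* Set the bit on the raw form: one constant swap, no load swap. */
	raw |= cpu_to_be64_model(dirty);
	printf("raw now 0x%016llx\n", (unsigned long long)raw);
	return 0;
}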
@@ -555,7 +524,7 @@ static inline pte_t pte_mk_savedwrite(pte_t pte)
	 */
 	VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
		  cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
-	return __pte(pte_val(pte) & ~_PAGE_PRIVILEGED);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
 }

 #define pte_clear_savedwrite pte_clear_savedwrite
@@ -565,14 +534,14 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
	 * Used by KSM subsystem to make a protnone pte readonly.
	 */
 	VM_BUG_ON(!pte_protnone(pte));
-	return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
 }
 #else
 #define pte_clear_savedwrite pte_clear_savedwrite
 static inline pte_t pte_clear_savedwrite(pte_t pte)
 {
 	VM_WARN_ON(1);
-	return __pte(pte_val(pte) & ~_PAGE_WRITE);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
 }
 #endif /* CONFIG_NUMA_BALANCING */
@@ -587,6 +556,11 @@ static inline int pte_present(pte_t pte)
 	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID));
 }

+static inline bool pte_hw_valid(pte_t pte)
+{
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
+}
+
 #ifdef CONFIG_PPC_MEM_KEYS
 extern bool arch_pte_access_permitted(u64 pte, bool write, bool execute);
 #else
@@ -596,25 +570,22 @@ static inline bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
 }
 #endif /* CONFIG_PPC_MEM_KEYS */

+static inline bool pte_user(pte_t pte)
+{
+	return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
 #define pte_access_permitted pte_access_permitted
 static inline bool pte_access_permitted(pte_t pte, bool write)
 {
-	unsigned long pteval = pte_val(pte);
-	/* Also check for pte_user */
-	unsigned long clear_pte_bits = _PAGE_PRIVILEGED;
	/*
	 * _PAGE_READ is needed for any access and will be
	 * cleared for PROT_NONE
	 */
-	unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_READ;
-
-	if (write)
-		need_pte_bits |= _PAGE_WRITE;
-
-	if ((pteval & need_pte_bits) != need_pte_bits)
+	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

-	if ((pteval & clear_pte_bits) == clear_pte_bits)
+	if (write && !pte_write(pte))
		return false;

 	return arch_pte_access_permitted(pte_val(pte), write, 0);
@@ -643,17 +614,32 @@ static inline pte_t pte_wrprotect(pte_t pte)
 {
 	if (unlikely(pte_savedwrite(pte)))
		return pte_clear_savedwrite(pte);
-	return __pte(pte_val(pte) & ~_PAGE_WRITE);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_EXEC));
 }

 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_DIRTY));
 }

 static inline pte_t pte_mkold(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_ACCESSED));
 }

+static inline pte_t pte_mkexec(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC));
+}
+
+static inline pte_t pte_mkpte(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
+}
+
 static inline pte_t pte_mkwrite(pte_t pte)
@@ -661,22 +647,22 @@ static inline pte_t pte_mkwrite(pte_t pte)
	/*
	 * write implies read, hence set both
	 */
-	return __pte(pte_val(pte) | _PAGE_RW);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_RW));
 }

 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
 }

 static inline pte_t pte_mkyoung(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_ACCESSED));
 }

 static inline pte_t pte_mkspecial(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_SPECIAL);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL));
 }

 static inline pte_t pte_mkhuge(pte_t pte)
@@ -686,7 +672,17 @@ static inline pte_t pte_mkhuge(pte_t pte)

 static inline pte_t pte_mkdevmap(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_SPECIAL|_PAGE_DEVMAP);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
+}
+
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
 }

 /*
@@ -705,12 +701,8 @@ static inline int pte_devmap(pte_t pte)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	/* FIXME!! check whether this need to be a conditional */
-	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
-}
-
-static inline bool pte_user(pte_t pte)
-{
-	return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
+	return __pte_raw((pte_raw(pte) & cpu_to_be64(_PAGE_CHG_MASK)) |
+			 cpu_to_be64(pgprot_val(newprot)));
 }

 /* Encode and de-code a swap entry */
@@ -741,6 +733,8 @@ static inline bool pte_user(pte_t pte)
  */
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
 #define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
+#define __pmd_to_swp_entry(pmd)	(__pte_to_swp_entry(pmd_pte(pmd)))
+#define __swp_entry_to_pmd(x)	(pte_pmd(__swp_entry_to_pte(x)))

 #ifdef CONFIG_MEM_SOFT_DIRTY
 #define _PAGE_SWP_SOFT_DIRTY	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
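The new __pmd_to_swp_entry()/__swp_entry_to_pmd() pair is part of the THP migration enablement from the pull message. A sketch of the round trip, with an assumed _PAGE_PTE position: the marker is stripped on the way out and restored on the way in, so a swapped-out PMD still looks like a leaf entry.

#include <stdint.h>
#include <stdio.h>

#define PAGE_PTE (1ULL << 62)	/* assumed leaf marker */

int main(void)
{
	uint64_t swp_val = 0x1234;		/* encoded type|offset */
	uint64_t pte = swp_val | PAGE_PTE;	/* __swp_entry_to_pte */
	uint64_t back = pte & ~PAGE_PTE;	/* __pte_to_swp_entry */

	printf("round trip ok: %d\n", back == swp_val);	/* 1 */
	return 0;
}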
@@ -751,7 +745,7 @@ static inline bool pte_user(pte_t pte)
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
 }

 static inline bool pte_swp_soft_dirty(pte_t pte)
@@ -761,7 +755,7 @@ static inline bool pte_swp_soft_dirty(pte_t pte)

 static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_SOFT_DIRTY));
 }
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
@@ -850,10 +844,10 @@ static inline pgprot_t pgprot_writecombine(pgprot_t prot)
  */
 static inline bool pte_ci(pte_t pte)
 {
-	unsigned long pte_v = pte_val(pte);
+	__be64 pte_v = pte_raw(pte);

-	if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) ||
-	    ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT))
+	if (((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_TOLERANT)) ||
+	    ((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_NON_IDEMPOTENT)))
		return true;
	return false;
 }
@@ -875,8 +869,16 @@ static inline int pmd_none(pmd_t pmd)

 static inline int pmd_present(pmd_t pmd)
 {
+	/*
+	 * A pmd is considered present if _PAGE_PRESENT is set.
+	 * We also need to consider the pmd present which is marked
+	 * invalid during a split. Hence we look for _PAGE_INVALID
+	 * if we find _PAGE_PRESENT cleared.
+	 */
+	if (pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID))
+		return true;

-	return !pmd_none(pmd);
+	return false;
 }

 static inline int pmd_bad(pmd_t pmd)
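A model of the new pmd_present() rule: a pmd that is mid-split has _PAGE_PRESENT cleared but _PAGE_INVALID set, and must still be treated as present. Bit values are assumptions of this sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PRESENT (1ULL << 63)
#define INVALID (1ULL << 61)

static bool pmd_present_model(uint64_t pmd)
{
	return pmd & (PRESENT | INVALID);
}

int main(void)
{
	printf("%d\n", pmd_present_model(PRESENT) != 0);	/* 1: normal */
	printf("%d\n", pmd_present_model(INVALID) != 0);	/* 1: mid-split */
	printf("%d\n", pmd_present_model(0) != 0);		/* 0: none */
	return 0;
}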
@@ -903,7 +905,7 @@ static inline int pud_none(pud_t pud)

 static inline int pud_present(pud_t pud)
 {
-	return !pud_none(pud);
+	return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
 }

 extern struct page *pud_page(pud_t pud);
@@ -950,7 +952,7 @@ static inline int pgd_none(pgd_t pgd)

 static inline int pgd_present(pgd_t pgd)
 {
-	return !pgd_none(pgd);
+	return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
 }

 static inline pte_t pgd_pte(pgd_t pgd)
@@ -1020,17 +1022,16 @@ extern struct page *pgd_page(pgd_t pgd);
 #define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

-static inline int map_kernel_page(unsigned long ea, unsigned long pa,
-				  unsigned long flags)
+static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 {
 	if (radix_enabled()) {
 #if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
		unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
		WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
 #endif
-		return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
+		return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE);
 	}
-	return hash__map_kernel_page(ea, pa, flags);
+	return hash__map_kernel_page(ea, pa, prot);
 }

 static inline int __meminit vmemmap_create_mapping(unsigned long start,
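A hedged sketch of what a call site looks like after the flags-to-pgprot_t conversion (kernel-context pseudocode, not standalone; the addresses are made up for illustration):

/* A typed pgprot now replaces a bare unsigned long of flag bits. */
static int example_map_io(void)
{
	unsigned long ea = 0xc00c000000000000UL;	/* made-up kernel EA */
	unsigned long pa = 0x200000000UL;		/* made-up phys addr */

	return map_kernel_page(ea, pa, PAGE_KERNEL_NCG);
}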
@@ -1082,6 +1083,12 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
 #define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
 #define pmd_mksoft_dirty(pmd)  pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
 #define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define pmd_swp_mksoft_dirty(pmd)	pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)))
+#define pmd_swp_soft_dirty(pmd)		pte_swp_soft_dirty(pmd_pte(pmd))
+#define pmd_swp_clear_soft_dirty(pmd)	pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)))
+#endif
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

 #ifdef CONFIG_NUMA_BALANCING
@@ -1127,6 +1134,10 @@ pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
 	return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
 }

+/*
+ * returns true for pmd migration entries, THP, devmap, hugetlb
+ * But compile time dependent on THP config
+ */
 static inline int pmd_large(pmd_t pmd)
 {
 	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
@@ -1161,8 +1172,22 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 	pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
 }

+/*
+ * Only returns true for a THP. False for pmd migration entry.
+ * We also need to return true when we come across a pte that
+ * is in between a thp split. While splitting THP, we mark the pmd
+ * invalid (pmdp_invalidate()) before we set it with pte page
+ * address. A pmd_trans_huge() check against a pmd entry during that time
+ * should return true.
+ * We should not call this on a hugetlb entry. We should check for HugeTLB
+ * entry using vma->vm_flags
+ * The page table walk rule is explained in Documentation/vm/transhuge.rst
+ */
 static inline int pmd_trans_huge(pmd_t pmd)
 {
+	if (!pmd_present(pmd))
+		return false;
+
 	if (radix_enabled())
		return radix__pmd_trans_huge(pmd);
	return hash__pmd_trans_huge(pmd);