Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "There are a couple of new things for s390 with this merge request:

   - a new scheduling domain "drawer" is added to reflect the unusual
     topology found on z13 machines. Performance tests showed up to 8
     percent gain with the additional domain.

   - the new crc-32 checksum crypto module uses the vector-galois-field
     multiply and sum SIMD instruction to speed up crc-32 and crc-32c.

   - proper __ro_after_init support, this requires RO_AFTER_INIT_DATA in
     the generic vmlinux.lds linker script definitions.

   - kcov instrumentation support. A prerequisite for that is the inline
     assembly basic block cleanup, which is the reason for the
     net/iucv/iucv.c change.

   - support for 2GB pages is added to the hugetlbfs backend.

  Then there are two removals:

   - the oprofile hardware sampling support is dead code and is removed.
     The oprofile user space uses the perf interface nowadays.

   - the ETR clock synchronization is removed, this has been superseded
     by the STP clock synchronization. And it always has been
     "interesting" code.

  And the usual bug fixes and cleanups"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (82 commits)
  s390/pci: Delete an unnecessary check before the function call "pci_dev_put"
  s390/smp: clean up a condition
  s390/cio/chp : Remove deprecated create_singlethread_workqueue
  s390/chsc: improve channel path descriptor determination
  s390/chsc: sanitize fmt check for chp_desc determination
  s390/cio: make fmt1 channel path descriptor optional
  s390/chsc: fix ioctl CHSC_INFO_CU command
  s390/cio/device_ops: fix kernel doc
  s390/cio: allow to reset channel measurement block
  s390/console: Make preferred console handling more consistent
  s390/mm: fix gmap tlb flush issues
  s390/mm: add support for 2GB hugepages
  s390: have unique symbol for __switch_to address
  s390/cpuinfo: show maximum thread id
  s390/ptrace: clarify bits in the per_struct
  s390: stack address vs thread_info
  s390: remove pointless load within __switch_to
  s390: enable kcov support
  s390/cpumf: use basic block for ecctr inline assembly
  s390/hypfs: use basic block for diag inline assembly
  ...
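A note on the __ro_after_init item above: data tagged with that attribute stays writable while __init code runs and is write-protected once mark_rodata_ro() is called late during boot (see the arch/s390/mm/init.c hunk below). A minimal, hypothetical sketch; boot_policy and boot_policy_setup() are made-up names, not code from this merge:

    #include <linux/cache.h>
    #include <linux/init.h>
    #include <linux/kernel.h>

    /* written exactly once during early boot, read-only afterwards */
    static int boot_policy __ro_after_init;

    static int __init boot_policy_setup(char *str)
    {
            /* still writable here: mark_rodata_ro() has not run yet */
            if (kstrtoint(str, 0, &boot_policy))
                    boot_policy = 0;
            return 1;
    }
    __setup("boot_policy=", boot_policy_setup);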
arch/s390/mm/dump_pagetables.c
@@ -157,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
                 pud = pud_offset(pgd, addr);
                 if (!pud_none(*pud))
                         if (pud_large(*pud)) {
-                                prot = pud_val(*pud) & _REGION3_ENTRY_RO;
+                                prot = pud_val(*pud) & _REGION_ENTRY_PROTECT;
                                 note_page(m, st, prot, 2);
                         } else
                                 walk_pmd_level(m, st, pud, addr);
arch/s390/mm/fault.c
@@ -624,7 +624,7 @@ void pfault_fini(void)
         diag_stat_inc(DIAG_STAT_X258);
         asm volatile(
                 " diag %0,0,0x258\n"
-                "0:\n"
+                "0: nopr %%r7\n"
                 EX_TABLE(0b,0b)
                 : : "a" (&refbk), "m" (refbk) : "cc");
 }
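Note: the pfault change above belongs to the inline-assembly basic block cleanup mentioned in the pull text. The local label used as the exception-table target gets a real instruction (nopr %%r7) instead of pointing at the end of the assembly, which keeps the fixup target well-defined once kcov basic-block instrumentation is enabled.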
arch/s390/mm/gmap.c
@@ -85,7 +85,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
 static void gmap_flush_tlb(struct gmap *gmap)
 {
         if (MACHINE_HAS_IDTE)
-                __tlb_flush_asce(gmap->mm, gmap->asce);
+                __tlb_flush_idte(gmap->asce);
         else
                 __tlb_flush_global();
 }
@@ -124,7 +124,7 @@ void gmap_free(struct gmap *gmap)

         /* Flush tlb. */
         if (MACHINE_HAS_IDTE)
-                __tlb_flush_asce(gmap->mm, gmap->asce);
+                __tlb_flush_idte(gmap->asce);
         else
                 __tlb_flush_global();

@@ -430,6 +430,9 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
         VM_BUG_ON(pgd_none(*pgd));
         pud = pud_offset(pgd, vmaddr);
         VM_BUG_ON(pud_none(*pud));
+        /* large puds cannot yet be handled */
+        if (pud_large(*pud))
+                return -EFAULT;
         pmd = pmd_offset(pud, vmaddr);
         VM_BUG_ON(pmd_none(*pmd));
         /* large pmds cannot yet be handled */
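Note: the two flush hunks correspond to the "s390/mm: fix gmap tlb flush issues" commit in the list above; the guest ASCE is now flushed with __tlb_flush_idte() directly rather than via __tlb_flush_asce(), which depends on the mm attach state. The __gmap_link() hunk makes the gmap code refuse large puds, which it cannot handle yet.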
arch/s390/mm/gup.c
@@ -128,6 +128,44 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
         return 1;
 }

+static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
+                unsigned long end, int write, struct page **pages, int *nr)
+{
+        struct page *head, *page;
+        unsigned long mask;
+        int refs;
+
+        mask = (write ? _REGION_ENTRY_PROTECT : 0) | _REGION_ENTRY_INVALID;
+        if ((pud_val(pud) & mask) != 0)
+                return 0;
+        VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
+
+        refs = 0;
+        head = pud_page(pud);
+        page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+        do {
+                VM_BUG_ON_PAGE(compound_head(page) != head, page);
+                pages[*nr] = page;
+                (*nr)++;
+                page++;
+                refs++;
+        } while (addr += PAGE_SIZE, addr != end);
+
+        if (!page_cache_add_speculative(head, refs)) {
+                *nr -= refs;
+                return 0;
+        }
+
+        if (unlikely(pud_val(pud) != pud_val(*pudp))) {
+                *nr -= refs;
+                while (refs--)
+                        put_page(head);
+                return 0;
+        }
+
+        return 1;
+}
+
 static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
                 unsigned long end, int write, struct page **pages, int *nr)
 {
@@ -144,7 +182,12 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
                 next = pud_addr_end(addr, end);
                 if (pud_none(pud))
                         return 0;
-                if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
+                if (unlikely(pud_large(pud))) {
+                        if (!gup_huge_pud(pudp, pud, addr, next, write, pages,
+                                          nr))
+                                return 0;
+                } else if (!gup_pmd_range(pudp, pud, addr, next, write, pages,
+                                          nr))
                         return 0;
         } while (pudp++, addr = next, addr != end);

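Note on the lockless fast path: gup_huge_pud() mirrors the existing huge-pmd variant. It takes speculative references on the compound head via page_cache_add_speculative() and then re-reads the pud entry; since no page table lock is held, an entry that changed in the meantime means the references are dropped again and 0 is returned, so the caller falls back to the slow path.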
arch/s390/mm/hugetlbpage.c
@@ -1,19 +1,22 @@
 /*
  *  IBM System z Huge TLB Page Support for Kernel.
  *
- *    Copyright IBM Corp. 2007
+ *    Copyright IBM Corp. 2007,2016
  *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */

+#define KMSG_COMPONENT "hugetlb"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/mm.h>
 #include <linux/hugetlb.h>

-static inline pmd_t __pte_to_pmd(pte_t pte)
+static inline unsigned long __pte_to_rste(pte_t pte)
 {
-        pmd_t pmd;
+        unsigned long rste;

         /*
-         * Convert encoding             pte bits        pmd bits
+         * Convert encoding             pte bits        pmd / pud bits
          *                              lIR.uswrdy.p    dy..R...I...wr
          * empty                        010.000000.0 -> 00..0...1...00
          * prot-none, clean, old        111.000000.1 -> 00..1...1...00
@@ -33,25 +36,31 @@ static inline pmd_t __pte_to_pmd(pte_t pte)
          *          u unused, l large
          */
         if (pte_present(pte)) {
-                pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_READ) >> 4;
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_WRITE) >> 4;
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_INVALID) >> 5;
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT);
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
-                pmd_val(pmd) |= (pte_val(pte) & _PAGE_SOFT_DIRTY) << 13;
+                rste = pte_val(pte) & PAGE_MASK;
+                rste |= (pte_val(pte) & _PAGE_READ) >> 4;
+                rste |= (pte_val(pte) & _PAGE_WRITE) >> 4;
+                rste |= (pte_val(pte) & _PAGE_INVALID) >> 5;
+                rste |= (pte_val(pte) & _PAGE_PROTECT);
+                rste |= (pte_val(pte) & _PAGE_DIRTY) << 10;
+                rste |= (pte_val(pte) & _PAGE_YOUNG) << 10;
+                rste |= (pte_val(pte) & _PAGE_SOFT_DIRTY) << 13;
         } else
-                pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
-        return pmd;
+                rste = _SEGMENT_ENTRY_INVALID;
+        return rste;
 }

-static inline pte_t __pmd_to_pte(pmd_t pmd)
+static inline pte_t __rste_to_pte(unsigned long rste)
 {
+        int present;
         pte_t pte;

+        if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+                present = pud_present(__pud(rste));
+        else
+                present = pmd_present(__pmd(rste));
+
         /*
-         * Convert encoding             pmd bits        pte bits
+         * Convert encoding             pmd / pud bits  pte bits
          *                              dy..R...I...wr  lIR.uswrdy.p
          * empty                        00..0...1...00 -> 010.000000.0
          * prot-none, clean, old        00..1...1...00 -> 111.000000.1
@@ -70,16 +79,16 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
          * SW-bits: p present, y young, d dirty, r read, w write, s special,
          *          u unused, l large
          */
-        if (pmd_present(pmd)) {
-                pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
+        if (present) {
+                pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
                 pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_READ) << 4;
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
-                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY) >> 13;
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_READ) << 4;
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_WRITE) << 4;
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_INVALID) << 5;
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_PROTECT);
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_DIRTY) >> 10;
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_YOUNG) >> 10;
+                pte_val(pte) |= (rste & _SEGMENT_ENTRY_SOFT_DIRTY) >> 13;
         } else
                 pte_val(pte) = _PAGE_INVALID;
         return pte;
@@ -88,27 +97,33 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                      pte_t *ptep, pte_t pte)
 {
-        pmd_t pmd = __pte_to_pmd(pte);
+        unsigned long rste = __pte_to_rste(pte);

-        pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
-        *(pmd_t *) ptep = pmd;
+        /* Set correct table type for 2G hugepages */
+        if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+                rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
+        else
+                rste |= _SEGMENT_ENTRY_LARGE;
+        pte_val(*ptep) = rste;
 }

 pte_t huge_ptep_get(pte_t *ptep)
 {
-        pmd_t pmd = *(pmd_t *) ptep;
-
-        return __pmd_to_pte(pmd);
+        return __rste_to_pte(pte_val(*ptep));
 }

 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                               unsigned long addr, pte_t *ptep)
 {
+        pte_t pte = huge_ptep_get(ptep);
         pmd_t *pmdp = (pmd_t *) ptep;
-        pmd_t old;
+        pud_t *pudp = (pud_t *) ptep;

-        old = pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
-        return __pmd_to_pte(old);
+        if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+                pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
+        else
+                pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+        return pte;
 }

 pte_t *huge_pte_alloc(struct mm_struct *mm,
@@ -120,8 +135,12 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,

         pgdp = pgd_offset(mm, addr);
         pudp = pud_alloc(mm, pgdp, addr);
-        if (pudp)
-                pmdp = pmd_alloc(mm, pudp, addr);
+        if (pudp) {
+                if (sz == PUD_SIZE)
+                        return (pte_t *) pudp;
+                else if (sz == PMD_SIZE)
+                        pmdp = pmd_alloc(mm, pudp, addr);
+        }
         return (pte_t *) pmdp;
 }

@@ -134,8 +153,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
         pgdp = pgd_offset(mm, addr);
         if (pgd_present(*pgdp)) {
                 pudp = pud_offset(pgdp, addr);
-                if (pud_present(*pudp))
+                if (pud_present(*pudp)) {
+                        if (pud_large(*pudp))
+                                return (pte_t *) pudp;
                         pmdp = pmd_offset(pudp, addr);
+                }
         }
         return (pte_t *) pmdp;
 }
@@ -147,5 +169,34 @@ int pmd_huge(pmd_t pmd)

 int pud_huge(pud_t pud)
 {
-        return 0;
+        return pud_large(pud);
 }
+
+struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+                pud_t *pud, int flags)
+{
+        if (flags & FOLL_GET)
+                return NULL;
+
+        return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
+}
+
+static __init int setup_hugepagesz(char *opt)
+{
+        unsigned long size;
+        char *string = opt;
+
+        size = memparse(opt, &opt);
+        if (MACHINE_HAS_EDAT1 && size == PMD_SIZE) {
+                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
+        } else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
+                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+        } else {
+                pr_err("hugepagesz= specifies an unsupported page size %s\n",
+                        string);
+                return 0;
+        }
+        return 1;
+}
+__setup("hugepagesz=", setup_hugepagesz);
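With the hugetlbpage changes above, booting an EDAT2 machine with hugepagesz=2G hugepages=N makes 2 GB pages available through the usual hugetlbfs interfaces. A minimal userspace sketch (a hypothetical demo, assuming the asm-generic MAP_HUGETLB/MAP_HUGE_SHIFT values):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #ifndef MAP_HUGETLB
    #define MAP_HUGETLB     0x40000         /* asm-generic value */
    #endif
    #ifndef MAP_HUGE_SHIFT
    #define MAP_HUGE_SHIFT  26
    #endif
    #define MAP_HUGE_2GB    (31 << MAP_HUGE_SHIFT)  /* log2(2G) = 31 */

    int main(void)
    {
            size_t len = 1UL << 31;         /* one 2 GB page */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2GB,
                           -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");         /* fails unless 2G pages are reserved */
                    return 1;
            }
            memset(p, 0, len);              /* touch the mapping */
            munmap(p, len);
            return 0;
    }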
arch/s390/mm/init.c
@@ -40,7 +40,7 @@
 #include <asm/ctl_reg.h>
 #include <asm/sclp.h>

-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);

 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
@@ -111,17 +111,16 @@ void __init paging_init(void)

 void mark_rodata_ro(void)
 {
-        /* Text and rodata are already protected. Nothing to do here. */
-        pr_info("Write protecting the kernel read-only data: %luk\n",
-                ((unsigned long)&_eshared - (unsigned long)&_stext) >> 10);
+        unsigned long size = __end_ro_after_init - __start_ro_after_init;
+
+        set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
+        pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
 }

 void __init mem_init(void)
 {
-        if (MACHINE_HAS_TLB_LC)
-                cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
+        cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
         cpumask_set_cpu(0, mm_cpumask(&init_mm));
-        atomic_set(&init_mm.context.attach_count, 1);

         set_max_mapnr(max_low_pfn);
         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
arch/s390/mm/page-states.c
@@ -34,20 +34,25 @@ static int __init cmma(char *str)
 }
 __setup("cmma=", cmma);

-void __init cmma_init(void)
+static inline int cmma_test_essa(void)
 {
         register unsigned long tmp asm("0") = 0;
         register int rc asm("1") = -EOPNOTSUPP;

-        if (!cmma_flag)
-                return;
         asm volatile(
                 " .insn rrf,0xb9ab0000,%1,%1,0,0\n"
                 "0: la %0,0\n"
                 "1:\n"
                 EX_TABLE(0b,1b)
                 : "+&d" (rc), "+&d" (tmp));
-        if (rc)
+        return rc;
+}
+
+void __init cmma_init(void)
+{
+        if (!cmma_flag)
+                return;
+        if (cmma_test_essa())
                 cmma_flag = 0;
 }

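Note: moving the essa probe into its own small cmma_test_essa() helper is the same basic-block treatment that the other inline assemblies in this series receive, so that the surrounding code keeps well-formed basic blocks for kcov instrumentation.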
arch/s390/mm/pageattr.c
@@ -10,7 +10,6 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>

-#if PAGE_DEFAULT_KEY
 static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
 {
         asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
@@ -22,6 +21,8 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
 {
         unsigned long boundary, size;

+        if (!PAGE_DEFAULT_KEY)
+                return;
         while (start < end) {
                 if (MACHINE_HAS_EDAT1) {
                         /* set storage keys for a 1MB frame */
@@ -38,56 +39,254 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
                 start += PAGE_SIZE;
         }
 }
-#endif

-static pte_t *walk_page_table(unsigned long addr)
-{
-        pgd_t *pgdp;
-        pud_t *pudp;
-        pmd_t *pmdp;
-        pte_t *ptep;
-
-        pgdp = pgd_offset_k(addr);
-        if (pgd_none(*pgdp))
-                return NULL;
-        pudp = pud_offset(pgdp, addr);
-        if (pud_none(*pudp) || pud_large(*pudp))
-                return NULL;
-        pmdp = pmd_offset(pudp, addr);
-        if (pmd_none(*pmdp) || pmd_large(*pmdp))
-                return NULL;
-        ptep = pte_offset_kernel(pmdp, addr);
-        if (pte_none(*ptep))
-                return NULL;
-        return ptep;
-}
+#ifdef CONFIG_PROC_FS
+atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+
+void arch_report_meminfo(struct seq_file *m)
+{
+        seq_printf(m, "DirectMap4k:    %8lu kB\n",
+                   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
+        seq_printf(m, "DirectMap1M:    %8lu kB\n",
+                   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
+        seq_printf(m, "DirectMap2G:    %8lu kB\n",
+                   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
+}
+#endif /* CONFIG_PROC_FS */
+
+static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
+                    unsigned long dtt)
+{
+        unsigned long table, mask;
+
+        mask = 0;
+        if (MACHINE_HAS_EDAT2) {
+                switch (dtt) {
+                case CRDTE_DTT_REGION3:
+                        mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
+                        break;
+                case CRDTE_DTT_SEGMENT:
+                        mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+                        break;
+                case CRDTE_DTT_PAGE:
+                        mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
+                        break;
+                }
+                table = (unsigned long)old & mask;
+                crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
+        } else if (MACHINE_HAS_IDTE) {
+                cspg(old, *old, new);
+        } else {
+                csp((unsigned int *)old + 1, *old, new);
+        }
+}

-static void change_page_attr(unsigned long addr, int numpages,
-                             pte_t (*set) (pte_t))
-{
-        pte_t *ptep;
-        int i;
+struct cpa {
+        unsigned int set_ro : 1;
+        unsigned int clear_ro : 1;
+};

-        for (i = 0; i < numpages; i++) {
-                ptep = walk_page_table(addr);
-                if (WARN_ON_ONCE(!ptep))
-                        break;
-                *ptep = set(*ptep);
-                addr += PAGE_SIZE;
-        }
-        __tlb_flush_kernel();
-}
+static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
+                          struct cpa cpa)
+{
+        pte_t *ptep, new;
+
+        ptep = pte_offset(pmdp, addr);
+        do {
+                if (pte_none(*ptep))
+                        return -EINVAL;
+                if (cpa.set_ro)
+                        new = pte_wrprotect(*ptep);
+                else if (cpa.clear_ro)
+                        new = pte_mkwrite(pte_mkdirty(*ptep));
+                pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
+                ptep++;
+                addr += PAGE_SIZE;
+                cond_resched();
+        } while (addr < end);
+        return 0;
+}
+
+static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
+{
+        unsigned long pte_addr, prot;
+        pte_t *pt_dir, *ptep;
+        pmd_t new;
+        int i, ro;
+
+        pt_dir = vmem_pte_alloc();
+        if (!pt_dir)
+                return -ENOMEM;
+        pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
+        ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
+        prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
+        ptep = pt_dir;
+        for (i = 0; i < PTRS_PER_PTE; i++) {
+                pte_val(*ptep) = pte_addr | prot;
+                pte_addr += PAGE_SIZE;
+                ptep++;
+        }
+        pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
+        pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
+        update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
+        update_page_count(PG_DIRECT_MAP_1M, -1);
+        return 0;
+}
+
+static void modify_pmd_page(pmd_t *pmdp, unsigned long addr, struct cpa cpa)
+{
+        pmd_t new;
+
+        if (cpa.set_ro)
+                new = pmd_wrprotect(*pmdp);
+        else if (cpa.clear_ro)
+                new = pmd_mkwrite(pmd_mkdirty(*pmdp));
+        pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
+}
+
+static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
+                          struct cpa cpa)
+{
+        unsigned long next;
+        pmd_t *pmdp;
+        int rc = 0;
+
+        pmdp = pmd_offset(pudp, addr);
+        do {
+                if (pmd_none(*pmdp))
+                        return -EINVAL;
+                next = pmd_addr_end(addr, end);
+                if (pmd_large(*pmdp)) {
+                        if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
+                                rc = split_pmd_page(pmdp, addr);
+                                if (rc)
+                                        return rc;
+                                continue;
+                        }
+                        modify_pmd_page(pmdp, addr, cpa);
+                } else {
+                        rc = walk_pte_level(pmdp, addr, next, cpa);
+                        if (rc)
+                                return rc;
+                }
+                pmdp++;
+                addr = next;
+                cond_resched();
+        } while (addr < end);
+        return rc;
+}
+
+static int split_pud_page(pud_t *pudp, unsigned long addr)
+{
+        unsigned long pmd_addr, prot;
+        pmd_t *pm_dir, *pmdp;
+        pud_t new;
+        int i, ro;
+
+        pm_dir = vmem_pmd_alloc();
+        if (!pm_dir)
+                return -ENOMEM;
+        pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
+        ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
+        prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
+        pmdp = pm_dir;
+        for (i = 0; i < PTRS_PER_PMD; i++) {
+                pmd_val(*pmdp) = pmd_addr | prot;
+                pmd_addr += PMD_SIZE;
+                pmdp++;
+        }
+        pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
+        pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
+        update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
+        update_page_count(PG_DIRECT_MAP_2G, -1);
+        return 0;
+}
+
+static void modify_pud_page(pud_t *pudp, unsigned long addr, struct cpa cpa)
+{
+        pud_t new;
+
+        if (cpa.set_ro)
+                new = pud_wrprotect(*pudp);
+        else if (cpa.clear_ro)
+                new = pud_mkwrite(pud_mkdirty(*pudp));
+        pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
+}
+
+static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
+                          struct cpa cpa)
+{
+        unsigned long next;
+        pud_t *pudp;
+        int rc = 0;
+
+        pudp = pud_offset(pgd, addr);
+        do {
+                if (pud_none(*pudp))
+                        return -EINVAL;
+                next = pud_addr_end(addr, end);
+                if (pud_large(*pudp)) {
+                        if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
+                                rc = split_pud_page(pudp, addr);
+                                if (rc)
+                                        break;
+                                continue;
+                        }
+                        modify_pud_page(pudp, addr, cpa);
+                } else {
+                        rc = walk_pmd_level(pudp, addr, next, cpa);
+                }
+                pudp++;
+                addr = next;
+                cond_resched();
+        } while (addr < end && !rc);
+        return rc;
+}
+
+static DEFINE_MUTEX(cpa_mutex);
+
+static int change_page_attr(unsigned long addr, unsigned long end,
+                            struct cpa cpa)
+{
+        unsigned long next;
+        int rc = -EINVAL;
+        pgd_t *pgdp;
+
+        if (end >= MODULES_END)
+                return -EINVAL;
+        mutex_lock(&cpa_mutex);
+        pgdp = pgd_offset_k(addr);
+        do {
+                if (pgd_none(*pgdp))
+                        break;
+                next = pgd_addr_end(addr, end);
+                rc = walk_pud_level(pgdp, addr, next, cpa);
+                if (rc)
+                        break;
+                cond_resched();
+        } while (pgdp++, addr = next, addr < end && !rc);
+        mutex_unlock(&cpa_mutex);
+        return rc;
+}

 int set_memory_ro(unsigned long addr, int numpages)
 {
-        change_page_attr(addr, numpages, pte_wrprotect);
-        return 0;
+        struct cpa cpa = {
+                .set_ro = 1,
+        };
+
+        addr &= PAGE_MASK;
+        return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
 }

 int set_memory_rw(unsigned long addr, int numpages)
 {
-        change_page_attr(addr, numpages, pte_mkwrite);
-        return 0;
+        struct cpa cpa = {
+                .clear_ro = 1,
+        };
+
+        addr &= PAGE_MASK;
+        return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
 }

 /* not possible */
@@ -138,7 +337,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
                 nr = min(numpages - i, nr);
                 if (enable) {
                         for (j = 0; j < nr; j++) {
-                                pte_val(*pte) = __pa(address);
+                                pte_val(*pte) = address | pgprot_val(PAGE_KERNEL);
                                 address += PAGE_SIZE;
                                 pte++;
                         }
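Two notes on the pageattr rewrite: set_memory_ro()/set_memory_rw() now walk the kernel page tables themselves and split 2G or 1M mappings on demand (split_pud_page()/split_pmd_page()) instead of assuming pre-split 4K pages, and the new arch_report_meminfo() exposes the direct-mapping counters in /proc/meminfo. Hypothetical sample output (values made up; the format comes from the seq_printf calls above):

    DirectMap4k:       12288 kB
    DirectMap1M:     1036288 kB
    DirectMap2G:     4194304 kB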
arch/s390/mm/pgtable.c
@@ -27,40 +27,37 @@
 static inline pte_t ptep_flush_direct(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
 {
-        int active, count;
         pte_t old;

         old = *ptep;
         if (unlikely(pte_val(old) & _PAGE_INVALID))
                 return old;
-        active = (mm == current->active_mm) ? 1 : 0;
-        count = atomic_add_return(0x10000, &mm->context.attach_count);
-        if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+        atomic_inc(&mm->context.flush_count);
+        if (MACHINE_HAS_TLB_LC &&
             cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                 __ptep_ipte_local(addr, ptep);
         else
                 __ptep_ipte(addr, ptep);
-        atomic_sub(0x10000, &mm->context.attach_count);
+        atomic_dec(&mm->context.flush_count);
         return old;
 }

 static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
                                     unsigned long addr, pte_t *ptep)
 {
-        int active, count;
         pte_t old;

         old = *ptep;
         if (unlikely(pte_val(old) & _PAGE_INVALID))
                 return old;
-        active = (mm == current->active_mm) ? 1 : 0;
-        count = atomic_add_return(0x10000, &mm->context.attach_count);
-        if ((count & 0xffff) <= active) {
+        atomic_inc(&mm->context.flush_count);
+        if (cpumask_equal(&mm->context.cpu_attach_mask,
+                          cpumask_of(smp_processor_id()))) {
                 pte_val(*ptep) |= _PAGE_INVALID;
                 mm->context.flush_mm = 1;
         } else
                 __ptep_ipte(addr, ptep);
-        atomic_sub(0x10000, &mm->context.attach_count);
+        atomic_dec(&mm->context.flush_count);
         return old;
 }

@@ -70,7 +67,6 @@ static inline pgste_t pgste_get_lock(pte_t *ptep)
 #ifdef CONFIG_PGSTE
         unsigned long old;

-        preempt_disable();
         asm(
                 " lg %0,%2\n"
                 "0: lgr %1,%0\n"
@@ -93,7 +89,6 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
                 : "=Q" (ptep[PTRS_PER_PTE])
                 : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
                 : "cc", "memory");
-        preempt_enable();
 #endif
 }

@@ -230,9 +225,11 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
         pgste_t pgste;
         pte_t old;

+        preempt_disable();
         pgste = ptep_xchg_start(mm, addr, ptep);
         old = ptep_flush_direct(mm, addr, ptep);
         ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+        preempt_enable();
         return old;
 }
 EXPORT_SYMBOL(ptep_xchg_direct);
@@ -243,9 +240,11 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
         pgste_t pgste;
         pte_t old;

+        preempt_disable();
         pgste = ptep_xchg_start(mm, addr, ptep);
         old = ptep_flush_lazy(mm, addr, ptep);
         ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+        preempt_enable();
         return old;
 }
 EXPORT_SYMBOL(ptep_xchg_lazy);
@@ -256,6 +255,7 @@ pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
         pgste_t pgste;
         pte_t old;

+        preempt_disable();
         pgste = ptep_xchg_start(mm, addr, ptep);
         old = ptep_flush_lazy(mm, addr, ptep);
         if (mm_has_pgste(mm)) {
@@ -279,13 +279,13 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
         } else {
                 *ptep = pte;
         }
+        preempt_enable();
 }
 EXPORT_SYMBOL(ptep_modify_prot_commit);

 static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
                                       unsigned long addr, pmd_t *pmdp)
 {
-        int active, count;
         pmd_t old;

         old = *pmdp;
@@ -295,36 +295,34 @@ static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
                 __pmdp_csp(pmdp);
                 return old;
         }
-        active = (mm == current->active_mm) ? 1 : 0;
-        count = atomic_add_return(0x10000, &mm->context.attach_count);
-        if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+        atomic_inc(&mm->context.flush_count);
+        if (MACHINE_HAS_TLB_LC &&
             cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                 __pmdp_idte_local(addr, pmdp);
         else
                 __pmdp_idte(addr, pmdp);
-        atomic_sub(0x10000, &mm->context.attach_count);
+        atomic_dec(&mm->context.flush_count);
         return old;
 }

 static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
                                     unsigned long addr, pmd_t *pmdp)
 {
-        int active, count;
         pmd_t old;

         old = *pmdp;
         if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
                 return old;
-        active = (mm == current->active_mm) ? 1 : 0;
-        count = atomic_add_return(0x10000, &mm->context.attach_count);
-        if ((count & 0xffff) <= active) {
+        atomic_inc(&mm->context.flush_count);
+        if (cpumask_equal(&mm->context.cpu_attach_mask,
+                          cpumask_of(smp_processor_id()))) {
                 pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
                 mm->context.flush_mm = 1;
         } else if (MACHINE_HAS_IDTE)
                 __pmdp_idte(addr, pmdp);
         else
                 __pmdp_csp(pmdp);
-        atomic_sub(0x10000, &mm->context.attach_count);
+        atomic_dec(&mm->context.flush_count);
         return old;
 }

@@ -333,8 +331,10 @@ pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 {
         pmd_t old;

+        preempt_disable();
         old = pmdp_flush_direct(mm, addr, pmdp);
         *pmdp = new;
+        preempt_enable();
         return old;
 }
 EXPORT_SYMBOL(pmdp_xchg_direct);
@@ -344,12 +344,53 @@ pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 {
         pmd_t old;

+        preempt_disable();
         old = pmdp_flush_lazy(mm, addr, pmdp);
         *pmdp = new;
+        preempt_enable();
         return old;
 }
 EXPORT_SYMBOL(pmdp_xchg_lazy);

+static inline pud_t pudp_flush_direct(struct mm_struct *mm,
+                                      unsigned long addr, pud_t *pudp)
+{
+        pud_t old;
+
+        old = *pudp;
+        if (pud_val(old) & _REGION_ENTRY_INVALID)
+                return old;
+        if (!MACHINE_HAS_IDTE) {
+                /*
+                 * Invalid bit position is the same for pmd and pud, so we can
+                 * re-use _pmd_csp() here
+                 */
+                __pmdp_csp((pmd_t *) pudp);
+                return old;
+        }
+        atomic_inc(&mm->context.flush_count);
+        if (MACHINE_HAS_TLB_LC &&
+            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+                __pudp_idte_local(addr, pudp);
+        else
+                __pudp_idte(addr, pudp);
+        atomic_dec(&mm->context.flush_count);
+        return old;
+}
+
+pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
+                       pud_t *pudp, pud_t new)
+{
+        pud_t old;
+
+        preempt_disable();
+        old = pudp_flush_direct(mm, addr, pudp);
+        *pudp = new;
+        preempt_enable();
+        return old;
+}
+EXPORT_SYMBOL(pudp_xchg_direct);
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                 pgtable_t pgtable)
@@ -398,20 +439,24 @@ void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
         pgste_t pgste;

         /* the mm_has_pgste() check is done in set_pte_at() */
+        preempt_disable();
         pgste = pgste_get_lock(ptep);
         pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
         pgste_set_key(ptep, pgste, entry, mm);
         pgste = pgste_set_pte(ptep, pgste, entry);
         pgste_set_unlock(ptep, pgste);
+        preempt_enable();
 }

 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         pgste_t pgste;

+        preempt_disable();
         pgste = pgste_get_lock(ptep);
         pgste_val(pgste) |= PGSTE_IN_BIT;
         pgste_set_unlock(ptep, pgste);
+        preempt_enable();
 }

 static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
@@ -434,6 +479,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
         pte_t pte;

         /* Zap unused and logically-zero pages */
+        preempt_disable();
         pgste = pgste_get_lock(ptep);
         pgstev = pgste_val(pgste);
         pte = *ptep;
@@ -446,6 +492,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
         if (reset)
                 pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
         pgste_set_unlock(ptep, pgste);
+        preempt_enable();
 }

 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -454,6 +501,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
         pgste_t pgste;

         /* Clear storage key */
+        preempt_disable();
         pgste = pgste_get_lock(ptep);
         pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
                               PGSTE_GR_BIT | PGSTE_GC_BIT);
@@ -461,6 +509,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
         if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
                 page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
         pgste_set_unlock(ptep, pgste);
+        preempt_enable();
 }

 /*
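Note: the common thread in the pgtable.c hunks is the locking rework. The old attach_count protocol (atomic_add_return(0x10000, ...) with the low 16 bits compared against the active flag) is replaced by a plain flush_count counter plus explicit preempt_disable()/preempt_enable() in the callers, and the new pudp_flush_direct()/pudp_xchg_direct() pair gives the 2GB-hugepage code a way to exchange region-3 entries with a proper IDTE flush.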
arch/s390/mm/vmem.c
@@ -11,6 +11,7 @@
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/memblock.h>
+#include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
@@ -29,9 +30,11 @@ static LIST_HEAD(mem_segs);

 static void __ref *vmem_alloc_pages(unsigned int order)
 {
+        unsigned long size = PAGE_SIZE << order;
+
         if (slab_is_available())
                 return (void *)__get_free_pages(GFP_KERNEL, order);
-        return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+        return alloc_bootmem_align(size, size);
 }

 static inline pud_t *vmem_pud_alloc(void)
@@ -45,7 +48,7 @@ static inline pud_t *vmem_pud_alloc(void)
         return pud;
 }

-static inline pmd_t *vmem_pmd_alloc(void)
+pmd_t *vmem_pmd_alloc(void)
 {
         pmd_t *pmd = NULL;

@@ -56,7 +59,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
         return pmd;
 }

-static pte_t __ref *vmem_pte_alloc(void)
+pte_t __ref *vmem_pte_alloc(void)
 {
         pte_t *pte;

@@ -75,8 +78,9 @@ static pte_t __ref *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
+static int vmem_add_mem(unsigned long start, unsigned long size)
 {
+        unsigned long pages4k, pages1m, pages2g;
         unsigned long end = start + size;
         unsigned long address = start;
         pgd_t *pg_dir;
@@ -85,6 +89,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
         pte_t *pt_dir;
         int ret = -ENOMEM;

+        pages4k = pages1m = pages2g = 0;
         while (address < end) {
                 pg_dir = pgd_offset_k(address);
                 if (pgd_none(*pg_dir)) {
@@ -97,10 +102,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                 if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
                     !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
                     !debug_pagealloc_enabled()) {
-                        pud_val(*pu_dir) = __pa(address) |
-                                _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
-                                (ro ? _REGION_ENTRY_PROTECT : 0);
+                        pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
                         address += PUD_SIZE;
+                        pages2g++;
                         continue;
                 }
                 if (pud_none(*pu_dir)) {
@@ -113,11 +117,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                 if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
                     !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
                     !debug_pagealloc_enabled()) {
-                        pmd_val(*pm_dir) = __pa(address) |
-                                _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
-                                _SEGMENT_ENTRY_YOUNG |
-                                (ro ? _SEGMENT_ENTRY_PROTECT : 0);
+                        pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
                         address += PMD_SIZE;
+                        pages1m++;
                         continue;
                 }
                 if (pmd_none(*pm_dir)) {
@@ -128,12 +130,15 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                 }

                 pt_dir = pte_offset_kernel(pm_dir, address);
-                pte_val(*pt_dir) = __pa(address) |
-                        pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
+                pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
                 address += PAGE_SIZE;
+                pages4k++;
         }
         ret = 0;
 out:
+        update_page_count(PG_DIRECT_MAP_4K, pages4k);
+        update_page_count(PG_DIRECT_MAP_1M, pages1m);
+        update_page_count(PG_DIRECT_MAP_2G, pages2g);
         return ret;
 }

@@ -143,15 +148,15 @@ out:
  */
 static void vmem_remove_range(unsigned long start, unsigned long size)
 {
+        unsigned long pages4k, pages1m, pages2g;
         unsigned long end = start + size;
         unsigned long address = start;
         pgd_t *pg_dir;
         pud_t *pu_dir;
         pmd_t *pm_dir;
         pte_t *pt_dir;
-        pte_t  pte;

-        pte_val(pte) = _PAGE_INVALID;
+        pages4k = pages1m = pages2g = 0;
         while (address < end) {
                 pg_dir = pgd_offset_k(address);
                 if (pgd_none(*pg_dir)) {
@@ -166,6 +171,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
                 if (pud_large(*pu_dir)) {
                         pud_clear(pu_dir);
                         address += PUD_SIZE;
+                        pages2g++;
                         continue;
                 }
                 pm_dir = pmd_offset(pu_dir, address);
@@ -176,13 +182,18 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
                 if (pmd_large(*pm_dir)) {
                         pmd_clear(pm_dir);
                         address += PMD_SIZE;
+                        pages1m++;
                         continue;
                 }
                 pt_dir = pte_offset_kernel(pm_dir, address);
-                *pt_dir = pte;
+                pte_clear(&init_mm, address, pt_dir);
                 address += PAGE_SIZE;
+                pages4k++;
         }
         flush_tlb_kernel_range(start, end);
+        update_page_count(PG_DIRECT_MAP_4K, -pages4k);
+        update_page_count(PG_DIRECT_MAP_1M, -pages1m);
+        update_page_count(PG_DIRECT_MAP_2G, -pages2g);
 }

 /*
@@ -341,7 +352,7 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
         if (ret)
                 goto out_free;

-        ret = vmem_add_mem(start, size, 0);
+        ret = vmem_add_mem(start, size);
         if (ret)
                 goto out_remove;
         goto out;
@@ -362,31 +373,13 @@ out:
  */
 void __init vmem_map_init(void)
 {
-        unsigned long ro_start, ro_end;
+        unsigned long size = _eshared - _stext;
         struct memblock_region *reg;
-        phys_addr_t start, end;

-        ro_start = PFN_ALIGN((unsigned long)&_stext);
-        ro_end = (unsigned long)&_eshared & PAGE_MASK;
-        for_each_memblock(memory, reg) {
-                start = reg->base;
-                end = reg->base + reg->size;
-                if (start >= ro_end || end <= ro_start)
-                        vmem_add_mem(start, end - start, 0);
-                else if (start >= ro_start && end <= ro_end)
-                        vmem_add_mem(start, end - start, 1);
-                else if (start >= ro_start) {
-                        vmem_add_mem(start, ro_end - start, 1);
-                        vmem_add_mem(ro_end, end - ro_end, 0);
-                } else if (end < ro_end) {
-                        vmem_add_mem(start, ro_start - start, 0);
-                        vmem_add_mem(ro_start, end - ro_start, 1);
-                } else {
-                        vmem_add_mem(start, ro_start - start, 0);
-                        vmem_add_mem(ro_start, ro_end - ro_start, 1);
-                        vmem_add_mem(ro_end, end - ro_end, 0);
-                }
-        }
+        for_each_memblock(memory, reg)
+                vmem_add_mem(reg->base, reg->size);
+        set_memory_ro((unsigned long)_stext, size >> PAGE_SHIFT);
+        pr_info("Write protected kernel read-only data: %luk\n", size >> 10);
 }

 /*