Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:

 - New entropy generation for the pseudo random number generator.

 - Early boot printk output via sclp to help debug crashes on boot.
   This needs to be enabled with a kernel parameter.

 - Add proper no-execute support with a bit in the page table entry.

 - Bug fixes and cleanups.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (65 commits)
  s390/syscall: fix single stepped system calls
  s390/zcrypt: make ap_bus explicitly non-modular
  s390/zcrypt: Removed unneeded debug feature directory creation.
  s390: add missing "do {} while (0)" loop constructs to multiline macros
  s390/mm: add cond_resched call to kernel page table dumper
  s390: get rid of MACHINE_HAS_PFMF and MACHINE_HAS_HPAGE
  s390/mm: make memory_block_size_bytes available for !MEMORY_HOTPLUG
  s390: replace ACCESS_ONCE with READ_ONCE
  s390: Audit and remove any remaining unnecessary uses of module.h
  s390: mm: Audit and remove any unnecessary uses of module.h
  s390: kernel: Audit and remove any unnecessary uses of module.h
  s390/kdump: Use "LINUX" ELF note name instead of "CORE"
  s390: add no-execute support
  s390: report new vector facilities
  s390: use correct input data address for setup_randomness
  s390/sclp: get rid of common response code handling
  s390/sclp: don't add new lines to each printed string
  s390/sclp: make early sclp code readable
  s390/sclp: disable early sclp code as soon as the base sclp driver is active
  s390/sclp: move early printk code to drivers
  ...
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sysctl.h>

@@ -1,6 +1,6 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
@@ -49,8 +49,8 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
seq_printf(m, "I\n");
return;
}
seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
seq_putc(m, '\n');
seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
}

static void note_page(struct seq_file *m, struct pg_state *st,
@@ -117,7 +117,8 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
st->current_address = addr;
pte = pte_offset_kernel(pmd, addr);
prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
prot = pte_val(*pte) &
(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
note_page(m, st, prot, 4);
addr += PAGE_SIZE;
}
@@ -135,7 +136,9 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd)) {
if (pmd_large(*pmd)) {
prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT;
prot = pmd_val(*pmd) &
(_SEGMENT_ENTRY_PROTECT |
_SEGMENT_ENTRY_NOEXEC);
note_page(m, st, prot, 3);
} else
walk_pte_level(m, st, pmd, addr);
@@ -157,7 +160,9 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pud = pud_offset(pgd, addr);
if (!pud_none(*pud))
if (pud_large(*pud)) {
prot = pud_val(*pud) & _REGION_ENTRY_PROTECT;
prot = pud_val(*pud) &
(_REGION_ENTRY_PROTECT |
_REGION_ENTRY_NOEXEC);
note_page(m, st, prot, 2);
} else
walk_pmd_level(m, st, pud, addr);
@@ -183,6 +188,7 @@ static void walk_pgd_level(struct seq_file *m)
else
note_page(m, &st, _PAGE_INVALID, 1);
addr += PGDIR_SIZE;
cond_resched();
}
/* Flush out the last page */
st.current_address = max_addr;

@@ -14,7 +14,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/ctype.h>
#include <linux/ioport.h>
@@ -154,7 +154,7 @@ dcss_mkname(char *name, char *dcss_name)
if (name[i] == '\0')
break;
dcss_name[i] = toupper(name[i]);
};
}
for (; i < 8; i++)
dcss_name[i] = ' ';
ASCEBC(dcss_name, 8);

@@ -311,12 +311,34 @@ static noinline void do_sigbus(struct pt_regs *regs)
force_sig_info(SIGBUS, &si, tsk);
}

static noinline void do_fault_error(struct pt_regs *regs, int fault)
static noinline int signal_return(struct pt_regs *regs)
{
u16 instruction;
int rc;

rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
if (rc)
return rc;
if (instruction == 0x0a77) {
set_pt_regs_flag(regs, PIF_SYSCALL);
regs->int_code = 0x00040077;
return 0;
} else if (instruction == 0x0aad) {
set_pt_regs_flag(regs, PIF_SYSCALL);
regs->int_code = 0x000400ad;
return 0;
}
return -EACCES;
}

static noinline void do_fault_error(struct pt_regs *regs, int access, int fault)
{
int si_code;

switch (fault) {
case VM_FAULT_BADACCESS:
if (access == VM_EXEC && signal_return(regs) == 0)
break;
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (user_mode(regs)) {
@@ -324,7 +346,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
si_code = (fault == VM_FAULT_BADMAP) ?
SEGV_MAPERR : SEGV_ACCERR;
do_sigsegv(regs, si_code);
return;
break;
}
case VM_FAULT_BADCONTEXT:
case VM_FAULT_PFAULT:
@@ -525,7 +547,7 @@ out:
void do_protection_exception(struct pt_regs *regs)
{
unsigned long trans_exc_code;
int fault;
int access, fault;

trans_exc_code = regs->int_parm_long;
/*
@@ -544,9 +566,17 @@ void do_protection_exception(struct pt_regs *regs)
do_low_address(regs);
return;
}
fault = do_exception(regs, VM_WRITE);
if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
(regs->psw.addr & PAGE_MASK);
access = VM_EXEC;
fault = VM_FAULT_BADACCESS;
} else {
access = VM_WRITE;
fault = do_exception(regs, access);
}
if (unlikely(fault))
do_fault_error(regs, fault);
do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

@@ -557,7 +587,7 @@ void do_dat_exception(struct pt_regs *regs)
access = VM_READ | VM_EXEC | VM_WRITE;
fault = do_exception(regs, access);
if (unlikely(fault))
do_fault_error(regs, fault);
do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

@@ -59,6 +59,8 @@ static inline unsigned long __pte_to_rste(pte_t pte)
rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
_SEGMENT_ENTRY_SOFT_DIRTY);
#endif
rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
_SEGMENT_ENTRY_NOEXEC);
} else
rste = _SEGMENT_ENTRY_INVALID;
return rste;
@@ -113,6 +115,8 @@ static inline pte_t __rste_to_pte(unsigned long rste)
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
_PAGE_DIRTY);
#endif
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
_PAGE_NOEXEC);
} else
pte_val(pte) = _PAGE_INVALID;
return pte;
@@ -121,7 +125,11 @@ static inline pte_t __rste_to_pte(unsigned long rste)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
unsigned long rste = __pte_to_rste(pte);
unsigned long rste;

rste = __pte_to_rste(pte);
if (!MACHINE_HAS_NX)
rste &= ~_SEGMENT_ENTRY_NOEXEC;

/* Set correct table type for 2G hugepages */
if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)

@@ -137,6 +137,9 @@ void __init mem_init(void)

void free_initmem(void)
{
__set_memory((unsigned long) _sinittext,
(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RW | SET_MEMORY_NX);
free_initmem_default(POISON_FREE_INITMEM);
}

@@ -148,6 +151,15 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
}
#endif

unsigned long memory_block_size_bytes(void)
{
/*
* Make sure the memory block size is always greater
* or equal than the memory increment size.
*/
return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
@@ -191,15 +203,6 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
return rc;
}

unsigned long memory_block_size_bytes(void)
{
/*
* Make sure the memory block size is always greater
* or equal than the memory increment size.
*/
return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{

@@ -5,7 +5,6 @@
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/debugfs.h>
@@ -19,6 +18,8 @@

static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
{
memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
start, start + size - 1);
memblock_add_range(&memblock.memory, start, size, 0, 0);
memblock_add_range(&memblock.physmem, start, size, 0, 0);
}
@@ -39,7 +40,8 @@ void __init detect_memory_memblock(void)
memblock_set_bottom_up(true);
do {
size = 0;
type = tprot(addr);
/* assume lowcore is writable */
type = addr ? tprot(addr) : CHUNK_READ_WRITE;
do {
size += rzm;
if (max_physmem_end && addr + size >= max_physmem_end)
@@ -55,4 +57,5 @@ void __init detect_memory_memblock(void)
memblock_set_bottom_up(false);
if (!max_physmem_end)
max_physmem_end = memblock_end_of_DRAM();
memblock_dump_all();
}

@@ -26,11 +26,11 @@
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>

static unsigned long stack_maxrandom_size(void)
{

@@ -3,7 +3,6 @@
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
@@ -81,24 +80,24 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
}
}

struct cpa {
unsigned int set_ro : 1;
unsigned int clear_ro : 1;
};

static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
struct cpa cpa)
unsigned long flags)
{
pte_t *ptep, new;

ptep = pte_offset(pmdp, addr);
do {
if (pte_none(*ptep))
new = *ptep;
if (pte_none(new))
return -EINVAL;
if (cpa.set_ro)
new = pte_wrprotect(*ptep);
else if (cpa.clear_ro)
new = pte_mkwrite(pte_mkdirty(*ptep));
if (flags & SET_MEMORY_RO)
new = pte_wrprotect(new);
else if (flags & SET_MEMORY_RW)
new = pte_mkwrite(pte_mkdirty(new));
if ((flags & SET_MEMORY_NX) && MACHINE_HAS_NX)
pte_val(new) |= _PAGE_NOEXEC;
else if (flags & SET_MEMORY_X)
pte_val(new) &= ~_PAGE_NOEXEC;
pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
ptep++;
addr += PAGE_SIZE;
@@ -112,14 +111,17 @@ static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
unsigned long pte_addr, prot;
pte_t *pt_dir, *ptep;
pmd_t new;
int i, ro;
int i, ro, nx;

pt_dir = vmem_pte_alloc();
if (!pt_dir)
return -ENOMEM;
pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
nx = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_NOEXEC);
prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
if (!nx)
prot &= ~_PAGE_NOEXEC;
ptep = pt_dir;
for (i = 0; i < PTRS_PER_PTE; i++) {
pte_val(*ptep) = pte_addr | prot;
@@ -133,19 +135,24 @@ static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
return 0;
}

static void modify_pmd_page(pmd_t *pmdp, unsigned long addr, struct cpa cpa)
static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
unsigned long flags)
{
pmd_t new;
pmd_t new = *pmdp;

if (cpa.set_ro)
new = pmd_wrprotect(*pmdp);
else if (cpa.clear_ro)
new = pmd_mkwrite(pmd_mkdirty(*pmdp));
if (flags & SET_MEMORY_RO)
new = pmd_wrprotect(new);
else if (flags & SET_MEMORY_RW)
new = pmd_mkwrite(pmd_mkdirty(new));
if ((flags & SET_MEMORY_NX) && MACHINE_HAS_NX)
pmd_val(new) |= _SEGMENT_ENTRY_NOEXEC;
else if (flags & SET_MEMORY_X)
pmd_val(new) &= ~_SEGMENT_ENTRY_NOEXEC;
pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}

static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
struct cpa cpa)
unsigned long flags)
{
unsigned long next;
pmd_t *pmdp;
@@ -163,9 +170,9 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
return rc;
continue;
}
modify_pmd_page(pmdp, addr, cpa);
modify_pmd_page(pmdp, addr, flags);
} else {
rc = walk_pte_level(pmdp, addr, next, cpa);
rc = walk_pte_level(pmdp, addr, next, flags);
if (rc)
return rc;
}
@@ -181,14 +188,17 @@ static int split_pud_page(pud_t *pudp, unsigned long addr)
unsigned long pmd_addr, prot;
pmd_t *pm_dir, *pmdp;
pud_t new;
int i, ro;
int i, ro, nx;

pm_dir = vmem_pmd_alloc();
if (!pm_dir)
return -ENOMEM;
pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
nx = !!(pud_val(*pudp) & _REGION_ENTRY_NOEXEC);
prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
if (!nx)
prot &= ~_SEGMENT_ENTRY_NOEXEC;
pmdp = pm_dir;
for (i = 0; i < PTRS_PER_PMD; i++) {
pmd_val(*pmdp) = pmd_addr | prot;
@@ -202,19 +212,24 @@ static int split_pud_page(pud_t *pudp, unsigned long addr)
return 0;
}

static void modify_pud_page(pud_t *pudp, unsigned long addr, struct cpa cpa)
static void modify_pud_page(pud_t *pudp, unsigned long addr,
unsigned long flags)
{
pud_t new;
pud_t new = *pudp;

if (cpa.set_ro)
new = pud_wrprotect(*pudp);
else if (cpa.clear_ro)
new = pud_mkwrite(pud_mkdirty(*pudp));
if (flags & SET_MEMORY_RO)
new = pud_wrprotect(new);
else if (flags & SET_MEMORY_RW)
new = pud_mkwrite(pud_mkdirty(new));
if ((flags & SET_MEMORY_NX) && MACHINE_HAS_NX)
pud_val(new) |= _REGION_ENTRY_NOEXEC;
else if (flags & SET_MEMORY_X)
pud_val(new) &= ~_REGION_ENTRY_NOEXEC;
pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}

static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
struct cpa cpa)
unsigned long flags)
{
unsigned long next;
pud_t *pudp;
@@ -232,9 +247,9 @@ static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
break;
continue;
}
modify_pud_page(pudp, addr, cpa);
modify_pud_page(pudp, addr, flags);
} else {
rc = walk_pmd_level(pudp, addr, next, cpa);
rc = walk_pmd_level(pudp, addr, next, flags);
}
pudp++;
addr = next;
@@ -246,7 +261,7 @@ static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
static DEFINE_MUTEX(cpa_mutex);

static int change_page_attr(unsigned long addr, unsigned long end,
struct cpa cpa)
unsigned long flags)
{
unsigned long next;
int rc = -EINVAL;
@@ -262,7 +277,7 @@ static int change_page_attr(unsigned long addr, unsigned long end,
if (pgd_none(*pgdp))
break;
next = pgd_addr_end(addr, end);
rc = walk_pud_level(pgdp, addr, next, cpa);
rc = walk_pud_level(pgdp, addr, next, flags);
if (rc)
break;
cond_resched();
@@ -271,35 +286,10 @@ static int change_page_attr(unsigned long addr, unsigned long end,
return rc;
}

int set_memory_ro(unsigned long addr, int numpages)
int __set_memory(unsigned long addr, int numpages, unsigned long flags)
{
struct cpa cpa = {
.set_ro = 1,
};

addr &= PAGE_MASK;
return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
}

int set_memory_rw(unsigned long addr, int numpages)
{
struct cpa cpa = {
.clear_ro = 1,
};

addr &= PAGE_MASK;
return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
}

/* not possible */
int set_memory_nx(unsigned long addr, int numpages)
{
return 0;
}

int set_memory_x(unsigned long addr, int numpages)
{
return 0;
return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
@@ -339,7 +329,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
nr = min(numpages - i, nr);
if (enable) {
for (j = 0; j < nr; j++) {
pte_val(*pte) = address | pgprot_val(PAGE_KERNEL);
pte_val(*pte) &= ~_PAGE_INVALID;
address += PAGE_SIZE;
pte++;
}

@@ -275,6 +275,8 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
{
pgste_t pgste;

if (!MACHINE_HAS_NX)
pte_val(pte) &= ~_PAGE_NOEXEC;
if (mm_has_pgste(mm)) {
pgste = pgste_get(ptep);
pgste_set_key(ptep, pgste, pte, mm);

@@ -6,7 +6,7 @@
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
@@ -79,6 +79,7 @@ pte_t __ref *vmem_pte_alloc(void)
*/
static int vmem_add_mem(unsigned long start, unsigned long size)
{
unsigned long pgt_prot, sgt_prot, r3_prot;
unsigned long pages4k, pages1m, pages2g;
unsigned long end = start + size;
unsigned long address = start;
@@ -88,6 +89,14 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
pte_t *pt_dir;
int ret = -ENOMEM;

pgt_prot = pgprot_val(PAGE_KERNEL);
sgt_prot = pgprot_val(SEGMENT_KERNEL);
r3_prot = pgprot_val(REGION3_KERNEL);
if (!MACHINE_HAS_NX) {
pgt_prot &= ~_PAGE_NOEXEC;
sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
r3_prot &= ~_REGION_ENTRY_NOEXEC;
}
pages4k = pages1m = pages2g = 0;
while (address < end) {
pg_dir = pgd_offset_k(address);
@@ -101,7 +110,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
!debug_pagealloc_enabled()) {
pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
pud_val(*pu_dir) = address | r3_prot;
address += PUD_SIZE;
pages2g++;
continue;
@@ -116,7 +125,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
!debug_pagealloc_enabled()) {
pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
pmd_val(*pm_dir) = address | sgt_prot;
address += PMD_SIZE;
pages1m++;
continue;
@@ -129,7 +138,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
}

pt_dir = pte_offset_kernel(pm_dir, address);
pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
pte_val(*pt_dir) = address | pgt_prot;
address += PAGE_SIZE;
pages4k++;
}
@@ -200,6 +209,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
*/
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
unsigned long pgt_prot, sgt_prot;
unsigned long address = start;
pgd_t *pg_dir;
pud_t *pu_dir;
@@ -207,6 +217,12 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pte_t *pt_dir;
int ret = -ENOMEM;

pgt_prot = pgprot_val(PAGE_KERNEL);
sgt_prot = pgprot_val(SEGMENT_KERNEL);
if (!MACHINE_HAS_NX) {
pgt_prot &= ~_PAGE_NOEXEC;
sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
}
for (address = start; address < end;) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
@@ -238,8 +254,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
new_page = vmemmap_alloc_block(PMD_SIZE, node);
if (!new_page)
goto out;
pmd_val(*pm_dir) = __pa(new_page) |
_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
@@ -259,8 +274,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
new_page = vmemmap_alloc_block(PAGE_SIZE, node);
if (!new_page)
goto out;
pte_val(*pt_dir) =
__pa(new_page) | pgprot_val(PAGE_KERNEL);
pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
}
address += PAGE_SIZE;
}
@@ -372,13 +386,21 @@ out:
*/
void __init vmem_map_init(void)
{
unsigned long size = _eshared - _stext;
struct memblock_region *reg;

for_each_memblock(memory, reg)
vmem_add_mem(reg->base, reg->size);
set_memory_ro((unsigned long)_stext, size >> PAGE_SHIFT);
pr_info("Write protected kernel read-only data: %luk\n", size >> 10);
__set_memory((unsigned long) _stext,
(_etext - _stext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
__set_memory((unsigned long) _etext,
(_eshared - _etext) >> PAGE_SHIFT,
SET_MEMORY_RO);
__set_memory((unsigned long) _sinittext,
(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
pr_info("Write protected kernel read-only data: %luk\n",
(_eshared - _stext) >> 10);
}

/*