Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Catalin Marinas:
 "Here are the main arm64 updates for 4.6.  There are some relatively
  intrusive changes to support KASLR, the reworking of the kernel
  virtual memory layout and initial page table creation.

  Summary:

   - Initial page table creation reworked to avoid breaking large block
     mappings (huge pages) into smaller ones.  The ARM architecture
     requires break-before-make in such cases to avoid TLB conflicts but
     that's not always possible on live page tables

   - Kernel virtual memory layout: the kernel image is no longer linked
     to the bottom of the linear mapping (PAGE_OFFSET) but at the bottom
     of the vmalloc space, allowing the kernel to be loaded (nearly)
     anywhere in physical RAM

   - Kernel ASLR: position independent kernel Image and modules are
     randomly mapped in the vmalloc space, with the randomness provided
     by UEFI (efi_get_random_bytes() patches merged via the arm64 tree,
     acked by Matt Fleming)

   - Implement relative exception tables for arm64, required by KASLR
     (initial code for ARCH_HAS_RELATIVE_EXTABLE added to lib/extable.c,
     but the actual x86 conversion is deferred to 4.7 because of the
     merge dependencies; a C sketch of the encoding follows this summary)

   - Support for the User Access Override feature of ARMv8.2: this
     allows uaccess functions (get_user etc.) to be implemented using
     LDTR/STTR instructions.  Such instructions, when run by the kernel,
     perform unprivileged accesses, adding an extra level of protection.
     The set_fs() macro is used to "upgrade" such instructions to
     privileged accesses via the UAO bit

   - Half-precision floating point support (part of ARMv8.2)

   - Optimisations for CPUs with or without a hardware prefetcher (using
     run-time code patching)

   - copy_page performance improvement to deal with 128 bytes at a time

   - Sanity checks on the CPU capabilities (via CPUID) to prevent
     incompatible secondary CPUs from being brought up (e.g.  weird
     big.LITTLE configurations)

   - valid_user_regs() reworked for better sanity check of the
     sigcontext information (restored pstate information)

   - ACPI parking protocol implementation

   - CONFIG_DEBUG_RODATA enabled by default

   - VDSO code marked as read-only

   - DEBUG_PAGEALLOC support

   - ARCH_HAS_UBSAN_SANITIZE_ALL enabled

   - Erratum workaround for the Cavium ThunderX SoC

   - set_pte_at() fix for PROT_NONE mappings

   - Code clean-ups"
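
As background for the relative exception table item above, here is a
minimal stand-alone C sketch of the encoding, assuming only what the
summary states: each entry stores 32-bit offsets relative to its own
address instead of absolute pointers, so the table needs no relocations
when the kernel is loaded at a random address. The struct and symbol
names below are invented for illustration and are not the kernel's API:

    #include <stdint.h>
    #include <stdio.h>

    /* Absolute entries would need a relocation per field once the image
     * can load anywhere; self-relative entries are position independent
     * and half the size. */
    struct exentry_abs { uint64_t insn, fixup; };   /* 16 bytes + relocs */
    struct exentry_rel { int32_t insn, fixup; };    /* 8 bytes, no relocs */

    /* The stored value is "target - &field", so adding the field's own
     * address recovers the target wherever the image was placed. */
    static uintptr_t rel_to_addr(const int32_t *field)
    {
            return (uintptr_t)field + (intptr_t)*field;
    }

    int main(void)
    {
            static char target;
            static struct exentry_rel entry;

            entry.insn = (int32_t)((intptr_t)&target - (intptr_t)&entry.insn);
            printf("recovered %p, expected %p\n",
                   (void *)rel_to_addr(&entry.insn), (void *)&target);
            return 0;
    }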

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (99 commits)
  arm64: kasan: Fix zero shadow mapping overriding kernel image shadow
  arm64: kasan: Use actual memory node when populating the kernel image shadow
  arm64: Update PTE_RDONLY in set_pte_at() for PROT_NONE permission
  arm64: Fix misspellings in comments.
  arm64: efi: add missing frame pointer assignment
  arm64: make mrs_s prefixing implicit in read_cpuid
  arm64: enable CONFIG_DEBUG_RODATA by default
  arm64: Rework valid_user_regs
  arm64: mm: check at build time that PAGE_OFFSET divides the VA space evenly
  arm64: KVM: Move kvm_call_hyp back to its original location
  arm64: mm: treat memstart_addr as a signed quantity
  arm64: mm: list kernel sections in order
  arm64: lse: deal with clobbered IP registers after branch via PLT
  arm64: mm: dump: Use VA_START directly instead of private LOWEST_ADDR
  arm64: kconfig: add submenu for 8.2 architectural features
  arm64: kernel: acpi: fix ioremap in ACPI parking protocol cpu_postboot
  arm64: Add support for Half precision floating point
  arm64: Remove fixmap include fragility
  arm64: Add workaround for Cavium erratum 27456
  arm64: mm: Mark .rodata as RO
  ...
Linus Torvalds
2016-03-17 20:03:47 -07:00
106 changed files with 3137 additions and 909 deletions

--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile

@@ -4,15 +4,16 @@ lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
 		   memcmp.o strcmp.o strncmp.o strlen.o strnlen.o	\
 		   strchr.o strrchr.o
 
-# Tell the compiler to treat all general purpose registers as
-# callee-saved, which allows for efficient runtime patching of the bl
-# instruction in the caller with an atomic instruction when supported by
-# the CPU. Result and argument registers are handled correctly, based on
-# the function prototype.
+# Tell the compiler to treat all general purpose registers (with the
+# exception of the IP registers, which are already handled by the caller
+# in case of a PLT) as callee-saved, which allows for efficient runtime
+# patching of the bl instruction in the caller with an atomic instruction
+# when supported by the CPU. Result and argument registers are handled
+# correctly, based on the function prototype.
 lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
 CFLAGS_atomic_ll_sc.o	:= -fcall-used-x0 -ffixed-x1 -ffixed-x2		\
 		   -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6		\
 		   -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9		\
 		   -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12	\
 		   -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15	\
-		   -fcall-saved-x16 -fcall-saved-x17 -fcall-saved-x18
+		   -fcall-saved-x18

--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S

@@ -33,28 +33,28 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
 1:
-USER(9f, str	xzr, [x0], #8	)
+uao_user_alternative 9f, str, sttr, xzr, x0, 8
 	subs	x1, x1, #8
 	b.pl	1b
 2:	adds	x1, x1, #4
 	b.mi	3f
-USER(9f, str	wzr, [x0], #4	)
+uao_user_alternative 9f, str, sttr, wzr, x0, 4
 	sub	x1, x1, #4
 3:	adds	x1, x1, #2
 	b.mi	4f
-USER(9f, strh	wzr, [x0], #2	)
+uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
 	sub	x1, x1, #2
 4:	adds	x1, x1, #1
 	b.mi	5f
-USER(9f, strb	wzr, [x0]	)
+uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:	mov	x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__clear_user)

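The uao_user_alternative lines above patch each user access at boot: the
CPU either runs the normal store (str) with PAN toggled around the
routine, or, when it implements UAO, an always-unprivileged store (sttr).
A rough user-space analogue of that choose-once dispatch follows — every
name here is invented for illustration, and the real kernel patches
instructions in place rather than using function pointers:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool cpu_has_uao;        /* stand-in for the ARM64_HAS_UAO cap */

    /* "str" flavour: privileged stores; on real hardware PAN has to be
     * cleared around them before they may touch user memory. */
    static void store_privileged(char *dst, const char *src, size_t n)
    {
            memcpy(dst, src, n);    /* SET_PSTATE_PAN(0)/(1) elided */
    }

    /* "sttr" flavour: always-unprivileged stores; set_fs(KERNEL_DS) can
     * still upgrade them via the PSTATE.UAO bit instead of repatching. */
    static void store_unprivileged(char *dst, const char *src, size_t n)
    {
            memcpy(dst, src, n);
    }

    static void (*uaccess_store)(char *, const char *, size_t);

    int main(void)
    {
            cpu_has_uao = true;     /* pretend the ID registers say so */
            uaccess_store = cpu_has_uao ? store_unprivileged
                                        : store_privileged;

            char buf[8];
            uaccess_store(buf, "abc", 4);
            printf("%s\n", buf);
            return 0;
    }
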
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S

@@ -34,7 +34,7 @@
  */
 
 	.macro ldrb1 ptr, regB, val
-	USER(9998f, ldrb \ptr, [\regB], \val)
+	uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val
 	.endm
 
 	.macro strb1 ptr, regB, val
@@ -42,7 +42,7 @@
 	.endm
 
 	.macro ldrh1 ptr, regB, val
-	USER(9998f, ldrh \ptr, [\regB], \val)
+	uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
 	.endm
 
 	.macro strh1 ptr, regB, val
@@ -50,7 +50,7 @@
 	.endm
 
 	.macro ldr1 ptr, regB, val
-	USER(9998f, ldr \ptr, [\regB], \val)
+	uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
 	.endm
 
 	.macro str1 ptr, regB, val
@@ -58,7 +58,7 @@
 	.endm
 
 	.macro ldp1 ptr, regB, regC, val
-	USER(9998f, ldp \ptr, \regB, [\regC], \val)
+	uao_ldp 9998f, \ptr, \regB, \regC, \val
 	.endm
 
 	.macro stp1 ptr, regB, regC, val
@@ -67,11 +67,11 @@
 
 end	.req	x5
 ENTRY(__copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0				// Nothing to copy
 	ret

--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S

@@ -35,44 +35,44 @@
  * x0 - bytes not copied
  */
 	.macro ldrb1 ptr, regB, val
-	USER(9998f, ldrb \ptr, [\regB], \val)
+	uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val
 	.endm
 
 	.macro strb1 ptr, regB, val
-	USER(9998f, strb \ptr, [\regB], \val)
+	uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val
 	.endm
 
 	.macro ldrh1 ptr, regB, val
-	USER(9998f, ldrh \ptr, [\regB], \val)
+	uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
 	.endm
 
 	.macro strh1 ptr, regB, val
-	USER(9998f, strh \ptr, [\regB], \val)
+	uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
 	.endm
 
 	.macro ldr1 ptr, regB, val
-	USER(9998f, ldr \ptr, [\regB], \val)
+	uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
 	.endm
 
 	.macro str1 ptr, regB, val
-	USER(9998f, str \ptr, [\regB], \val)
+	uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
 	.endm
 
 	.macro ldp1 ptr, regB, regC, val
-	USER(9998f, ldp \ptr, \regB, [\regC], \val)
+	uao_ldp 9998f, \ptr, \regB, \regC, \val
 	.endm
 
 	.macro stp1 ptr, regB, regC, val
-	USER(9998f, stp \ptr, \regB, [\regC], \val)
+	uao_stp 9998f, \ptr, \regB, \regC, \val
 	.endm
 
 end	.req	x5
 ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret

--- a/arch/arm64/lib/copy_page.S
+++ b/arch/arm64/lib/copy_page.S

@@ -18,6 +18,8 @@
 #include <linux/const.h>
 #include <asm/assembler.h>
 #include <asm/page.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
 
 /*
  * Copy a page from src to dest (both are page aligned)
@@ -27,20 +29,65 @@
  *	x1 - src
  */
 ENTRY(copy_page)
-	/* Assume cache line size is 64 bytes. */
-	prfm	pldl1strm, [x1, #64]
-1:	ldp	x2, x3, [x1]
+alternative_if_not ARM64_HAS_NO_HW_PREFETCH
+	nop
+	nop
+alternative_else
+	# Prefetch two cache lines ahead.
+	prfm	pldl1strm, [x1, #128]
+	prfm	pldl1strm, [x1, #256]
+alternative_endif
+
+	ldp	x2, x3, [x1]
 	ldp	x4, x5, [x1, #16]
 	ldp	x6, x7, [x1, #32]
 	ldp	x8, x9, [x1, #48]
-	add	x1, x1, #64
-	prfm	pldl1strm, [x1, #64]
+	ldp	x10, x11, [x1, #64]
+	ldp	x12, x13, [x1, #80]
+	ldp	x14, x15, [x1, #96]
+	ldp	x16, x17, [x1, #112]
+
+	mov	x18, #(PAGE_SIZE - 128)
+	add	x1, x1, #128
+1:
+	subs	x18, x18, #128
+
+alternative_if_not ARM64_HAS_NO_HW_PREFETCH
+	nop
+alternative_else
+	prfm	pldl1strm, [x1, #384]
+alternative_endif
+
+	stnp	x2, x3, [x0]
+	ldp	x2, x3, [x1]
+	stnp	x4, x5, [x0, #16]
+	ldp	x4, x5, [x1, #16]
+	stnp	x6, x7, [x0, #32]
+	ldp	x6, x7, [x1, #32]
+	stnp	x8, x9, [x0, #48]
+	ldp	x8, x9, [x1, #48]
+	stnp	x10, x11, [x0, #64]
+	ldp	x10, x11, [x1, #64]
+	stnp	x12, x13, [x0, #80]
+	ldp	x12, x13, [x1, #80]
+	stnp	x14, x15, [x0, #96]
+	ldp	x14, x15, [x1, #96]
+	stnp	x16, x17, [x0, #112]
+	ldp	x16, x17, [x1, #112]
+
+	add	x0, x0, #128
+	add	x1, x1, #128
+
+	b.gt	1b
+
 	stnp	x2, x3, [x0]
 	stnp	x4, x5, [x0, #16]
 	stnp	x6, x7, [x0, #32]
 	stnp	x8, x9, [x0, #48]
-	add	x0, x0, #64
-	tst	x1, #(PAGE_SIZE - 1)
-	b.ne	1b
+	stnp	x10, x11, [x0, #64]
+	stnp	x12, x13, [x0, #80]
+	stnp	x14, x15, [x0, #96]
+	stnp	x16, x17, [x0, #112]
 
 	ret
 ENDPROC(copy_page)

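In C terms, the copy_page rewrite above moves the page in 128-byte blocks
— sixteen 64-bit registers' worth per loop — with the next block's loads
issued while the previous block's stores drain. The non-temporal stnp
stores and the prefetch alternatives have no portable equivalent, but the
blocking itself can be sketched as below, assuming a 4 KiB page:

    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 4096                  /* assumed granule */

    /* Copy one page 128 bytes at a time, mirroring the x2..x17 register
     * block the assembly keeps in flight on each iteration. */
    static void copy_page_c(void *dst, const void *src)
    {
            uint64_t block[16];             /* 16 x 8 bytes = 128 bytes */
            char *d = dst;
            const char *s = src;
            size_t off;

            for (off = 0; off < PAGE_SIZE; off += sizeof(block)) {
                    memcpy(block, s + off, sizeof(block));  /* ldp batch  */
                    memcpy(d + off, block, sizeof(block));  /* stnp batch */
            }
    }

    int main(void)
    {
            static char src[PAGE_SIZE] = "page contents";
            static char dst[PAGE_SIZE];

            copy_page_c(dst, src);
            return memcmp(dst, src, PAGE_SIZE) != 0;
    }
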
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S

@@ -37,7 +37,7 @@
 	.endm
 
 	.macro strb1 ptr, regB, val
-	USER(9998f, strb \ptr, [\regB], \val)
+	uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val
 	.endm
 
 	.macro ldrh1 ptr, regB, val
@@ -45,7 +45,7 @@
 	.endm
 
 	.macro strh1 ptr, regB, val
-	USER(9998f, strh \ptr, [\regB], \val)
+	uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
 	.endm
 
 	.macro ldr1 ptr, regB, val
@@ -53,7 +53,7 @@
 	.endm
 
 	.macro str1 ptr, regB, val
-	USER(9998f, str \ptr, [\regB], \val)
+	uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
 	.endm
 
 	.macro ldp1 ptr, regB, regC, val
@@ -61,16 +61,16 @@
 	.endm
 
 	.macro stp1 ptr, regB, regC, val
-	USER(9998f, stp \ptr, \regB, [\regC], \val)
+	uao_stp 9998f, \ptr, \regB, \regC, \val
 	.endm
 
 end	.req	x5
 ENTRY(__copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret

--- a/arch/arm64/lib/memcmp.S
+++ b/arch/arm64/lib/memcmp.S

@@ -211,7 +211,7 @@ CPU_LE( lsr	tmp2, tmp2, tmp1 )
 .Lunequal_proc:
 	cbz	diff, .Lremain8
 
-/*There is differnence occured in the latest comparison.*/
+/* There is difference occurred in the latest comparison. */
 .Lnot_limit:
 /*
  * For little endian,reverse the low significant equal bits into MSB,then