Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asm updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Cross-arch changes to move the linker sections for NOTES and
     EXCEPTION_TABLE into the RO_DATA area, where they belong on most
     architectures; see the linker-script sketch below. (Kees Cook)

   - Switch the x86 linker fill byte from 0x90 (NOP) to 0xcc (INT3), to
     trap jumps into the middle of those padding areas instead of
     letting execution slide through; see the fill-byte sketch below.
     (Kees Cook)

   - A thorough cleanup of symbol definitions within x86 assembler code.
     The rather randomly named macros got streamlined around a
     (hopefully) straightforward naming scheme:

        SYM_START(name, linkage, align...)
        SYM_END(name, sym_type)

        SYM_FUNC_START(name)
        SYM_FUNC_END(name)

        SYM_CODE_START(name)
        SYM_CODE_END(name)

        SYM_DATA_START(name)
        SYM_DATA_END(name)

      etc. - plus roughly three times as many variants of these basic
      primitives, covering the label, local-symbol and attribute cases,
      expressed via suffixes such as _LOCAL and _ALIAS (illustrated
      below).

     No change in functionality intended. (Jiri Slaby)

   - Misc other changes, cleanups and smaller fixes"
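
To make the NOTES/EXCEPTION_TABLE move concrete, here is a rough
linker-script sketch in the style of an arch vmlinux.lds.S (illustrative
only: the exact layout and the RO_EXCEPTION_TABLE_ALIGN opt-in name are
from memory of this series, not quoted from this page):

        /* Before: the read-only pieces were laid out separately */
        RO_DATA(PAGE_SIZE)
        EXCEPTION_TABLE(16)
        NOTES

        /* After: the architecture opts in up front ... */
        #define RO_EXCEPTION_TABLE_ALIGN        16

        /* ... and RO_DATA() itself now emits .notes and __ex_table
         * inside the read-only region, so they get mapped read-only */
        RO_DATA(PAGE_SIZE)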
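
The NOP-to-INT3 switch is likewise a linker-script detail: it changes the
fill expression used to pad gaps in the x86 output sections. A minimal
sketch of the idea (section contents are placeholders, not copied from
the real arch/x86 script):

        .text : {
                *(.text .text.*)
        } =0xcc         /* was =0x90: padding bytes are now INT3, so a
                           stray jump into a padding area traps right
                           away instead of sliding through NOPs */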
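
To make the new naming scheme concrete before the conversions below, a
minimal sketch of how the annotations read in a .S file (my_func,
my_helper and .Lfixup are made-up names, and the comments paraphrase the
intent rather than the exact macro expansions):

        SYM_FUNC_START(my_func)                 /* global, C-callable function; replaces ENTRY */
                ret
        SYM_FUNC_END(my_func)                   /* sets ELF type and size; replaces ENDPROC */

        SYM_FUNC_START_LOCAL(my_helper)         /* as above, but the symbol stays file-local */
                ret
        SYM_FUNC_END(my_helper)

        SYM_CODE_START_LOCAL(.Lfixup)           /* non-function code such as exception fixup paths */
                ret
        SYM_CODE_END(.Lfixup)

The hunks below are mechanical instances of exactly this pattern.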

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
  x86/entry/64: Remove pointless jump in paranoid_exit
  x86/entry/32: Remove unused resume_userspace label
  x86/build/vdso: Remove meaningless CFLAGS_REMOVE_*.o
  m68k: Convert missed RODATA to RO_DATA
  x86/vmlinux: Use INT3 instead of NOP for linker fill bytes
  x86/mm: Report actual image regions in /proc/iomem
  x86/mm: Report which part of kernel image is freed
  x86/mm: Remove redundant address-of operators on addresses
  xtensa: Move EXCEPTION_TABLE to RO_DATA segment
  powerpc: Move EXCEPTION_TABLE to RO_DATA segment
  parisc: Move EXCEPTION_TABLE to RO_DATA segment
  microblaze: Move EXCEPTION_TABLE to RO_DATA segment
  ia64: Move EXCEPTION_TABLE to RO_DATA segment
  h8300: Move EXCEPTION_TABLE to RO_DATA segment
  c6x: Move EXCEPTION_TABLE to RO_DATA segment
  arm64: Move EXCEPTION_TABLE to RO_DATA segment
  alpha: Move EXCEPTION_TABLE to RO_DATA segment
  x86/vmlinux: Move EXCEPTION_TABLE to RO_DATA segment
  x86/vmlinux: Actually use _etext for the end of the text segment
  vmlinux.lds.h: Allow EXCEPTION_TABLE to live in RO_DATA
  ...
Committed by Linus Torvalds, 2019-11-26 10:42:40 -08:00
 165 files changed, 1656 insertions(+), 1188 deletions(-)

@@ -20,10 +20,10 @@
#define BEGIN(op) \
.macro endp; \
ENDPROC(atomic64_##op##_386); \
SYM_FUNC_END(atomic64_##op##_386); \
.purgem endp; \
.endm; \
ENTRY(atomic64_##op##_386); \
SYM_FUNC_START(atomic64_##op##_386); \
LOCK v;
#define ENDP endp

@@ -16,12 +16,12 @@
cmpxchg8b (\reg)
.endm
ENTRY(atomic64_read_cx8)
SYM_FUNC_START(atomic64_read_cx8)
read64 %ecx
ret
ENDPROC(atomic64_read_cx8)
SYM_FUNC_END(atomic64_read_cx8)
ENTRY(atomic64_set_cx8)
SYM_FUNC_START(atomic64_set_cx8)
1:
/* we don't need LOCK_PREFIX since aligned 64-bit writes
* are atomic on 586 and newer */
@@ -29,19 +29,19 @@ ENTRY(atomic64_set_cx8)
jne 1b
ret
ENDPROC(atomic64_set_cx8)
SYM_FUNC_END(atomic64_set_cx8)
ENTRY(atomic64_xchg_cx8)
SYM_FUNC_START(atomic64_xchg_cx8)
1:
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
ret
ENDPROC(atomic64_xchg_cx8)
SYM_FUNC_END(atomic64_xchg_cx8)
.macro addsub_return func ins insc
ENTRY(atomic64_\func\()_return_cx8)
SYM_FUNC_START(atomic64_\func\()_return_cx8)
pushl %ebp
pushl %ebx
pushl %esi
@@ -69,14 +69,14 @@ ENTRY(atomic64_\func\()_return_cx8)
popl %ebx
popl %ebp
ret
ENDPROC(atomic64_\func\()_return_cx8)
SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm
addsub_return add add adc
addsub_return sub sub sbb
.macro incdec_return func ins insc
ENTRY(atomic64_\func\()_return_cx8)
SYM_FUNC_START(atomic64_\func\()_return_cx8)
pushl %ebx
read64 %esi
@@ -94,13 +94,13 @@ ENTRY(atomic64_\func\()_return_cx8)
movl %ecx, %edx
popl %ebx
ret
ENDPROC(atomic64_\func\()_return_cx8)
SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm
incdec_return inc add adc
incdec_return dec sub sbb
ENTRY(atomic64_dec_if_positive_cx8)
SYM_FUNC_START(atomic64_dec_if_positive_cx8)
pushl %ebx
read64 %esi
@@ -119,9 +119,9 @@ ENTRY(atomic64_dec_if_positive_cx8)
movl %ecx, %edx
popl %ebx
ret
ENDPROC(atomic64_dec_if_positive_cx8)
SYM_FUNC_END(atomic64_dec_if_positive_cx8)
ENTRY(atomic64_add_unless_cx8)
SYM_FUNC_START(atomic64_add_unless_cx8)
pushl %ebp
pushl %ebx
/* these just push these two parameters on the stack */
@@ -155,9 +155,9 @@ ENTRY(atomic64_add_unless_cx8)
jne 2b
xorl %eax, %eax
jmp 3b
ENDPROC(atomic64_add_unless_cx8)
SYM_FUNC_END(atomic64_add_unless_cx8)
ENTRY(atomic64_inc_not_zero_cx8)
SYM_FUNC_START(atomic64_inc_not_zero_cx8)
pushl %ebx
read64 %esi
@@ -177,4 +177,4 @@ ENTRY(atomic64_inc_not_zero_cx8)
3:
popl %ebx
ret
ENDPROC(atomic64_inc_not_zero_cx8)
SYM_FUNC_END(atomic64_inc_not_zero_cx8)

@@ -46,7 +46,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
* Fortunately, it is easy to convert 2-byte alignment to 4-byte
* alignment for the unrolled loop.
*/
ENTRY(csum_partial)
SYM_FUNC_START(csum_partial)
pushl %esi
pushl %ebx
movl 20(%esp),%eax # Function arg: unsigned int sum
@@ -128,13 +128,13 @@ ENTRY(csum_partial)
popl %ebx
popl %esi
ret
ENDPROC(csum_partial)
SYM_FUNC_END(csum_partial)
#else
/* Version for PentiumII/PPro */
ENTRY(csum_partial)
SYM_FUNC_START(csum_partial)
pushl %esi
pushl %ebx
movl 20(%esp),%eax # Function arg: unsigned int sum
@@ -246,7 +246,7 @@ ENTRY(csum_partial)
popl %ebx
popl %esi
ret
ENDPROC(csum_partial)
SYM_FUNC_END(csum_partial)
#endif
EXPORT_SYMBOL(csum_partial)
@@ -280,7 +280,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
#define ARGBASE 16
#define FP 12
ENTRY(csum_partial_copy_generic)
SYM_FUNC_START(csum_partial_copy_generic)
subl $4,%esp
pushl %edi
pushl %esi
@@ -398,7 +398,7 @@ DST( movb %cl, (%edi) )
popl %edi
popl %ecx # equivalent to addl $4,%esp
ret
ENDPROC(csum_partial_copy_generic)
SYM_FUNC_END(csum_partial_copy_generic)
#else
@@ -416,7 +416,7 @@ ENDPROC(csum_partial_copy_generic)
#define ARGBASE 12
ENTRY(csum_partial_copy_generic)
SYM_FUNC_START(csum_partial_copy_generic)
pushl %ebx
pushl %edi
pushl %esi
@@ -483,7 +483,7 @@ DST( movb %dl, (%edi) )
popl %edi
popl %ebx
ret
ENDPROC(csum_partial_copy_generic)
SYM_FUNC_END(csum_partial_copy_generic)
#undef ROUND
#undef ROUND1

@@ -13,15 +13,15 @@
* Zero a page.
* %rdi - page
*/
ENTRY(clear_page_rep)
SYM_FUNC_START(clear_page_rep)
movl $4096/8,%ecx
xorl %eax,%eax
rep stosq
ret
ENDPROC(clear_page_rep)
SYM_FUNC_END(clear_page_rep)
EXPORT_SYMBOL_GPL(clear_page_rep)
ENTRY(clear_page_orig)
SYM_FUNC_START(clear_page_orig)
xorl %eax,%eax
movl $4096/64,%ecx
.p2align 4
@@ -40,13 +40,13 @@ ENTRY(clear_page_orig)
jnz .Lloop
nop
ret
ENDPROC(clear_page_orig)
SYM_FUNC_END(clear_page_orig)
EXPORT_SYMBOL_GPL(clear_page_orig)
ENTRY(clear_page_erms)
SYM_FUNC_START(clear_page_erms)
movl $4096,%ecx
xorl %eax,%eax
rep stosb
ret
ENDPROC(clear_page_erms)
SYM_FUNC_END(clear_page_erms)
EXPORT_SYMBOL_GPL(clear_page_erms)

@@ -13,7 +13,7 @@
* %rcx : high 64 bits of new value
* %al : Operation successful
*/
ENTRY(this_cpu_cmpxchg16b_emu)
SYM_FUNC_START(this_cpu_cmpxchg16b_emu)
#
# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
@@ -44,4 +44,4 @@ ENTRY(this_cpu_cmpxchg16b_emu)
xor %al,%al
ret
ENDPROC(this_cpu_cmpxchg16b_emu)
SYM_FUNC_END(this_cpu_cmpxchg16b_emu)

@@ -13,7 +13,7 @@
* %ebx : low 32 bits of new value
* %ecx : high 32 bits of new value
*/
ENTRY(cmpxchg8b_emu)
SYM_FUNC_START(cmpxchg8b_emu)
#
# Emulate 'cmpxchg8b (%esi)' on UP except we don't
@@ -42,5 +42,5 @@ ENTRY(cmpxchg8b_emu)
popfl
ret
ENDPROC(cmpxchg8b_emu)
SYM_FUNC_END(cmpxchg8b_emu)
EXPORT_SYMBOL(cmpxchg8b_emu)

@@ -13,15 +13,15 @@
* prefetch distance based on SMP/UP.
*/
ALIGN
ENTRY(copy_page)
SYM_FUNC_START(copy_page)
ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
movl $4096/8, %ecx
rep movsq
ret
ENDPROC(copy_page)
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
ENTRY(copy_page_regs)
SYM_FUNC_START_LOCAL(copy_page_regs)
subq $2*8, %rsp
movq %rbx, (%rsp)
movq %r12, 1*8(%rsp)
@@ -86,4 +86,4 @@ ENTRY(copy_page_regs)
movq 1*8(%rsp), %r12
addq $2*8, %rsp
ret
ENDPROC(copy_page_regs)
SYM_FUNC_END(copy_page_regs)

@@ -53,7 +53,7 @@
* Output:
* eax uncopied bytes or 0 if successful.
*/
ENTRY(copy_user_generic_unrolled)
SYM_FUNC_START(copy_user_generic_unrolled)
ASM_STAC
cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */
@@ -136,7 +136,7 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE_UA(19b, 40b)
_ASM_EXTABLE_UA(21b, 50b)
_ASM_EXTABLE_UA(22b, 50b)
ENDPROC(copy_user_generic_unrolled)
SYM_FUNC_END(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions.
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(copy_user_generic_unrolled)
* Output:
* eax uncopied bytes or 0 if successful.
*/
ENTRY(copy_user_generic_string)
SYM_FUNC_START(copy_user_generic_string)
ASM_STAC
cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */
@@ -182,7 +182,7 @@ ENTRY(copy_user_generic_string)
_ASM_EXTABLE_UA(1b, 11b)
_ASM_EXTABLE_UA(3b, 12b)
ENDPROC(copy_user_generic_string)
SYM_FUNC_END(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
/*
@@ -197,7 +197,7 @@ EXPORT_SYMBOL(copy_user_generic_string)
* Output:
* eax uncopied bytes or 0 if successful.
*/
ENTRY(copy_user_enhanced_fast_string)
SYM_FUNC_START(copy_user_enhanced_fast_string)
ASM_STAC
cmpl $64,%edx
jb .L_copy_short_string /* less then 64 bytes, avoid the costly 'rep' */
@@ -214,7 +214,7 @@ ENTRY(copy_user_enhanced_fast_string)
.previous
_ASM_EXTABLE_UA(1b, 12b)
ENDPROC(copy_user_enhanced_fast_string)
SYM_FUNC_END(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
/*
@@ -230,8 +230,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string)
* Output:
* eax uncopied bytes or 0 if successful.
*/
ALIGN;
.Lcopy_user_handle_tail:
SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
movl %edx,%ecx
1: rep movsb
2: mov %ecx,%eax
@@ -239,7 +238,7 @@ ALIGN;
ret
_ASM_EXTABLE_UA(1b, 2b)
END(.Lcopy_user_handle_tail)
SYM_CODE_END(.Lcopy_user_handle_tail)
/*
* copy_user_nocache - Uncached memory copy with exception handling
@@ -250,7 +249,7 @@ END(.Lcopy_user_handle_tail)
* - Require 8-byte alignment when size is 8 bytes or larger.
* - Require 4-byte alignment when size is 4 bytes.
*/
ENTRY(__copy_user_nocache)
SYM_FUNC_START(__copy_user_nocache)
ASM_STAC
/* If size is less than 8 bytes, go to 4-byte copy */
@@ -389,5 +388,5 @@ ENTRY(__copy_user_nocache)
_ASM_EXTABLE_UA(31b, .L_fixup_4b_copy)
_ASM_EXTABLE_UA(40b, .L_fixup_1b_copy)
_ASM_EXTABLE_UA(41b, .L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
SYM_FUNC_END(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)

@@ -49,7 +49,7 @@
.endm
ENTRY(csum_partial_copy_generic)
SYM_FUNC_START(csum_partial_copy_generic)
cmpl $3*64, %edx
jle .Lignore
@@ -225,4 +225,4 @@ ENTRY(csum_partial_copy_generic)
jz .Lende
movl $-EFAULT, (%rax)
jmp .Lende
ENDPROC(csum_partial_copy_generic)
SYM_FUNC_END(csum_partial_copy_generic)

@@ -36,7 +36,7 @@
#include <asm/export.h>
.text
ENTRY(__get_user_1)
SYM_FUNC_START(__get_user_1)
mov PER_CPU_VAR(current_task), %_ASM_DX
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
@@ -47,10 +47,10 @@ ENTRY(__get_user_1)
xor %eax,%eax
ASM_CLAC
ret
ENDPROC(__get_user_1)
SYM_FUNC_END(__get_user_1)
EXPORT_SYMBOL(__get_user_1)
ENTRY(__get_user_2)
SYM_FUNC_START(__get_user_2)
add $1,%_ASM_AX
jc bad_get_user
mov PER_CPU_VAR(current_task), %_ASM_DX
@@ -63,10 +63,10 @@ ENTRY(__get_user_2)
xor %eax,%eax
ASM_CLAC
ret
ENDPROC(__get_user_2)
SYM_FUNC_END(__get_user_2)
EXPORT_SYMBOL(__get_user_2)
ENTRY(__get_user_4)
SYM_FUNC_START(__get_user_4)
add $3,%_ASM_AX
jc bad_get_user
mov PER_CPU_VAR(current_task), %_ASM_DX
@@ -79,10 +79,10 @@ ENTRY(__get_user_4)
xor %eax,%eax
ASM_CLAC
ret
ENDPROC(__get_user_4)
SYM_FUNC_END(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
ENTRY(__get_user_8)
SYM_FUNC_START(__get_user_8)
#ifdef CONFIG_X86_64
add $7,%_ASM_AX
jc bad_get_user
@@ -111,25 +111,27 @@ ENTRY(__get_user_8)
ASM_CLAC
ret
#endif
ENDPROC(__get_user_8)
SYM_FUNC_END(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
.Lbad_get_user_clac:
SYM_CODE_START_LOCAL(.Lbad_get_user_clac)
ASM_CLAC
bad_get_user:
xor %edx,%edx
mov $(-EFAULT),%_ASM_AX
ret
SYM_CODE_END(.Lbad_get_user_clac)
#ifdef CONFIG_X86_32
.Lbad_get_user_8_clac:
SYM_CODE_START_LOCAL(.Lbad_get_user_8_clac)
ASM_CLAC
bad_get_user_8:
xor %edx,%edx
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX
ret
SYM_CODE_END(.Lbad_get_user_8_clac)
#endif
_ASM_EXTABLE_UA(1b, .Lbad_get_user_clac)

@@ -8,7 +8,7 @@
* unsigned int __sw_hweight32(unsigned int w)
* %rdi: w
*/
ENTRY(__sw_hweight32)
SYM_FUNC_START(__sw_hweight32)
#ifdef CONFIG_X86_64
movl %edi, %eax # w
@@ -33,10 +33,10 @@ ENTRY(__sw_hweight32)
shrl $24, %eax # w = w_tmp >> 24
__ASM_SIZE(pop,) %__ASM_REG(dx)
ret
ENDPROC(__sw_hweight32)
SYM_FUNC_END(__sw_hweight32)
EXPORT_SYMBOL(__sw_hweight32)
ENTRY(__sw_hweight64)
SYM_FUNC_START(__sw_hweight64)
#ifdef CONFIG_X86_64
pushq %rdi
pushq %rdx
@@ -79,5 +79,5 @@ ENTRY(__sw_hweight64)
popl %ecx
ret
#endif
ENDPROC(__sw_hweight64)
SYM_FUNC_END(__sw_hweight64)
EXPORT_SYMBOL(__sw_hweight64)

@@ -8,8 +8,8 @@
/*
* override generic version in lib/iomap_copy.c
*/
ENTRY(__iowrite32_copy)
SYM_FUNC_START(__iowrite32_copy)
movl %edx,%ecx
rep movsd
ret
ENDPROC(__iowrite32_copy)
SYM_FUNC_END(__iowrite32_copy)

@@ -28,8 +28,8 @@
* Output:
* rax original destination
*/
ENTRY(__memcpy)
ENTRY(memcpy)
SYM_FUNC_START_ALIAS(__memcpy)
SYM_FUNC_START_LOCAL(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memcpy_erms", X86_FEATURE_ERMS
@@ -41,8 +41,8 @@ ENTRY(memcpy)
movl %edx, %ecx
rep movsb
ret
ENDPROC(memcpy)
ENDPROC(__memcpy)
SYM_FUNC_END(memcpy)
SYM_FUNC_END_ALIAS(__memcpy)
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(__memcpy)
@@ -50,14 +50,14 @@ EXPORT_SYMBOL(__memcpy)
* memcpy_erms() - enhanced fast string memcpy. This is faster and
* simpler than memcpy. Use memcpy_erms when possible.
*/
ENTRY(memcpy_erms)
SYM_FUNC_START_LOCAL(memcpy_erms)
movq %rdi, %rax
movq %rdx, %rcx
rep movsb
ret
ENDPROC(memcpy_erms)
SYM_FUNC_END(memcpy_erms)
ENTRY(memcpy_orig)
SYM_FUNC_START_LOCAL(memcpy_orig)
movq %rdi, %rax
cmpq $0x20, %rdx
@@ -182,7 +182,7 @@ ENTRY(memcpy_orig)
.Lend:
retq
ENDPROC(memcpy_orig)
SYM_FUNC_END(memcpy_orig)
#ifndef CONFIG_UML
@@ -193,7 +193,7 @@ MCSAFE_TEST_CTL
* Note that we only catch machine checks when reading the source addresses.
* Writes to target are posted and don't generate machine checks.
*/
ENTRY(__memcpy_mcsafe)
SYM_FUNC_START(__memcpy_mcsafe)
cmpl $8, %edx
/* Less than 8 bytes? Go to byte copy loop */
jb .L_no_whole_words
@@ -260,7 +260,7 @@ ENTRY(__memcpy_mcsafe)
xorl %eax, %eax
.L_done:
ret
ENDPROC(__memcpy_mcsafe)
SYM_FUNC_END(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
.section .fixup, "ax"

@@ -26,8 +26,8 @@
*/
.weak memmove
ENTRY(memmove)
ENTRY(__memmove)
SYM_FUNC_START_ALIAS(memmove)
SYM_FUNC_START(__memmove)
/* Handle more 32 bytes in loop */
mov %rdi, %rax
@@ -207,7 +207,7 @@ ENTRY(__memmove)
movb %r11b, (%rdi)
13:
retq
ENDPROC(__memmove)
ENDPROC(memmove)
SYM_FUNC_END(__memmove)
SYM_FUNC_END_ALIAS(memmove)
EXPORT_SYMBOL(__memmove)
EXPORT_SYMBOL(memmove)

@@ -19,8 +19,8 @@
*
* rax original destination
*/
ENTRY(memset)
ENTRY(__memset)
SYM_FUNC_START_ALIAS(memset)
SYM_FUNC_START(__memset)
/*
* Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
* to use it when possible. If not available, use fast string instructions.
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
ENDPROC(memset)
ENDPROC(__memset)
SYM_FUNC_END(__memset)
SYM_FUNC_END_ALIAS(memset)
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(__memset)
@@ -59,16 +59,16 @@ EXPORT_SYMBOL(__memset)
*
* rax original destination
*/
ENTRY(memset_erms)
SYM_FUNC_START_LOCAL(memset_erms)
movq %rdi,%r9
movb %sil,%al
movq %rdx,%rcx
rep stosb
movq %r9,%rax
ret
ENDPROC(memset_erms)
SYM_FUNC_END(memset_erms)
ENTRY(memset_orig)
SYM_FUNC_START_LOCAL(memset_orig)
movq %rdi,%r10
/* expand byte value */
@@ -139,4 +139,4 @@ ENTRY(memset_orig)
subq %r8,%rdx
jmp .Lafter_bad_alignment
.Lfinal:
ENDPROC(memset_orig)
SYM_FUNC_END(memset_orig)

@@ -12,7 +12,7 @@
*
*/
.macro op_safe_regs op
ENTRY(\op\()_safe_regs)
SYM_FUNC_START(\op\()_safe_regs)
pushq %rbx
pushq %r12
movq %rdi, %r10 /* Save pointer */
@@ -41,13 +41,13 @@ ENTRY(\op\()_safe_regs)
jmp 2b
_ASM_EXTABLE(1b, 3b)
ENDPROC(\op\()_safe_regs)
SYM_FUNC_END(\op\()_safe_regs)
.endm
#else /* X86_32 */
.macro op_safe_regs op
ENTRY(\op\()_safe_regs)
SYM_FUNC_START(\op\()_safe_regs)
pushl %ebx
pushl %ebp
pushl %esi
@@ -83,7 +83,7 @@ ENTRY(\op\()_safe_regs)
jmp 2b
_ASM_EXTABLE(1b, 3b)
ENDPROC(\op\()_safe_regs)
SYM_FUNC_END(\op\()_safe_regs)
.endm
#endif

@@ -34,7 +34,7 @@
#define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX
.text
ENTRY(__put_user_1)
SYM_FUNC_START(__put_user_1)
ENTER
cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX
jae .Lbad_put_user
@@ -43,10 +43,10 @@ ENTRY(__put_user_1)
xor %eax,%eax
ASM_CLAC
ret
ENDPROC(__put_user_1)
SYM_FUNC_END(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
ENTRY(__put_user_2)
SYM_FUNC_START(__put_user_2)
ENTER
mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $1,%_ASM_BX
@@ -57,10 +57,10 @@ ENTRY(__put_user_2)
xor %eax,%eax
ASM_CLAC
ret
ENDPROC(__put_user_2)
SYM_FUNC_END(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
ENTRY(__put_user_4)
SYM_FUNC_START(__put_user_4)
ENTER
mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $3,%_ASM_BX
@@ -71,10 +71,10 @@ ENTRY(__put_user_4)
xor %eax,%eax
ASM_CLAC
ret
ENDPROC(__put_user_4)
SYM_FUNC_END(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
ENTRY(__put_user_8)
SYM_FUNC_START(__put_user_8)
ENTER
mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $7,%_ASM_BX
@@ -88,14 +88,15 @@ ENTRY(__put_user_8)
xor %eax,%eax
ASM_CLAC
RET
ENDPROC(__put_user_8)
SYM_FUNC_END(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
.Lbad_put_user_clac:
SYM_CODE_START_LOCAL(.Lbad_put_user_clac)
ASM_CLAC
.Lbad_put_user:
movl $-EFAULT,%eax
RET
SYM_CODE_END(.Lbad_put_user_clac)
_ASM_EXTABLE_UA(1b, .Lbad_put_user_clac)
_ASM_EXTABLE_UA(2b, .Lbad_put_user_clac)

@@ -11,11 +11,11 @@
.macro THUNK reg
.section .text.__x86.indirect_thunk
ENTRY(__x86_indirect_thunk_\reg)
SYM_FUNC_START(__x86_indirect_thunk_\reg)
CFI_STARTPROC
JMP_NOSPEC %\reg
CFI_ENDPROC
ENDPROC(__x86_indirect_thunk_\reg)
SYM_FUNC_END(__x86_indirect_thunk_\reg)
.endm
/*