powerpc: Use gas sections for arranging exception vectors

Use assembler sections of fixed size and location to arrange the 64-bit
Book3S exception vector code (64-bit Book3E also uses them in head_64.S
for 0x0..0x100).
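
For illustration, a fixed section can be built from plain gas directives
roughly as below (a minimal sketch of the idea, not the actual head-64.h
macros; all names here are illustrative):

  	.section ".head.text.first_256B","ax",@progbits
  first_256B_start = 0x0		/* fixed load address, enforced by
  					 * the linker script */
  first_256B_end = 0x100
  start_first_256B:			/* base label for offset math */

  	. = 0x60			/* code at a fixed offset */
  my_vector:
  	b	my_handler

  my_handler:
  	blr

  	/* pad to the fixed size; gas refuses to move '.' backwards,
  	 * so overflowing the section is caught at assembly time */
  	. = first_256B_end - first_256B_start
  end_first_256B:

The linker script then has to place ".head.text.first_256B" at
first_256B_start for the offsets to line up.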

This allows better flexibility in arranging exception code and hiding
unimportant details behind macros.

Gas sections can be a bit painful to use this way, mainly because the
assembler does not know where they will finally be linked. Taking the
absolute address of a label, for example, requires a bit of trickery,
but this can mostly be hidden behind macros.
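
The core trick (as in this series' head-64.h, roughly) is that the
difference between two labels in the same section is an assembly-time
constant, so adding the section's fixed start address yields an absolute
address without gas needing to know where the section will land.
fs_label and fs_start stand for the current fixed section's base label
and fixed start address, set up when the section is entered:

  #define ABS_ADDR(label) (label - fs_label + fs_start)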

Generated code is mostly the same, except for locations, offsets, and
alignments.

The "+ 0x2" is only required for the trap number / kvm exit number,
which gets loaded as a constant into a register.
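
For example (illustrative vector; the surrounding KVM handler macros are
elided), the HV variant of the 0x500 external interrupt loads 0x500 + 0x2
so its exit can be told apart from the non-HV one:

  	li	r12,(0x500 + 0x2)	/* trap / kvm exit number 0x502 */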

Previously, the code also used "+ 0x2" in label names, but that was
changed to an "H" suffix to distinguish the HV case. Remove the last
vestiges of the old scheme.

__after_prom_start takes the absolute address of a label in another
fixed section. Newer toolchains seem to compile this okay, but older
ones do not. FIXED_SYMBOL_ABS_ADDR is more foolproof; it just takes an
additional line to define.
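
Roughly, the two-step variant defines an absolute symbol right at the
label's definition site, inside its own fixed section, where the
subtraction is a local symbol difference any toolchain can fold; other
sections then reference only the already-absolute symbol:

  #define DEFINE_FIXED_SYMBOL(label) \
  	label##_absolute = (label - fs_label + fs_start)

  #define FIXED_SYMBOL_ABS_ADDR(label) \
  	(label##_absolute)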

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Author:       Nicholas Piggin <npiggin@gmail.com>
Date:         2016-09-28 11:31:48 +10:00
Committed by: Michael Ellerman
parent 573819e343
commit 57f266497d
5 files changed, 405 insertions(+), 68 deletions(-)

@@ -28,6 +28,7 @@
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
+#include <asm/head-64.h>
 #include <asm/asm-offsets.h>
 #include <asm/bug.h>
 #include <asm/cputable.h>
@@ -65,9 +66,14 @@
  * 2. The kernel is entered at __start
  */
-	.text
-	.globl	_stext
-_stext:
+OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
+USE_FIXED_SECTION(first_256B)
+	/*
+	 * Offsets are relative from the start of fixed section, and
+	 * first_256B starts at 0. Offsets are a bit easier to use here
+	 * than the fixed section entry macros.
+	 */
+	. = 0x0
 _GLOBAL(__start)
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
@@ -104,6 +110,7 @@ __secondary_hold_acknowledge:
 	. = 0x5c
 	.globl	__run_at_load
 __run_at_load:
+DEFINE_FIXED_SYMBOL(__run_at_load)
 	.long	0x72756e30	/* "run0" -- relocate to 0 by default */
 #endif
@@ -133,7 +140,7 @@ __secondary_hold:
 	/* Tell the master cpu we're here */
 	/* Relocation is off & we are located at an address less */
 	/* than 0x100, so only need to grab low order offset.    */
-	std	r24,__secondary_hold_acknowledge-_stext(0)
+	std	r24,(ABS_ADDR(__secondary_hold_acknowledge))(0)
 	sync
 	li	r26,0
@@ -141,7 +148,7 @@ __secondary_hold:
 	tovirt(r26,r26)
 #endif
 	/* All secondary cpus wait here until told to start. */
-100:	ld	r12,__secondary_hold_spinloop-_stext(r26)
+100:	ld	r12,(ABS_ADDR(__secondary_hold_spinloop))(r26)
 	cmpdi	0,r12,0
 	beq	100b
@@ -166,12 +173,13 @@ __secondary_hold:
 #else
 	BUG_OPCODE
 #endif
+CLOSE_FIXED_SECTION(first_256B)
 /* This value is used to mark exception frames on the stack. */
 	.section ".toc","aw"
 exception_marker:
 	.tc	ID_72656773_68657265[TC],0x7265677368657265
-	.text
+	.previous
 /*
  * On server, we include the exception vectors code here as it
@@ -180,8 +188,12 @@ exception_marker:
  */
 #ifdef CONFIG_PPC_BOOK3S
 #include "exceptions-64s.S"
+#else
+OPEN_TEXT_SECTION(0x100)
 #endif
+USE_TEXT_SECTION()
 #ifdef CONFIG_PPC_BOOK3E
/*
* The booting_thread_hwid holds the thread id we want to boot in cpu
@@ -558,7 +570,7 @@ __after_prom_start:
 #if defined(CONFIG_PPC_BOOK3E)
 	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
 #endif
-	lwz	r7,__run_at_load-_stext(r26)
+	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
 #if defined(CONFIG_PPC_BOOK3E)
 	tophys(r26,r26)
 #endif
@@ -601,7 +613,7 @@ __after_prom_start:
 #if defined(CONFIG_PPC_BOOK3E)
 	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
 #endif
-	lwz	r7,__run_at_load-_stext(r26)
+	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
 	cmplwi	cr0,r7,1
 	bne	3f
@@ -611,19 +623,21 @@ __after_prom_start:
 	sub	r5,r5,r11
 #else
 	/* just copy interrupts */
-	LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
+	LOAD_REG_IMMEDIATE(r5, FIXED_SYMBOL_ABS_ADDR(__end_interrupts))
 #endif
 	b	5f
 3:
 #endif
-	lis	r5,(copy_to_here - _stext)@ha
-	addi	r5,r5,(copy_to_here - _stext)@l	/* # bytes of memory to copy */
+	/* # bytes of memory to copy */
+	lis	r5,(ABS_ADDR(copy_to_here))@ha
+	addi	r5,r5,(ABS_ADDR(copy_to_here))@l
 	bl	copy_and_flush		/* copy the first n bytes	 */
 					/* this includes the code being	 */
 					/* executed here.		 */
-	addis	r8,r3,(4f - _stext)@ha	/* Jump to the copy of this code */
-	addi	r12,r8,(4f - _stext)@l	/* that we just made */
+	/* Jump to the copy of this code that we just made */
+	addis	r8,r3,(ABS_ADDR(4f))@ha
+	addi	r12,r8,(ABS_ADDR(4f))@l
 	mtctr	r12
 	bctr
@@ -635,8 +649,8 @@ p_end: .llong _end - copy_to_here
  * Now copy the rest of the kernel up to _end, add
  * _end - copy_to_here to the copy limit and run again.
  */
-	addis	r8,r26,(p_end - _stext)@ha
-	ld	r8,(p_end - _stext)@l(r8)
+	addis	r8,r26,(ABS_ADDR(p_end))@ha
+	ld	r8,(ABS_ADDR(p_end))@l(r8)
 	add	r5,r5,r8
 5:	bl	copy_and_flush		/* copy the rest */