- /*
- * Low-level exception handling
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 - 2008 by Tensilica Inc.
- * Copyright (C) 2015 Cadence Design Systems Inc.
- *
- * Chris Zankel <[email protected]>
- *
- */
- #include <linux/linkage.h>
- #include <linux/pgtable.h>
- #include <asm/asm-offsets.h>
- #include <asm/asmmacro.h>
- #include <asm/processor.h>
- #include <asm/coprocessor.h>
- #include <asm/thread_info.h>
- #include <asm/asm-uaccess.h>
- #include <asm/unistd.h>
- #include <asm/ptrace.h>
- #include <asm/current.h>
- #include <asm/page.h>
- #include <asm/signal.h>
- #include <asm/tlbflush.h>
- #include <variant/tie-asm.h>
- /*
- * Macro to find first bit set in WINDOWBASE from the left + 1
- *
- * 100....0 -> 1
- * 010....0 -> 2
- * 000....1 -> WSBITS
- */
- .macro ffs_ws bit mask
- #if XCHAL_HAVE_NSA
- nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0)
- addi \bit, \bit, WSBITS - 32 + 1 # uppermost bit set -> return 1
- #else
- movi \bit, WSBITS
- #if WSBITS > 16
- _bltui \mask, 0x10000, 99f
- addi \bit, \bit, -16
- extui \mask, \mask, 16, 16
- #endif
- #if WSBITS > 8
- 99: _bltui \mask, 0x100, 99f
- addi \bit, \bit, -8
- srli \mask, \mask, 8
- #endif
- 99: _bltui \mask, 0x10, 99f
- addi \bit, \bit, -4
- srli \mask, \mask, 4
- 99: _bltui \mask, 0x4, 99f
- addi \bit, \bit, -2
- srli \mask, \mask, 2
- 99: _bltui \mask, 0x2, 99f
- addi \bit, \bit, -1
- 99:
- #endif
- .endm
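- /* For reference, the same computation as an illustrative C sketch (not part
- * of this file; __builtin_clz stands in for the NSA instruction and, like
- * NSA, is only meaningful here for mask != 0):
- *
- *	unsigned ffs_ws(unsigned mask)
- *	{
- *		return __builtin_clz(mask) + WSBITS - 32 + 1;
- *	}
- */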
- .macro irq_save flags tmp
- #if XTENSA_FAKE_NMI
- #if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
- rsr \flags, ps
- extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
- bgei \tmp, LOCKLEVEL, 99f
- rsil \tmp, LOCKLEVEL
- 99:
- #else
- movi \tmp, LOCKLEVEL
- rsr \flags, ps
- or \flags, \flags, \tmp
- xsr \flags, ps
- rsync
- #endif
- #else
- rsil \flags, LOCKLEVEL
- #endif
- .endm
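- /* What irq_save does, as an illustrative C sketch (the helper names below
- * are descriptive only, not real kernel functions): the old PS value ends
- * up in \flags and PS.INTLEVEL is raised so that it is at least LOCKLEVEL.
- *
- *	unsigned long irq_save(void)
- *	{
- *		unsigned long flags = read_ps();
- *		if (ps_intlevel(flags) < LOCKLEVEL)
- *			raise_ps_intlevel(LOCKLEVEL);	// rsil
- *		return flags;
- *	}
- */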
- /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
- /*
- * First-level exception handler for user exceptions.
- * Save some special registers, extra states and all registers in the AR
- * register file that were in use in the user task, and jump to the common
- * exception code.
- * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
- * save them for kernel exceptions).
- *
- * Entry condition for user_exception:
- *
- * a0: trashed, original value saved on stack (PT_AREG0)
- * a1: a1
- * a2: new stack pointer, original value in depc
- * a3: a3
- * depc: a2, original value saved on stack (PT_DEPC)
- * excsave1: dispatch table
- *
- * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
- * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
- *
- * Entry condition for _user_exception:
- *
- * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
- * excsave has been restored, and
- * stack pointer (a1) has been set.
- *
- * Note: _user_exception might be at an odd address. Don't use call0..call12
- */
- .literal_position
- ENTRY(user_exception)
- /* Save a1, a2, a3, and set SP. */
- rsr a0, depc
- s32i a1, a2, PT_AREG1
- s32i a0, a2, PT_AREG2
- s32i a3, a2, PT_AREG3
- mov a1, a2
- .globl _user_exception
- _user_exception:
- /* Save SAR and turn off single stepping */
- movi a2, 0
- wsr a2, depc # terminate user stack trace with 0
- rsr a3, sar
- xsr a2, icountlevel
- s32i a3, a1, PT_SAR
- s32i a2, a1, PT_ICOUNTLEVEL
- #if XCHAL_HAVE_THREADPTR
- rur a2, threadptr
- s32i a2, a1, PT_THREADPTR
- #endif
- /* Rotate ws so that the current windowbase is at bit0. */
- /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
- #if defined(USER_SUPPORT_WINDOWED)
- rsr a2, windowbase
- rsr a3, windowstart
- ssr a2
- s32i a2, a1, PT_WINDOWBASE
- s32i a3, a1, PT_WINDOWSTART
- slli a2, a3, 32-WSBITS
- src a2, a3, a2
- srli a2, a2, 32-WSBITS
- s32i a2, a1, PT_WMASK # needed for restoring registers
- #else
- movi a2, 0
- movi a3, 1
- s32i a2, a1, PT_WINDOWBASE
- s32i a3, a1, PT_WINDOWSTART
- s32i a3, a1, PT_WMASK
- #endif
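- /* In the windowed case above, the ssr/slli/src/srli sequence rotates the
- * WSBITS-wide WINDOWSTART right by WINDOWBASE. As an illustrative C sketch
- * (assuming 0 < WSBITS < 32):
- *
- *	ws_rotated = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1U << WSBITS) - 1);
- *
- * so that bit 0 of the result corresponds to the current window frame.
- */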
- /* Save only live registers. */
- UABI_W _bbsi.l a2, 1, .Lsave_window_registers
- s32i a4, a1, PT_AREG4
- s32i a5, a1, PT_AREG5
- s32i a6, a1, PT_AREG6
- s32i a7, a1, PT_AREG7
- UABI_W _bbsi.l a2, 2, .Lsave_window_registers
- s32i a8, a1, PT_AREG8
- s32i a9, a1, PT_AREG9
- s32i a10, a1, PT_AREG10
- s32i a11, a1, PT_AREG11
- UABI_W _bbsi.l a2, 3, .Lsave_window_registers
- s32i a12, a1, PT_AREG12
- s32i a13, a1, PT_AREG13
- s32i a14, a1, PT_AREG14
- s32i a15, a1, PT_AREG15
- #if defined(USER_SUPPORT_WINDOWED)
- /* If only one valid frame skip saving regs. */
- beqi a2, 1, common_exception
- /* Save the remaining registers.
- * We have to save all registers up to the first '1' from
- * the right, except the current frame (bit 0).
- * Assume a2 is: 001001000110001
- * All register frames starting from the top field to the marked '1'
- * must be saved.
- */
- .Lsave_window_registers:
- addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
- neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1
- and a3, a3, a2 # max. only one bit is set
- /* Find number of frames to save */
- ffs_ws a0, a3 # number of frames to the '1' from left
- /* Store information into WMASK:
- * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
- * bits 4...: number of valid 4-register frames
- */
- slli a3, a0, 4 # number of frames to save in bits 8..4
- extui a2, a2, 0, 4 # mask for the first 16 registers
- or a2, a3, a2
- s32i a2, a1, PT_WMASK # needed when we restore the reg-file
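- /* Decoding PT_WMASK later, in C terms (illustrative sketch):
- *
- *	start_bits = wmask & 0xf;	// lower 4 bits of the rotated WINDOWSTART (xxx1)
- *	nframes    = wmask >> 4;	// number of valid 4-register frames saved
- */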
- /* Save 4 registers at a time */
- 1: rotw -1
- s32i a0, a5, PT_AREG_END - 16
- s32i a1, a5, PT_AREG_END - 12
- s32i a2, a5, PT_AREG_END - 8
- s32i a3, a5, PT_AREG_END - 4
- addi a0, a4, -1
- addi a1, a5, -16
- _bnez a0, 1b
- /* WINDOWBASE still in SAR! */
- rsr a2, sar # original WINDOWBASE
- movi a3, 1
- ssl a2
- sll a3, a3
- wsr a3, windowstart # set corresponding WINDOWSTART bit
- wsr a2, windowbase # and WINDOWBASE
- rsync
- /* We are back to the original stack pointer (a1) */
- #endif
- /* Now, jump to the common exception handler. */
- j common_exception
- ENDPROC(user_exception)
- /*
- * First-level exception handler for kernel exceptions
- * Save special registers and the live window frame.
- * Note: Even though we change the stack pointer, we don't have to do a
- * MOVSP here, as we do that when we return from the exception.
- * (See comment in the kernel exception exit code)
- *
- * Entry condition for kernel_exception:
- *
- * a0: trashed, original value saved on stack (PT_AREG0)
- * a1: a1
- * a2: new stack pointer, original in DEPC
- * a3: a3
- * depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: dispatch table
- *
- * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
- * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
- *
- * Entry condition for _kernel_exception:
- *
- * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
- * excsave has been restored, and
- * stack pointer (a1) has been set.
- *
- * Note: _kernel_exception might be at an odd address. Don't use call0..call12
- */
- ENTRY(kernel_exception)
- /* Save a1, a2, a3, and set SP. */
- rsr a0, depc # get a2
- s32i a1, a2, PT_AREG1
- s32i a0, a2, PT_AREG2
- s32i a3, a2, PT_AREG3
- mov a1, a2
- .globl _kernel_exception
- _kernel_exception:
- /* Save SAR and turn off single stepping */
- movi a2, 0
- rsr a3, sar
- xsr a2, icountlevel
- s32i a3, a1, PT_SAR
- s32i a2, a1, PT_ICOUNTLEVEL
- #if defined(__XTENSA_WINDOWED_ABI__)
- /* Rotate ws so that the current windowbase is at bit0. */
- /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
- rsr a2, windowbase # don't need to save these, we only
- rsr a3, windowstart # need shifted windowstart: windowmask
- ssr a2
- slli a2, a3, 32-WSBITS
- src a2, a3, a2
- srli a2, a2, 32-WSBITS
- s32i a2, a1, PT_WMASK # needed for kernel_exception_exit
- #endif
- /* Save only the live window-frame */
- KABI_W _bbsi.l a2, 1, 1f
- s32i a4, a1, PT_AREG4
- s32i a5, a1, PT_AREG5
- s32i a6, a1, PT_AREG6
- s32i a7, a1, PT_AREG7
- KABI_W _bbsi.l a2, 2, 1f
- s32i a8, a1, PT_AREG8
- s32i a9, a1, PT_AREG9
- s32i a10, a1, PT_AREG10
- s32i a11, a1, PT_AREG11
- KABI_W _bbsi.l a2, 3, 1f
- s32i a12, a1, PT_AREG12
- s32i a13, a1, PT_AREG13
- s32i a14, a1, PT_AREG14
- s32i a15, a1, PT_AREG15
- #ifdef __XTENSA_WINDOWED_ABI__
- _bnei a2, 1, 1f
- /* Copy spill slots of a0 and a1 to imitate movsp
- * in order to keep exception stack continuous
- */
- l32i a3, a1, PT_KERNEL_SIZE
- l32i a0, a1, PT_KERNEL_SIZE + 4
- s32e a3, a1, -16
- s32e a0, a1, -12
- #endif
- 1:
- l32i a0, a1, PT_AREG0 # restore saved a0
- wsr a0, depc
- /*
- * This is the common exception handler.
- * We get here from the user exception handler or simply by falling through
- * from the kernel exception handler.
- * Save the remaining special registers, switch to kernel mode, and jump
- * to the second-level exception handler.
- *
- */
- common_exception:
- /* Save some registers, disable loops and clear the syscall flag. */
- rsr a2, debugcause
- rsr a3, epc1
- s32i a2, a1, PT_DEBUGCAUSE
- s32i a3, a1, PT_PC
- movi a2, NO_SYSCALL
- rsr a3, excvaddr
- s32i a2, a1, PT_SYSCALL
- movi a2, 0
- s32i a3, a1, PT_EXCVADDR
- #if XCHAL_HAVE_LOOPS
- xsr a2, lcount
- s32i a2, a1, PT_LCOUNT
- #endif
- #if XCHAL_HAVE_EXCLUSIVE
- /* Clear exclusive access monitor set by interrupted code */
- clrex
- #endif
- /* It is now safe to restore the EXC_TABLE_FIXUP variable. */
- rsr a2, exccause
- movi a3, 0
- rsr a0, excsave1
- s32i a2, a1, PT_EXCCAUSE
- s32i a3, a0, EXC_TABLE_FIXUP
- /* All unrecoverable states are saved on the stack now, and a1 is valid.
- * Now we can allow exceptions again. In case we've got an interrupt,
- * PS.INTLEVEL is set to LOCKLEVEL, disabling further interrupts;
- * otherwise it's left unchanged.
- *
- * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
- */
- rsr a3, ps
- s32i a3, a1, PT_PS # save ps
- #if XTENSA_FAKE_NMI
- /* Correct PS needs to be saved in the PT_PS:
- * - in case of exception or level-1 interrupt it's in the PS,
- * and is already saved.
- * - in case of medium level interrupt it's in the excsave2.
- */
- movi a0, EXCCAUSE_MAPPED_NMI
- extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
- beq a2, a0, .Lmedium_level_irq
- bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
- beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0
- .Lmedium_level_irq:
- rsr a0, excsave2
- s32i a0, a1, PT_PS # save medium-level interrupt ps
- bgei a3, LOCKLEVEL, .Lexception
- .Llevel1_irq:
- movi a3, LOCKLEVEL
- .Lexception:
- KABI_W movi a0, PS_WOE_MASK
- KABI_W or a3, a3, a0
- #else
- addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
- movi a0, LOCKLEVEL
- extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
- # a3 = PS.INTLEVEL
- moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt
- KABI_W movi a2, PS_WOE_MASK
- KABI_W or a3, a3, a2
- #endif
- /* restore return address (or 0 if return to userspace) */
- rsr a0, depc
- wsr a3, ps
- rsync # PS.WOE => rsync => overflow
- /* Save lbeg, lend */
- #if XCHAL_HAVE_LOOPS
- rsr a4, lbeg
- rsr a3, lend
- s32i a4, a1, PT_LBEG
- s32i a3, a1, PT_LEND
- #endif
- /* Save SCOMPARE1 */
- #if XCHAL_HAVE_S32C1I
- rsr a3, scompare1
- s32i a3, a1, PT_SCOMPARE1
- #endif
- /* Save optional registers. */
- save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
-
- #ifdef CONFIG_TRACE_IRQFLAGS
- rsr abi_tmp0, ps
- extui abi_tmp0, abi_tmp0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
- beqz abi_tmp0, 1f
- abi_call trace_hardirqs_off
- 1:
- #endif
- #ifdef CONFIG_CONTEXT_TRACKING_USER
- l32i abi_tmp0, a1, PT_PS
- bbci.l abi_tmp0, PS_UM_BIT, 1f
- abi_call user_exit_callable
- 1:
- #endif
- /* Go to second-level dispatcher. Set up parameters to pass to the
- * exception handler and call the exception handler.
- */
- l32i abi_arg1, a1, PT_EXCCAUSE # pass EXCCAUSE
- rsr abi_tmp0, excsave1
- addx4 abi_tmp0, abi_arg1, abi_tmp0
- l32i abi_tmp0, abi_tmp0, EXC_TABLE_DEFAULT # load handler
- mov abi_arg0, a1 # pass stack frame
- /* Call the second-level handler */
- abi_callx abi_tmp0
- /* Jump here for exception exit */
- .global common_exception_return
- common_exception_return:
- #if XTENSA_FAKE_NMI
- l32i abi_tmp0, a1, PT_EXCCAUSE
- movi abi_tmp1, EXCCAUSE_MAPPED_NMI
- l32i abi_saved1, a1, PT_PS
- beq abi_tmp0, abi_tmp1, .Lrestore_state
- #endif
- .Ltif_loop:
- irq_save abi_tmp0, abi_tmp1
- #ifdef CONFIG_TRACE_IRQFLAGS
- abi_call trace_hardirqs_off
- #endif
- /* Jump if we are returning from kernel exceptions. */
- l32i abi_saved1, a1, PT_PS
- GET_THREAD_INFO(abi_tmp0, a1)
- l32i abi_saved0, abi_tmp0, TI_FLAGS
- _bbci.l abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel
- /* Specific to a user exception exit:
- * We need to check some flags for signal handling and rescheduling,
- * and have to restore WB and WS, extra states, and all registers
- * in the register file that were in use in the user task.
- * Note that we don't disable interrupts here.
- */
- _bbsi.l abi_saved0, TIF_NEED_RESCHED, .Lresched
- movi abi_tmp0, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
- bnone abi_saved0, abi_tmp0, .Lexit_tif_loop_user
- l32i abi_tmp0, a1, PT_DEPC
- bgeui abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
- /* Call do_signal() */
- #ifdef CONFIG_TRACE_IRQFLAGS
- abi_call trace_hardirqs_on
- #endif
- rsil abi_tmp0, 0
- mov abi_arg0, a1
- abi_call do_notify_resume # int do_notify_resume(struct pt_regs*)
- j .Ltif_loop
- .Lresched:
- #ifdef CONFIG_TRACE_IRQFLAGS
- abi_call trace_hardirqs_on
- #endif
- rsil abi_tmp0, 0
- abi_call schedule # void schedule (void)
- j .Ltif_loop
- .Lexit_tif_loop_kernel:
- #ifdef CONFIG_PREEMPTION
- _bbci.l abi_saved0, TIF_NEED_RESCHED, .Lrestore_state
- /* Check current_thread_info->preempt_count */
- l32i abi_tmp1, abi_tmp0, TI_PRE_COUNT
- bnez abi_tmp1, .Lrestore_state
- abi_call preempt_schedule_irq
- #endif
- j .Lrestore_state
- .Lexit_tif_loop_user:
- #ifdef CONFIG_CONTEXT_TRACKING_USER
- abi_call user_enter_callable
- #endif
- #ifdef CONFIG_HAVE_HW_BREAKPOINT
- _bbci.l abi_saved0, TIF_DB_DISABLED, 1f
- abi_call restore_dbreak
- 1:
- #endif
- #ifdef CONFIG_DEBUG_TLB_SANITY
- l32i abi_tmp0, a1, PT_DEPC
- bgeui abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
- abi_call check_tlb_sanity
- #endif
- .Lrestore_state:
- #ifdef CONFIG_TRACE_IRQFLAGS
- extui abi_tmp0, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
- bgei abi_tmp0, LOCKLEVEL, 1f
- abi_call trace_hardirqs_on
- 1:
- #endif
- /*
- * Restore optional registers.
- * abi_arg* are used as temporary registers here.
- */
- load_xtregs_opt a1 abi_tmp0 abi_arg0 abi_arg1 abi_arg2 abi_arg3 PT_XTREGS_OPT
- /* Restore SCOMPARE1 */
- #if XCHAL_HAVE_S32C1I
- l32i abi_tmp0, a1, PT_SCOMPARE1
- wsr abi_tmp0, scompare1
- #endif
- wsr abi_saved1, ps /* disable interrupts */
- _bbci.l abi_saved1, PS_UM_BIT, kernel_exception_exit
- user_exception_exit:
- /* Restore the state of the task and return from the exception. */
- #if defined(USER_SUPPORT_WINDOWED)
- /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
- l32i a2, a1, PT_WINDOWBASE
- l32i a3, a1, PT_WINDOWSTART
- wsr a1, depc # use DEPC as temp storage
- wsr a3, windowstart # restore WINDOWSTART
- ssr a2 # preserve user's WB in the SAR
- wsr a2, windowbase # switch to user's saved WB
- rsync
- rsr a1, depc # restore stack pointer
- l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
- rotw -1 # we restore a4..a7
- _bltui a6, 16, .Lclear_regs # only have to restore current window?
- /* The working registers are a0 and a3. We are restoring to
- * a4..a7. Be careful not to destroy what we have just restored.
- * Note: wmask has the format YYYYM:
- * Y: number of registers saved in groups of 4
- * M: 4 bit mask of first 16 registers
- */
- mov a2, a6
- mov a3, a5
- 1: rotw -1 # a0..a3 become a4..a7
- addi a3, a7, -4*4 # next iteration
- addi a2, a6, -16 # decrementing Y in WMASK
- l32i a4, a3, PT_AREG_END + 0
- l32i a5, a3, PT_AREG_END + 4
- l32i a6, a3, PT_AREG_END + 8
- l32i a7, a3, PT_AREG_END + 12
- _bgeui a2, 16, 1b
- /* Clear unrestored registers (don't leak anything to user-land) */
- .Lclear_regs:
- rsr a0, windowbase
- rsr a3, sar
- sub a3, a0, a3
- beqz a3, 2f
- extui a3, a3, 0, WBBITS
- 1: rotw -1
- addi a3, a7, -1
- movi a4, 0
- movi a5, 0
- movi a6, 0
- movi a7, 0
- bgei a3, 1, 1b
- /* We are back where we were when we started.
- * Note: a2 still contains WMASK (if we've returned to the original
- * frame where we had loaded a2), or at least the lower 4 bits
- * (if we have restored WSBITS-1 frames).
- */
- 2:
- #else
- movi a2, 1
- #endif
- #if XCHAL_HAVE_THREADPTR
- l32i a3, a1, PT_THREADPTR
- wur a3, threadptr
- #endif
- j common_exception_exit
- /* This is the kernel exception exit.
- * We avoided doing a MOVSP when we entered the exception, but we
- * have to do it here.
- */
- kernel_exception_exit:
- #if defined(__XTENSA_WINDOWED_ABI__)
- /* Check if we have to do a movsp.
- *
- * We only have to do a movsp if the previous window-frame has
- * been spilled to the *temporary* exception stack instead of the
- * task's stack. This is the case if the corresponding bit in
- * WINDOWSTART for the previous window-frame was set before
- * (not spilled) but is zero now (spilled).
- * If this bit is zero, all other bits except the one for the
- * current window frame are also zero. So, we can use a simple test:
- * 'and' WINDOWSTART and WINDOWSTART-1:
- *
- * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
- *
- * The result is zero only if one bit was set.
- *
- * (Note: We might have gone through several task switches before
- * we come back to the current task, so WINDOWBASE might be
- * different from the time the exception occurred.)
- */
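- /* The test described above, as an illustrative C sketch:
- *
- *	need_movsp = (windowstart & (windowstart - 1)) == 0;
- *
- * i.e. the spill-slot copy below is only needed when the current frame's
- * bit is the only one left set.
- */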
- /* Test WINDOWSTART before and after the exception.
- * We actually have WMASK, so we only have to test if it is 1 or not.
- */
- l32i a2, a1, PT_WMASK
- _beqi a2, 1, common_exception_exit # Spilled before exception, jump
- /* Test WINDOWSTART now. If spilled, do the movsp */
- rsr a3, windowstart
- addi a0, a3, -1
- and a3, a3, a0
- _bnez a3, common_exception_exit
- /* Do a movsp (we returned from a call4, so we have at least a0..a7) */
- addi a0, a1, -16
- l32i a3, a0, 0
- l32i a4, a0, 4
- s32i a3, a1, PT_KERNEL_SIZE + 0
- s32i a4, a1, PT_KERNEL_SIZE + 4
- l32i a3, a0, 8
- l32i a4, a0, 12
- s32i a3, a1, PT_KERNEL_SIZE + 8
- s32i a4, a1, PT_KERNEL_SIZE + 12
- /* Common exception exit.
- * We restore the special registers and the current window frame, and
- * return from the exception.
- *
- * Note: We expect a2 to hold PT_WMASK
- */
- #else
- movi a2, 1
- #endif
- common_exception_exit:
- /* Restore address registers. */
- _bbsi.l a2, 1, 1f
- l32i a4, a1, PT_AREG4
- l32i a5, a1, PT_AREG5
- l32i a6, a1, PT_AREG6
- l32i a7, a1, PT_AREG7
- _bbsi.l a2, 2, 1f
- l32i a8, a1, PT_AREG8
- l32i a9, a1, PT_AREG9
- l32i a10, a1, PT_AREG10
- l32i a11, a1, PT_AREG11
- _bbsi.l a2, 3, 1f
- l32i a12, a1, PT_AREG12
- l32i a13, a1, PT_AREG13
- l32i a14, a1, PT_AREG14
- l32i a15, a1, PT_AREG15
- /* Restore PC, SAR */
- 1: l32i a2, a1, PT_PC
- l32i a3, a1, PT_SAR
- wsr a2, epc1
- wsr a3, sar
- /* Restore LBEG, LEND, LCOUNT */
- #if XCHAL_HAVE_LOOPS
- l32i a2, a1, PT_LBEG
- l32i a3, a1, PT_LEND
- wsr a2, lbeg
- l32i a2, a1, PT_LCOUNT
- wsr a3, lend
- wsr a2, lcount
- #endif
- /* We control single stepping through the ICOUNTLEVEL register. */
- l32i a2, a1, PT_ICOUNTLEVEL
- movi a3, -2
- wsr a2, icountlevel
- wsr a3, icount
- /* Check if it was double exception. */
- l32i a0, a1, PT_DEPC
- l32i a3, a1, PT_AREG3
- l32i a2, a1, PT_AREG2
- _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
- /* Restore a0...a3 and return */
- l32i a0, a1, PT_AREG0
- l32i a1, a1, PT_AREG1
- rfe
- 1: wsr a0, depc
- l32i a0, a1, PT_AREG0
- l32i a1, a1, PT_AREG1
- rfde
- ENDPROC(kernel_exception)
- /*
- * Debug exception handler.
- *
- * Currently, we don't support KGDB, so only user applications can be debugged.
- *
- * When we get here, a0 is trashed and saved to excsave[debuglevel]
- */
- .literal_position
- ENTRY(debug_exception)
- rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
- bbsi.l a0, PS_EXCM_BIT, .Ldebug_exception_in_exception # exception mode
- /* Set EPC1 and EXCCAUSE */
- wsr a2, depc # save a2 temporarily
- rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL
- wsr a2, epc1
- movi a2, EXCCAUSE_MAPPED_DEBUG
- wsr a2, exccause
- /* Restore PS to the value before the debug exc but with PS.EXCM set.*/
- movi a2, 1 << PS_EXCM_BIT
- or a2, a0, a2
- wsr a2, ps
- /* Switch to kernel/user stack, restore jump vector, and save a0 */
- bbsi.l a2, PS_UM_BIT, .Ldebug_exception_user # jump if user mode
- addi a2, a1, -16 - PT_KERNEL_SIZE # assume kernel stack
- .Ldebug_exception_continue:
- l32i a0, a3, DT_DEBUG_SAVE
- s32i a1, a2, PT_AREG1
- s32i a0, a2, PT_AREG0
- movi a0, 0
- s32i a0, a2, PT_DEPC # mark it as a regular exception
- xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
- xsr a0, depc
- s32i a3, a2, PT_AREG3
- s32i a0, a2, PT_AREG2
- mov a1, a2
- /* Debug exception is handled as an exception, so interrupts will
- * likely be enabled in the common exception handler. Disable
- * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
- * meaning.
- */
- #if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
- GET_THREAD_INFO(a2, a1)
- l32i a3, a2, TI_PRE_COUNT
- addi a3, a3, 1
- s32i a3, a2, TI_PRE_COUNT
- #endif
- rsr a2, ps
- bbsi.l a2, PS_UM_BIT, _user_exception
- j _kernel_exception
- .Ldebug_exception_user:
- rsr a2, excsave1
- l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
- j .Ldebug_exception_continue
- .Ldebug_exception_in_exception:
- #ifdef CONFIG_HAVE_HW_BREAKPOINT
- /* Debug exception while in exception mode. This may happen when the
- * window overflow/underflow handler or a fast exception handler hits a
- * data breakpoint, in which case we save and disable all data
- * breakpoints, single-step the faulting instruction and restore the
- * data breakpoints.
- */
- bbci.l a0, PS_UM_BIT, .Ldebug_exception_in_exception # jump if kernel mode
- rsr a0, debugcause
- bbsi.l a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak
- .set _index, 0
- .rept XCHAL_NUM_DBREAK
- l32i a0, a3, DT_DBREAKC_SAVE + _index * 4
- wsr a0, SREG_DBREAKC + _index
- .set _index, _index + 1
- .endr
- l32i a0, a3, DT_ICOUNT_LEVEL_SAVE
- wsr a0, icountlevel
- l32i a0, a3, DT_ICOUNT_SAVE
- xsr a0, icount
- l32i a0, a3, DT_DEBUG_SAVE
- xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
- rfi XCHAL_DEBUGLEVEL
- .Ldebug_save_dbreak:
- .set _index, 0
- .rept XCHAL_NUM_DBREAK
- movi a0, 0
- xsr a0, SREG_DBREAKC + _index
- s32i a0, a3, DT_DBREAKC_SAVE + _index * 4
- .set _index, _index + 1
- .endr
- movi a0, XCHAL_EXCM_LEVEL + 1
- xsr a0, icountlevel
- s32i a0, a3, DT_ICOUNT_LEVEL_SAVE
- movi a0, 0xfffffffe
- xsr a0, icount
- s32i a0, a3, DT_ICOUNT_SAVE
- l32i a0, a3, DT_DEBUG_SAVE
- xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
- rfi XCHAL_DEBUGLEVEL
- #else
- /* Debug exception while in exception mode. Should not happen. */
- j .Ldebug_exception_in_exception // FIXME!!
- #endif
- ENDPROC(debug_exception)
- /*
- * We get here in case of an unrecoverable exception.
- * The only thing we can do is to be nice and print a panic message.
- * We only produce a single stack frame for panic, so ???
- *
- *
- * Entry conditions:
- *
- * - a0 contains the caller address; original value saved in excsave1.
- * - the original a0 contains a valid return address (backtrace) or 0.
- * - a2 contains a valid stackpointer
- *
- * Notes:
- *
- * - If the stack pointer could be invalid, the caller has to set up a
- * dummy stack pointer (e.g. the stack of the init_task)
- *
- * - If the return address could be invalid, the caller has to set it
- * to 0, so the backtrace would stop.
- *
- */
- .align 4
- unrecoverable_text:
- .ascii "Unrecoverable error in exception handler\0"
- .literal_position
- ENTRY(unrecoverable_exception)
- #if XCHAL_HAVE_WINDOWED
- movi a0, 1
- movi a1, 0
- wsr a0, windowstart
- wsr a1, windowbase
- rsync
- #endif
- movi a1, KERNEL_PS_WOE_MASK | LOCKLEVEL
- wsr a1, ps
- rsync
- movi a1, init_task
- movi a0, 0
- addi a1, a1, PT_REGS_OFFSET
- movi abi_arg0, unrecoverable_text
- abi_call panic
- 1: j 1b
- ENDPROC(unrecoverable_exception)
- /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
- __XTENSA_HANDLER
- .literal_position
- #ifdef SUPPORT_WINDOWED
- /*
- * Fast-handler for alloca exceptions
- *
- * The ALLOCA handler is entered when user code executes the MOVSP
- * instruction and the caller's frame is not in the register file.
- *
- * This algorithm was taken from Ross Morley's RTOS Porting Layer:
- *
- * /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
- *
- * It leverages the existing window spill/fill routines and their support for
- * double exceptions. The 'movsp' instruction will only cause an exception if
- * the next window needs to be loaded. In fact this ALLOCA exception may be
- * replaced at some point by changing the hardware to do an underflow exception
- * of the proper size instead.
- *
- * This algorithm simply backs out the register changes started by the user
- * exception handler, makes it appear that we have started a window underflow
- * by rotating the window back and then setting the old window base (OWB) in
- * the 'ps' register with the rolled back window base. The 'movsp' instruction
- * will be re-executed and this time, since the next window frame is in the
- * active AR registers, it won't cause an exception.
- *
- * If the WindowUnderflow code gets a TLB miss, the page will get mapped
- * and the partial WindowUnderflow will be handled in the double exception
- * handler.
- *
- * Entry condition:
- *
- * a0: trashed, original value saved on stack (PT_AREG0)
- * a1: a1
- * a2: new stack pointer, original in DEPC
- * a3: a3
- * depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: dispatch table
- *
- * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
- * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
- */
- ENTRY(fast_alloca)
- rsr a0, windowbase
- rotw -1
- rsr a2, ps
- extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
- xor a3, a3, a4
- l32i a4, a6, PT_AREG0
- l32i a1, a6, PT_DEPC
- rsr a6, depc
- wsr a1, depc
- slli a3, a3, PS_OWB_SHIFT
- xor a2, a2, a3
- wsr a2, ps
- rsync
- _bbci.l a4, 31, 4f
- rotw -1
- _bbci.l a8, 30, 8f
- rotw -1
- j _WindowUnderflow12
- 8: j _WindowUnderflow8
- 4: j _WindowUnderflow4
- ENDPROC(fast_alloca)
- #endif
- #ifdef CONFIG_USER_ABI_CALL0_PROBE
- /*
- * fast illegal instruction handler.
- *
- * This is used to fix up user PS.WOE on the exception caused
- * by the first opcode related to register window. If PS.WOE is
- * already set it goes directly to the common user exception handler.
- *
- * Entry condition:
- *
- * a0: trashed, original value saved on stack (PT_AREG0)
- * a1: a1
- * a2: new stack pointer, original in DEPC
- * a3: a3
- * depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: dispatch table
- */
- ENTRY(fast_illegal_instruction_user)
- rsr a0, ps
- bbsi.l a0, PS_WOE_BIT, 1f
- s32i a3, a2, PT_AREG3
- movi a3, PS_WOE_MASK
- or a0, a0, a3
- wsr a0, ps
- #ifdef CONFIG_USER_ABI_CALL0_PROBE
- GET_THREAD_INFO(a3, a2)
- rsr a0, epc1
- s32i a0, a3, TI_PS_WOE_FIX_ADDR
- #endif
- l32i a3, a2, PT_AREG3
- l32i a0, a2, PT_AREG0
- rsr a2, depc
- rfe
- 1:
- call0 user_exception
- ENDPROC(fast_illegal_instruction_user)
- #endif
- /*
- * fast system calls.
- *
- * WARNING: The kernel doesn't save the entire user context before
- * handling a fast system call. These functions are small and short,
- * usually offering some functionality not available to user tasks.
- *
- * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
- *
- * Entry condition:
- *
- * a0: trashed, original value saved on stack (PT_AREG0)
- * a1: a1
- * a2: new stack pointer, original in DEPC
- * a3: a3
- * depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: dispatch table
- */
- ENTRY(fast_syscall_user)
- /* Skip syscall. */
- rsr a0, epc1
- addi a0, a0, 3
- wsr a0, epc1
- l32i a0, a2, PT_DEPC
- bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
- rsr a0, depc # get syscall-nr
- _beqz a0, fast_syscall_spill_registers
- _beqi a0, __NR_xtensa, fast_syscall_xtensa
- call0 user_exception
- ENDPROC(fast_syscall_user)
- ENTRY(fast_syscall_unrecoverable)
- /* Restore all states. */
- l32i a0, a2, PT_AREG0 # restore a0
- xsr a2, depc # restore a2, depc
- wsr a0, excsave1
- call0 unrecoverable_exception
- ENDPROC(fast_syscall_unrecoverable)
- /*
- * sysxtensa syscall handler
- *
- * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
- * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
- * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
- * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
- * a2 a6 a3 a4 a5
- *
- * Entry condition:
- *
- * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0)
- * a1: a1
- * a2: new stack pointer, original in a0 and DEPC
- * a3: a3
- * a4..a15: unchanged
- * depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: dispatch table
- *
- * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
- * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
- *
- * Note: we don't have to save a2; a2 holds the return value
- */
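- /* Hedged userspace usage sketch (not from this file; __NR_xtensa and the
- * SYS_XTENSA_* operation codes are the ones referenced above):
- *
- *	#include <unistd.h>
- *	#include <sys/syscall.h>
- *
- *	// atomically: if (*ptr == oldval) { *ptr = newval; return 1; } return 0;
- *	int ok = syscall(__NR_xtensa, SYS_XTENSA_ATOMIC_CMP_SWP,
- *			 ptr, oldval, newval);
- */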
- .literal_position
- #ifdef CONFIG_FAST_SYSCALL_XTENSA
- ENTRY(fast_syscall_xtensa)
- s32i a7, a2, PT_AREG7 # we need an additional register
- movi a7, 4 # sizeof(unsigned int)
- access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
- _bgeui a6, SYS_XTENSA_COUNT, .Lill
- _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
- /* Fall through for ATOMIC_CMP_SWP. */
- .Lswp: /* Atomic compare and swap */
- EX(.Leac) l32i a0, a3, 0 # read old value
- bne a0, a4, 1f # same as old value? jump
- EX(.Leac) s32i a5, a3, 0 # different, modify value
- l32i a7, a2, PT_AREG7 # restore a7
- l32i a0, a2, PT_AREG0 # restore a0
- movi a2, 1 # and return 1
- rfe
- 1: l32i a7, a2, PT_AREG7 # restore a7
- l32i a0, a2, PT_AREG0 # restore a0
- movi a2, 0 # return 0 (note that we cannot set
- rfe
- .Lnswp: /* Atomic set, add, and exg_add. */
- EX(.Leac) l32i a7, a3, 0 # orig
- addi a6, a6, -SYS_XTENSA_ATOMIC_SET
- add a0, a4, a7 # + arg
- moveqz a0, a4, a6 # set
- addi a6, a6, SYS_XTENSA_ATOMIC_SET
- EX(.Leac) s32i a0, a3, 0 # write new value
- mov a0, a2
- mov a2, a7
- l32i a7, a0, PT_AREG7 # restore a7
- l32i a0, a0, PT_AREG0 # restore a0
- rfe
- .Leac: l32i a7, a2, PT_AREG7 # restore a7
- l32i a0, a2, PT_AREG0 # restore a0
- movi a2, -EFAULT
- rfe
- .Lill: l32i a7, a2, PT_AREG7 # restore a7
- l32i a0, a2, PT_AREG0 # restore a0
- movi a2, -EINVAL
- rfe
- ENDPROC(fast_syscall_xtensa)
- #else /* CONFIG_FAST_SYSCALL_XTENSA */
- ENTRY(fast_syscall_xtensa)
- l32i a0, a2, PT_AREG0 # restore a0
- movi a2, -ENOSYS
- rfe
- ENDPROC(fast_syscall_xtensa)
- #endif /* CONFIG_FAST_SYSCALL_XTENSA */
- /* fast_syscall_spill_registers.
- *
- * Entry condition:
- *
- * a0: trashed, original value saved on stack (PT_AREG0)
- * a1: a1
- * a2: new stack pointer, original in DEPC
- * a3: a3
- * depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: dispatch table
- *
- * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
- */
- #if defined(CONFIG_FAST_SYSCALL_SPILL_REGISTERS) && \
- defined(USER_SUPPORT_WINDOWED)
- ENTRY(fast_syscall_spill_registers)
- /* Register a FIXUP handler (pass current wb as a parameter) */
- xsr a3, excsave1
- movi a0, fast_syscall_spill_registers_fixup
- s32i a0, a3, EXC_TABLE_FIXUP
- rsr a0, windowbase
- s32i a0, a3, EXC_TABLE_PARAM
- xsr a3, excsave1 # restore a3 and excsave_1
- /* Save a3, a4 and SAR on stack. */
- rsr a0, sar
- s32i a3, a2, PT_AREG3
- s32i a0, a2, PT_SAR
- /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
- s32i a4, a2, PT_AREG4
- s32i a7, a2, PT_AREG7
- s32i a8, a2, PT_AREG8
- s32i a11, a2, PT_AREG11
- s32i a12, a2, PT_AREG12
- s32i a15, a2, PT_AREG15
- /*
- * Rotate ws so that the current windowbase is at bit 0.
- * Assume ws = xxxwww1yy (www1 current window frame).
- * Rotate ws right so that a3 = yyxxxwww1.
- */
- rsr a0, windowbase
- rsr a3, windowstart # a3 = xxxwww1yy
- ssr a0 # holds WB
- slli a0, a3, WSBITS
- or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy
- srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
- /* We are done if there is no frame other than the current register frame. */
- extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
- movi a0, (1 << (WSBITS-1))
- _beqz a3, .Lnospill # only one active frame? jump
- /* We want 1 at the top, so that we return to the current windowbase */
- or a3, a3, a0 # 1yyxxxwww
- /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
- wsr a3, windowstart # save shifted windowstart
- neg a0, a3
- and a3, a0, a3 # first bit set from right: 000010000
- ffs_ws a0, a3 # a0: shifts to skip empty frames
- movi a3, WSBITS
- sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right
- ssr a0 # save in SAR for later.
- rsr a3, windowbase
- add a3, a3, a0
- wsr a3, windowbase
- rsync
- rsr a3, windowstart
- srl a3, a3 # shift windowstart
- /* WB is now just one frame below the oldest frame in the register
- window. WS is shifted so the oldest frame is in bit 0, thus, WB
- and WS differ by one 4-register frame. */
- /* Save frames. Depending on what call was used (call4, call8, or call12),
- * we have to save 4, 8, or 12 registers.
- */
- .Lloop: _bbsi.l a3, 1, .Lc4
- _bbci.l a3, 2, .Lc12
- .Lc8: s32e a4, a13, -16
- l32e a4, a5, -12
- s32e a8, a4, -32
- s32e a5, a13, -12
- s32e a6, a13, -8
- s32e a7, a13, -4
- s32e a9, a4, -28
- s32e a10, a4, -24
- s32e a11, a4, -20
- srli a11, a3, 2 # shift windowbase by 2
- rotw 2
- _bnei a3, 1, .Lloop
- j .Lexit
- .Lc4: s32e a4, a9, -16
- s32e a5, a9, -12
- s32e a6, a9, -8
- s32e a7, a9, -4
- srli a7, a3, 1
- rotw 1
- _bnei a3, 1, .Lloop
- j .Lexit
- .Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 2 shouldn't be zero!
- /* 12-register frame (call12) */
- l32e a0, a5, -12
- s32e a8, a0, -48
- mov a8, a0
- s32e a9, a8, -44
- s32e a10, a8, -40
- s32e a11, a8, -36
- s32e a12, a8, -32
- s32e a13, a8, -28
- s32e a14, a8, -24
- s32e a15, a8, -20
- srli a15, a3, 3
- /* The stack pointer for a4..a7 is out of reach, so we rotate the
- * window, grab the stackpointer, and rotate back.
- * Alternatively, we could also use the following approach, but that
- * makes the fixup routine much more complicated:
- * rotw 1
- * s32e a0, a13, -16
- * ...
- * rotw 2
- */
- rotw 1
- mov a4, a13
- rotw -1
- s32e a4, a8, -16
- s32e a5, a8, -12
- s32e a6, a8, -8
- s32e a7, a8, -4
- rotw 3
- _beqi a3, 1, .Lexit
- j .Lloop
- .Lexit:
- /* Done. Do the final rotation and set WS */
- rotw 1
- rsr a3, windowbase
- ssl a3
- movi a3, 1
- sll a3, a3
- wsr a3, windowstart
- .Lnospill:
- /* Advance PC, restore registers and SAR, and return from exception. */
- l32i a3, a2, PT_SAR
- l32i a0, a2, PT_AREG0
- wsr a3, sar
- l32i a3, a2, PT_AREG3
- /* Restore clobbered registers. */
- l32i a4, a2, PT_AREG4
- l32i a7, a2, PT_AREG7
- l32i a8, a2, PT_AREG8
- l32i a11, a2, PT_AREG11
- l32i a12, a2, PT_AREG12
- l32i a15, a2, PT_AREG15
- movi a2, 0
- rfe
- .Linvalid_mask:
- /* We get here because of an unrecoverable error in the window
- * registers, so set up a dummy frame and kill the user application.
- * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
- */
- movi a0, 1
- movi a1, 0
- wsr a0, windowstart
- wsr a1, windowbase
- rsync
- movi a0, 0
- rsr a3, excsave1
- l32i a1, a3, EXC_TABLE_KSTK
- movi a4, KERNEL_PS_WOE_MASK | LOCKLEVEL
- wsr a4, ps
- rsync
- movi abi_arg0, SIGSEGV
- abi_call make_task_dead
- /* shouldn't return, so panic */
- wsr a0, excsave1
- call0 unrecoverable_exception # should not return
- 1: j 1b
- ENDPROC(fast_syscall_spill_registers)
- /* Fixup handler.
- *
- * We get here if the spill routine causes an exception, e.g. tlb miss.
- * We basically restore WINDOWBASE and WINDOWSTART to the condition when
- * we entered the spill routine and jump to the user exception handler.
- *
- * Note that we only need to restore the bits in windowstart that have not
- * been spilled yet by the _spill_register routine. Luckily, a3 contains a
- * rotated windowstart with only those bits set for frames that haven't been
- * spilled yet. Because a3 is rotated such that bit 0 represents the register
- * frame for the current windowbase - 1, we need to rotate a3 left by the
- * value of the current windowbase + 1 and move it to windowstart.
- *
- * a0: value of depc, original value in depc
- * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
- * a3: exctable, original value in excsave1
- */
- ENTRY(fast_syscall_spill_registers_fixup)
- rsr a2, windowbase # get current windowbase (a2 is saved)
- xsr a0, depc # restore depc and a0
- ssl a2 # set shift (32 - WB)
- /* We need to make sure the current registers (a0-a3) are preserved.
- * To do this, we simply set the bit for the current window frame
- * in WS, so that the exception handlers save them to the task stack.
- *
- * Note: we use a3 to set the windowbase, so we take special care
- * of it, saving it in the original _spill_registers frame across
- * the exception handler call.
- */
- xsr a3, excsave1 # get spill-mask
- slli a3, a3, 1 # shift left by one
- addi a3, a3, 1 # set the bit for the current window frame
- slli a2, a3, 32-WSBITS
- src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
- wsr a2, windowstart # set corrected windowstart
- srli a3, a3, 1
- rsr a2, excsave1
- l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
- xsr a2, excsave1
- s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
- l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
- xsr a2, excsave1
- /* Return to the original (user task) WINDOWBASE.
- * We leave the following frame behind:
- * a0, a1, a2 same
- * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
- * depc: depc (we have to return to that address)
- * excsave_1: exctable
- */
- wsr a3, windowbase
- rsync
- /* We are now in the original frame when we entered _spill_registers:
- * a0: return address
- * a1: used, stack pointer
- * a2: kernel stack pointer
- * a3: available
- * depc: exception address
- * excsave: exctable
- * Note: This frame might be the same as above.
- */
- /* Setup stack pointer. */
- addi a2, a2, -PT_USER_SIZE
- s32i a0, a2, PT_AREG0
- /* Make sure we return to this fixup handler. */
- movi a3, fast_syscall_spill_registers_fixup_return
- s32i a3, a2, PT_DEPC # setup depc
- /* Jump to the exception handler. */
- rsr a3, excsave1
- rsr a0, exccause
- addx4 a0, a0, a3 # find entry in table
- l32i a0, a0, EXC_TABLE_FAST_USER # load handler
- l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
- jx a0
- ENDPROC(fast_syscall_spill_registers_fixup)
- ENTRY(fast_syscall_spill_registers_fixup_return)
- /* When we return here, all registers have been restored (a2: DEPC) */
- wsr a2, depc # exception address
- /* Restore fixup handler. */
- rsr a2, excsave1
- s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
- movi a3, fast_syscall_spill_registers_fixup
- s32i a3, a2, EXC_TABLE_FIXUP
- rsr a3, windowbase
- s32i a3, a2, EXC_TABLE_PARAM
- l32i a2, a2, EXC_TABLE_KSTK
- /* Load WB at the time the exception occurred. */
- rsr a3, sar # WB is still in SAR
- neg a3, a3
- wsr a3, windowbase
- rsync
- rsr a3, excsave1
- l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
- rfde
- ENDPROC(fast_syscall_spill_registers_fixup_return)
- #else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
- ENTRY(fast_syscall_spill_registers)
- l32i a0, a2, PT_AREG0 # restore a0
- movi a2, -ENOSYS
- rfe
- ENDPROC(fast_syscall_spill_registers)
- #endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
- #ifdef CONFIG_MMU
- /*
- * We should never get here. Bail out!
- */
- ENTRY(fast_second_level_miss_double_kernel)
- 1:
- call0 unrecoverable_exception # should not return
- 1: j 1b
- ENDPROC(fast_second_level_miss_double_kernel)
- /* First-level entry handler for user, kernel, and double 2nd-level
- * TLB miss exceptions. Note that for now, user and kernel miss
- * exceptions share the same entry point and are handled identically.
- *
- * An old, less-efficient C version of this function used to exist.
- * We include it below, interleaved as comments, for reference.
- *
- * Entry condition:
- *
- * a0: trashed, original value saved on stack (PT_AREG0)
- * a1: a1
- * a2: new stack pointer, original in DEPC
- * a3: a3
- * depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: dispatch table
- *
- * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
- * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
- */
- ENTRY(fast_second_level_miss)
- /* Save a1 and a3. Note: we don't expect a double exception. */
- s32i a1, a2, PT_AREG1
- s32i a3, a2, PT_AREG3
- /* We need to map the page of PTEs for the user task. Find
- * the pointer to that page. Also, it's possible for tsk->mm
- * to be NULL while tsk->active_mm is nonzero if we faulted on
- * a vmalloc address. In that rare case, we must use
- * active_mm instead to avoid a fault in this handler. See
- *
- * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
- * (or search Internet on "mm vs. active_mm")
- *
- * if (!mm)
- * mm = tsk->active_mm;
- * pgd = pgd_offset (mm, regs->excvaddr);
- * pmd = pmd_offset (pgd, regs->excvaddr);
- * pmdval = *pmd;
- */
- GET_CURRENT(a1,a2)
- l32i a0, a1, TASK_MM # tsk->mm
- beqz a0, .Lfast_second_level_miss_no_mm
- .Lfast_second_level_miss_continue:
- rsr a3, excvaddr # fault address
- _PGD_OFFSET(a0, a3, a1)
- l32i a0, a0, 0 # read pmdval
- beqz a0, .Lfast_second_level_miss_no_pmd
- /* Read ptevaddr and convert to top of page-table page.
- *
- * vpnval = read_ptevaddr_register() & PAGE_MASK;
- * vpnval += DTLB_WAY_PGTABLE;
- * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
- * write_dtlb_entry (pteval, vpnval);
- *
- * The messy computation for 'pteval' above really simplifies
- * into the following:
- *
- * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
- * | PAGE_DIRECTORY
- */
- movi a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
- add a0, a0, a1 # pmdval - PAGE_OFFSET
- extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
- xor a0, a0, a1
- movi a1, _PAGE_DIRECTORY
- or a0, a0, a1 # ... | PAGE_DIRECTORY
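- /* The add/extui/xor sequence above is just the simplification from the
- * comment, in C terms (illustrative sketch):
- *
- *	pteval  = pmdval + (PHYS_OFFSET - PAGE_OFFSET);
- *	pteval ^= pteval & ((1U << PAGE_SHIFT) - 1);	// i.e. pteval &= PAGE_MASK
- *	pteval |= _PAGE_DIRECTORY;
- */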
- /*
- * We utilize all three wired-ways (7-9) to hold pmd translations.
- * Memory regions are mapped to the DTLBs according to bits 28 and 29.
- * This allows us to map the three most common regions to three different
- * DTLBs:
- * 0,1 -> way 7 program (0040.0000) and virtual (c000.0000)
- * 2 -> way 8 shared libraries (2000.0000)
- * 3 -> way 9 stack (3000.0000)
- */
- extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3
- rsr a1, ptevaddr
- addx2 a3, a3, a3 # -> 0,3,6,9
- srli a1, a1, PAGE_SHIFT
- extui a3, a3, 2, 2 # -> 0,0,1,2
- slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
- addi a3, a3, DTLB_WAY_PGD
- add a1, a1, a3 # ... + way_number
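- /* The way selection above, in C terms (illustrative sketch; per the comment
- * above, DTLB_WAY_PGD is the first of the three wired ways):
- *
- *	region = (excvaddr >> 28) & 3;			// 0,1,2,3
- *	way    = DTLB_WAY_PGD + ((region * 3) >> 2);	// -> +0,+0,+1,+2
- *	entry  = (ptevaddr & PAGE_MASK) + way;
- */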
- .Lfast_second_level_miss_wdtlb:
- wdtlb a0, a1
- dsync
- /* Exit critical section. */
- .Lfast_second_level_miss_skip_wdtlb:
- rsr a3, excsave1
- movi a0, 0
- s32i a0, a3, EXC_TABLE_FIXUP
- /* Restore the working registers, and return. */
- l32i a0, a2, PT_AREG0
- l32i a1, a2, PT_AREG1
- l32i a3, a2, PT_AREG3
- l32i a2, a2, PT_DEPC
- bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
- /* Restore excsave1 and return. */
- rsr a2, depc
- rfe
- /* Return from double exception. */
- 1: xsr a2, depc
- esync
- rfde
- .Lfast_second_level_miss_no_mm:
- l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
- bnez a0, .Lfast_second_level_miss_continue
- /* Even more unlikely case: active_mm == 0.
- * We can get here with an NMI in the middle of a context_switch that
- * touches the vmalloc area.
- */
- movi a0, init_mm
- j .Lfast_second_level_miss_continue
- .Lfast_second_level_miss_no_pmd:
- #if (DCACHE_WAY_SIZE > PAGE_SIZE)
- /* Special case for cache aliasing.
- * We (should) only get here if a clear_user_page, copy_user_page
- * or the aliased cache flush functions got preemptively interrupted
- * by another task. Re-establish temporary mapping to the
- * TLBTEMP_BASE areas.
- */
- /* We shouldn't be in a double exception */
- l32i a0, a2, PT_DEPC
- bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lfast_second_level_miss_slow
- /* Make sure the exception originated in the special functions */
- movi a0, __tlbtemp_mapping_start
- rsr a3, epc1
- bltu a3, a0, .Lfast_second_level_miss_slow
- movi a0, __tlbtemp_mapping_end
- bgeu a3, a0, .Lfast_second_level_miss_slow
- /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
- movi a3, TLBTEMP_BASE_1
- rsr a0, excvaddr
- bltu a0, a3, .Lfast_second_level_miss_slow
- addi a1, a0, -TLBTEMP_SIZE
- bgeu a1, a3, .Lfast_second_level_miss_slow
- /* Check if we have to restore an ITLB mapping. */
- movi a1, __tlbtemp_mapping_itlb
- rsr a3, epc1
- sub a3, a3, a1
- /* Calculate VPN */
- movi a1, PAGE_MASK
- and a1, a1, a0
- /* Jump for ITLB entry */
- bgez a3, 1f
- /* We can use up to two TLBTEMP areas, one for src and one for dst. */
- extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
- add a1, a3, a1
- /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
- mov a0, a6
- movnez a0, a7, a3
- j .Lfast_second_level_miss_wdtlb
- /* ITLB entry. We only use dst in a6. */
- 1: witlb a6, a1
- isync
- j .Lfast_second_level_miss_skip_wdtlb
- #endif // DCACHE_WAY_SIZE > PAGE_SIZE
- /* Invalid PGD, default exception handling */
- .Lfast_second_level_miss_slow:
- rsr a1, depc
- s32i a1, a2, PT_AREG2
- mov a1, a2
- rsr a2, ps
- bbsi.l a2, PS_UM_BIT, 1f
- call0 _kernel_exception
- 1: call0 _user_exception
- ENDPROC(fast_second_level_miss)
- /*
- * StoreProhibitedException
- *
- * Update the pte and invalidate the itlb mapping for this pte.
- *
- * Entry condition:
- *
- * a0: trashed, original value saved on stack (PT_AREG0)
- * a1: a1
- * a2: new stack pointer, original in DEPC
- * a3: a3
- * depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: dispatch table
- *
- * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
- * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
- */
- ENTRY(fast_store_prohibited)
- /* Save a1 and a3. */
- s32i a1, a2, PT_AREG1
- s32i a3, a2, PT_AREG3
- GET_CURRENT(a1,a2)
- l32i a0, a1, TASK_MM # tsk->mm
- beqz a0, .Lfast_store_no_mm
- .Lfast_store_continue:
- rsr a1, excvaddr # fault address
- _PGD_OFFSET(a0, a1, a3)
- l32i a0, a0, 0
- beqz a0, .Lfast_store_slow
- /*
- * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
- * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
- */
- _PTE_OFFSET(a0, a1, a3)
- l32i a3, a0, 0 # read pteval
- movi a1, _PAGE_CA_INVALID
- ball a3, a1, .Lfast_store_slow
- bbci.l a3, _PAGE_WRITABLE_BIT, .Lfast_store_slow
- movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
- or a3, a3, a1
- rsr a1, excvaddr
- s32i a3, a0, 0
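- /* The fast-path PTE update above, in C terms (illustrative sketch;
- * "slow_path" stands for the fall-back to the C fault handler reached
- * via .Lfast_store_slow):
- *
- *	pteval = *pte;
- *	if ((pteval & _PAGE_CA_INVALID) == _PAGE_CA_INVALID ||
- *	    !(pteval & (1U << _PAGE_WRITABLE_BIT)))
- *		goto slow_path;
- *	*pte = pteval | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
- */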
- /* We need to flush the cache if we have page coloring. */
- #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
- dhwb a0, 0
- #endif
- pdtlb a0, a1
- wdtlb a3, a0
- /* Exit critical section. */
- movi a0, 0
- rsr a3, excsave1
- s32i a0, a3, EXC_TABLE_FIXUP
- /* Restore the working registers, and return. */
- l32i a3, a2, PT_AREG3
- l32i a1, a2, PT_AREG1
- l32i a0, a2, PT_AREG0
- l32i a2, a2, PT_DEPC
- bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
- rsr a2, depc
- rfe
- /* Double exception. Restore FIXUP handler and return. */
- 1: xsr a2, depc
- esync
- rfde
- .Lfast_store_no_mm:
- l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
- j .Lfast_store_continue
- /* If there was a problem, handle fault in C */
- .Lfast_store_slow:
- rsr a1, excvaddr
- pdtlb a0, a1
- bbci.l a0, DTLB_HIT_BIT, 1f
- idtlb a0
- 1:
- rsr a3, depc # still holds a2
- s32i a3, a2, PT_AREG2
- mov a1, a2
- rsr a2, ps
- bbsi.l a2, PS_UM_BIT, 1f
- call0 _kernel_exception
- 1: call0 _user_exception
- ENDPROC(fast_store_prohibited)
- #endif /* CONFIG_MMU */
- .text
- /*
- * System Calls.
- *
- * void system_call (struct pt_regs* regs, int exccause)
- * a2 a3
- */
- .literal_position
- ENTRY(system_call)
- #if defined(__XTENSA_WINDOWED_ABI__)
- abi_entry_default
- #elif defined(__XTENSA_CALL0_ABI__)
- abi_entry(12)
- s32i a0, sp, 0
- s32i abi_saved0, sp, 4
- s32i abi_saved1, sp, 8
- mov abi_saved0, a2
- #else
- #error Unsupported Xtensa ABI
- #endif
- /* regs->syscall = regs->areg[2] */
- l32i a7, abi_saved0, PT_AREG2
- s32i a7, abi_saved0, PT_SYSCALL
- GET_THREAD_INFO(a4, a1)
- l32i abi_saved1, a4, TI_FLAGS
- movi a4, _TIF_WORK_MASK
- and abi_saved1, abi_saved1, a4
- beqz abi_saved1, 1f
- mov abi_arg0, abi_saved0
- abi_call do_syscall_trace_enter
- beqz abi_rv, .Lsyscall_exit
- l32i a7, abi_saved0, PT_SYSCALL
- 1:
- /* syscall = sys_call_table[syscall_nr] */
- movi a4, sys_call_table
- movi a5, __NR_syscalls
- movi abi_rv, -ENOSYS
- bgeu a7, a5, 1f
- addx4 a4, a7, a4
- l32i abi_tmp0, a4, 0
- /* Load args: arg0 - arg5 are passed via regs. */
- l32i abi_arg0, abi_saved0, PT_AREG6
- l32i abi_arg1, abi_saved0, PT_AREG3
- l32i abi_arg2, abi_saved0, PT_AREG4
- l32i abi_arg3, abi_saved0, PT_AREG5
- l32i abi_arg4, abi_saved0, PT_AREG8
- l32i abi_arg5, abi_saved0, PT_AREG9
- abi_callx abi_tmp0
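- /* The dispatch above, in C terms (illustrative sketch; syscall_fn is the
- * entry loaded from sys_call_table):
- *
- *	retval = syscall_fn(regs->areg[6], regs->areg[3], regs->areg[4],
- *			    regs->areg[5], regs->areg[8], regs->areg[9]);
- */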
- 1: /* regs->areg[2] = return_value */
- s32i abi_rv, abi_saved0, PT_AREG2
- bnez abi_saved1, 1f
- .Lsyscall_exit:
- #if defined(__XTENSA_WINDOWED_ABI__)
- abi_ret_default
- #elif defined(__XTENSA_CALL0_ABI__)
- l32i a0, sp, 0
- l32i abi_saved0, sp, 4
- l32i abi_saved1, sp, 8
- abi_ret(12)
- #else
- #error Unsupported Xtensa ABI
- #endif
- 1:
- mov abi_arg0, abi_saved0
- abi_call do_syscall_trace_leave
- j .Lsyscall_exit
- ENDPROC(system_call)
- /*
- * Macro to spill live registers onto the kernel stack.
- *
- * Entry condition: ps.woe is set, ps.excm is cleared
- * Exit condition: windowstart has single bit set
- * May clobber: a12, a13
- */
- .macro spill_registers_kernel
- #if XCHAL_NUM_AREGS > 16
- call12 1f
- _j 2f
- retw
- .align 4
- 1:
- _entry a1, 48
- addi a12, a0, 3
- #if XCHAL_NUM_AREGS > 32
- .rept (XCHAL_NUM_AREGS - 32) / 12
- _entry a1, 48
- mov a12, a0
- .endr
- #endif
- _entry a1, 16
- #if XCHAL_NUM_AREGS % 12 == 0
- mov a8, a8
- #elif XCHAL_NUM_AREGS % 12 == 4
- mov a12, a12
- #elif XCHAL_NUM_AREGS % 12 == 8
- mov a4, a4
- #endif
- retw
- 2:
- #else
- mov a12, a12
- #endif
- .endm
- /*
- * Task switch.
- *
- * struct task* _switch_to (struct task* prev, struct task* next)
- * a2 a2 a3
- */
- ENTRY(_switch_to)
- #if defined(__XTENSA_WINDOWED_ABI__)
- abi_entry(XTENSA_SPILL_STACK_RESERVE)
- #elif defined(__XTENSA_CALL0_ABI__)
- abi_entry(16)
- s32i a12, sp, 0
- s32i a13, sp, 4
- s32i a14, sp, 8
- s32i a15, sp, 12
- #else
- #error Unsupported Xtensa ABI
- #endif
- mov a11, a3 # and 'next' (a3)
- l32i a4, a2, TASK_THREAD_INFO
- l32i a5, a3, TASK_THREAD_INFO
- save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
- #if THREAD_RA > 1020 || THREAD_SP > 1020
- addi a10, a2, TASK_THREAD
- s32i a0, a10, THREAD_RA - TASK_THREAD # save return address
- s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer
- #else
- s32i a0, a2, THREAD_RA # save return address
- s32i a1, a2, THREAD_SP # save stack pointer
- #endif
- #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
- movi a6, __stack_chk_guard
- l32i a8, a3, TASK_STACK_CANARY
- s32i a8, a6, 0
- #endif
- /* Disable ints while we manipulate the stack pointer. */
- irq_save a14, a3
- rsync
- /* Switch CPENABLE */
- #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
- l32i a3, a5, THREAD_CPENABLE
- #ifdef CONFIG_SMP
- beqz a3, 1f
- memw # pairs with memw (2) in fast_coprocessor
- l32i a6, a5, THREAD_CP_OWNER_CPU
- l32i a7, a5, THREAD_CPU
- beq a6, a7, 1f # load 0 into CPENABLE if current CPU is not the owner
- movi a3, 0
- 1:
- #endif
- wsr a3, cpenable
- #endif
- #if XCHAL_HAVE_EXCLUSIVE
- l32i a3, a5, THREAD_ATOMCTL8
- getex a3
- s32i a3, a4, THREAD_ATOMCTL8
- #endif
- /* Flush register file. */
- #if defined(__XTENSA_WINDOWED_ABI__)
- spill_registers_kernel
- #endif
- /* Set kernel stack (and leave critical section)
- * Note: It's safe to set it here. The stack will not be overwritten
- * because the kernel stack will only be loaded again after
- * we return from kernel space.
- */
- rsr a3, excsave1 # exc_table
- addi a7, a5, PT_REGS_OFFSET
- s32i a7, a3, EXC_TABLE_KSTK
- /* restore context of the task 'next' */
- l32i a0, a11, THREAD_RA # restore return address
- l32i a1, a11, THREAD_SP # restore stack pointer
- load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
- wsr a14, ps
- rsync
- #if defined(__XTENSA_WINDOWED_ABI__)
- abi_ret(XTENSA_SPILL_STACK_RESERVE)
- #elif defined(__XTENSA_CALL0_ABI__)
- l32i a12, sp, 0
- l32i a13, sp, 4
- l32i a14, sp, 8
- l32i a15, sp, 12
- abi_ret(16)
- #else
- #error Unsupported Xtensa ABI
- #endif
- ENDPROC(_switch_to)
- ENTRY(ret_from_fork)
- /* void schedule_tail (struct task_struct *prev)
- * Note: prev is still in abi_arg0 (return value from fake call frame)
- */
- abi_call schedule_tail
- mov abi_arg0, a1
- abi_call do_syscall_trace_leave
- j common_exception_return
- ENDPROC(ret_from_fork)
- /*
- * Kernel thread creation helper
- * On entry, set up by copy_thread: abi_saved0 = thread_fn,
- * abi_saved1 = thread_fn arg. Left from _switch_to: abi_arg0 = prev
- */
- ENTRY(ret_from_kernel_thread)
- abi_call schedule_tail
- mov abi_arg0, abi_saved1
- abi_callx abi_saved0
- j common_exception_return
- ENDPROC(ret_from_kernel_thread)
- #ifdef CONFIG_HIBERNATION
- .section .bss, "aw"
- .align 4
- .Lsaved_regs:
- #if defined(__XTENSA_WINDOWED_ABI__)
- .fill 2, 4
- #elif defined(__XTENSA_CALL0_ABI__)
- .fill 6, 4
- #else
- #error Unsupported Xtensa ABI
- #endif
- .align XCHAL_NCP_SA_ALIGN
- .Lsaved_user_regs:
- .fill XTREGS_USER_SIZE, 1
- .previous
- ENTRY(swsusp_arch_suspend)
- abi_entry_default
- movi a2, .Lsaved_regs
- movi a3, .Lsaved_user_regs
- s32i a0, a2, 0
- s32i a1, a2, 4
- save_xtregs_user a3 a4 a5 a6 a7 a8 0
- #if defined(__XTENSA_WINDOWED_ABI__)
- spill_registers_kernel
- #elif defined(__XTENSA_CALL0_ABI__)
- s32i a12, a2, 8
- s32i a13, a2, 12
- s32i a14, a2, 16
- s32i a15, a2, 20
- #else
- #error Unsupported Xtensa ABI
- #endif
- abi_call swsusp_save
- mov a2, abi_rv
- abi_ret_default
- ENDPROC(swsusp_arch_suspend)
- ENTRY(swsusp_arch_resume)
- abi_entry_default
- #if defined(__XTENSA_WINDOWED_ABI__)
- spill_registers_kernel
- #endif
- movi a2, restore_pblist
- l32i a2, a2, 0
- .Lcopy_pbe:
- l32i a3, a2, PBE_ADDRESS
- l32i a4, a2, PBE_ORIG_ADDRESS
- __loopi a3, a9, PAGE_SIZE, 16
- l32i a5, a3, 0
- l32i a6, a3, 4
- l32i a7, a3, 8
- l32i a8, a3, 12
- addi a3, a3, 16
- s32i a5, a4, 0
- s32i a6, a4, 4
- s32i a7, a4, 8
- s32i a8, a4, 12
- addi a4, a4, 16
- __endl a3, a9
- l32i a2, a2, PBE_NEXT
- bnez a2, .Lcopy_pbe
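- /* The loop above, in C terms (illustrative sketch; struct pbe and
- * restore_pblist are the generic hibernation structures whose offsets
- * PBE_ADDRESS/PBE_ORIG_ADDRESS/PBE_NEXT are used here):
- *
- *	for (pbe = restore_pblist; pbe; pbe = pbe->next)
- *		memcpy(pbe->orig_address, pbe->address, PAGE_SIZE);
- */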
- movi a2, .Lsaved_regs
- movi a3, .Lsaved_user_regs
- l32i a0, a2, 0
- l32i a1, a2, 4
- load_xtregs_user a3 a4 a5 a6 a7 a8 0
- #if defined(__XTENSA_CALL0_ABI__)
- l32i a12, a2, 8
- l32i a13, a2, 12
- l32i a14, a2, 16
- l32i a15, a2, 20
- #endif
- movi a2, 0
- abi_ret_default
- ENDPROC(swsusp_arch_resume)
- #endif