/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * Following are fixed section helper macros.
 *
 * EXC_REAL_BEGIN/END - real, unrelocated exception vectors
 * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors
 * TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these)
 * TRAMP_VIRT_BEGIN - virt, unreloc helpers (in practice, real can use)
 * EXC_COMMON - After switching to virtual, relocated mode.
 */

#define EXC_REAL_BEGIN(name, start, size) \
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_REAL_END(name, start, size) \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_VIRT_BEGIN(name, start, size) \
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_VIRT_END(name, start, size) \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_COMMON_BEGIN(name) \
	USE_TEXT_SECTION(); \
	.balign IFETCH_ALIGN_BYTES; \
	.global name; \
	_ASM_NOKPROBE_SYMBOL(name); \
	DEFINE_FIXED_SYMBOL(name, text); \
name:

#define TRAMP_REAL_BEGIN(name) \
	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)

#define TRAMP_VIRT_BEGIN(name) \
	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)

#define EXC_REAL_NONE(start, size) \
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)

#define EXC_VIRT_NONE(start, size) \
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the low
 * part of label. This requires that the label be within 64KB of kernelbase, and
 * that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label) \
	ld	reg,PACAKBASE(r13);	/* get high part of &label */ \
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label, section) \
	ld	reg,PACAKBASE(r13); \
	ori	reg,reg,(ABS_ADDR(label, section))@l
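/*
 * Illustrative expansion (editor's sketch, the addresses are made up):
 * with paca->kernelbase = 0xc000000000000000 and a handler at absolute
 * offset 0x1234, LOAD_HANDLER(r10, label) emits:
 *
 *	ld	r10,PACAKBASE(r13)	// r10 = 0xc000000000000000
 *	ori	r10,r10,0x1234		// r10 = 0xc000000000001234
 *
 * ori can only supply the low 16 bits, hence the 64KB constraints
 * described above.
 */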
/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label, section) \
	ld	reg,PACAKBASE(r13); \
	ori	reg,reg,(ABS_ADDR(label, section))@l; \
	addis	reg,reg,(ABS_ADDR(label, section))@h
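/*
 * Editor's note: the far variant works because ori is a logical or of
 * the low 16 bits (no carry), so addis can then add the @h bits,
 * extending LOAD_HANDLER's 64KB reach to a 32-bit offset from kernelbase.
 */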
/*
 * Interrupt code generation macros
 */
#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
#define IAREA		.L_IAREA_\name\()	/* PACA save area */
#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
#define ICFAR		.L_ICFAR_\name\()	/* Uses CFAR */
#define ICFAR_IF_HVMODE	.L_ICFAR_IF_HVMODE_\name\() /* Uses CFAR if HV */
#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
#define __ISTACK(name)	.L_ISTACK_ ## name
#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */

#define INT_DEFINE_BEGIN(n) \
.macro int_define_ ## n name
#define INT_DEFINE_END(n) \
.endm ; \
int_define_ ## n n ; \
do_define_int n

.macro do_define_int name
	.ifndef IVEC
		.error "IVEC not defined"
	.endif
	.ifndef IHSRR
		IHSRR=0
	.endif
	.ifndef IHSRR_IF_HVMODE
		IHSRR_IF_HVMODE=0
	.endif
	.ifndef IAREA
		IAREA=PACA_EXGEN
	.endif
	.ifndef IVIRT
		IVIRT=1
	.endif
	.ifndef IISIDE
		IISIDE=0
	.endif
	.ifndef ICFAR
		ICFAR=1
	.endif
	.ifndef ICFAR_IF_HVMODE
		ICFAR_IF_HVMODE=0
	.endif
	.ifndef IDAR
		IDAR=0
	.endif
	.ifndef IDSISR
		IDSISR=0
	.endif
	.ifndef IBRANCH_TO_COMMON
		IBRANCH_TO_COMMON=1
	.endif
	.ifndef IREALMODE_COMMON
		IREALMODE_COMMON=0
	.else
		.if ! IBRANCH_TO_COMMON
			.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
		.endif
	.endif
	.ifndef IMASK
		IMASK=0
	.endif
	.ifndef IKVM_REAL
		IKVM_REAL=0
	.endif
	.ifndef IKVM_VIRT
		IKVM_VIRT=0
	.endif
	.ifndef ISTACK
		ISTACK=1
	.endif
	.ifndef IKUAP
		IKUAP=1
	.endif
.endm
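/*
 * Illustrative usage (mirrors the real system_reset definition later in
 * this file): an interrupt is described by assigning the flags above
 * between the BEGIN/END markers, and do_define_int fills in defaults for
 * anything left unset:
 *
 *	INT_DEFINE_BEGIN(system_reset)
 *		IVEC=0x100
 *		IAREA=PACA_EXNMI
 *		IVIRT=0
 *		ISTACK=0
 *		IKVM_REAL=1
 *	INT_DEFINE_END(system_reset)
 */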
/*
 * All interrupts which set HSRR registers, as well as SRESET and MCE and
 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
 * so they all generally need to test whether they were taken in guest context.
 *
 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
 * taken with MSR[HV]=0.
 *
 * Interrupts which set SRR registers (with the above exceptions) do not
 * elevate to MSR[HV]=1 mode, though most can be taken when running with
 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
 * not need to test whether a guest is running because they get delivered to
 * the guest directly, including nested HV KVM guests.
 *
 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
 * delivered to the real-mode entry point, therefore such interrupts only test
 * KVM in their real mode handlers, and only when PR KVM is possible.
 *
 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
 * delivered in real-mode when the MMU is in hash mode because the MMU
 * registers are not set appropriately to translate host addresses. In nested
 * radix mode these can be delivered in virt-mode as the host translations are
 * used implicitly (see: effective LPID, effective PID).
 */

/*
 * If an interrupt is taken while a guest is running, it is immediately routed
 * to KVM to handle.
 */
.macro KVMTEST name handler
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	/* HSRR variants have the 0x2 bit added to their trap number */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	li	r10,(IVEC + 0x2)
	FTR_SECTION_ELSE
	li	r10,(IVEC)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	li	r10,(IVEC + 0x2)
	.else
	li	r10,(IVEC)
	.endif
	bne	\handler
#endif
.endm
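/*
 * Editor's note: KVMTEST branches on the cr0 result of the
 * HSTATE_IN_GUEST test above, and leaves the trap number in r10 for the
 * KVM handler (with 0x2 or'd in for HSRR variants, per the comment in
 * the macro body).
 */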
/*
 * This is the BOOK3S interrupt entry code macro.
 *
 * This can result in one of several things happening:
 * - Branch to the _common handler, relocated, in virtual mode.
 *   These are normal interrupts (synchronous and asynchronous) handled by
 *   the kernel.
 * - Branch to KVM, relocated but real mode interrupts remain in real mode.
 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by
 *   / intended for host or guest kernel, but KVM must always be involved
 *   because the machine state is set for guest execution.
 * - Branch to the masked handler, unrelocated.
 *   These occur when maskable asynchronous interrupts are taken with the
 *   irq_soft_mask set.
 * - Branch to an "early" handler in real mode but relocated.
 *   This is done if early=1. MCE and HMI use these to handle errors in real
 *   mode.
 * - Fall through and continue executing in real, unrelocated mode.
 *   This is done if early=2.
 */
.macro GEN_BRANCH_TO_COMMON name, virt
	.if IREALMODE_COMMON
	LOAD_HANDLER(r10, \name\()_common)
	mtctr	r10
	bctr
	.else
	.if \virt
#ifndef CONFIG_RELOCATABLE
	b	\name\()_common_virt
#else
	LOAD_HANDLER(r10, \name\()_common_virt)
	mtctr	r10
	bctr
#endif
	.else
	LOAD_HANDLER(r10, \name\()_common_real)
	mtctr	r10
	bctr
	.endif
	.endif
.endm
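/*
 * Editor's note: the LOAD_HANDLER/mtctr/bctr sequences above are needed
 * because, as the layout comment later in this file explains, fixed
 * section code must not branch directly beyond __end_interrupts on
 * CONFIG_RELOCATABLE kernels; only the non-relocatable virt case can use
 * a plain relative branch.
 */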
.macro GEN_INT_ENTRY name, virt, ool=0
	SET_SCRATCH0(r13)			/* save r13 */
	GET_PACA(r13)
	std	r9,IAREA+EX_R9(r13)		/* save r9 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_PPR
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	std	r10,IAREA+EX_R10(r13)		/* save r10 */
	.if ICFAR
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.elseif ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(69)
	mfspr	r10,SPRN_CFAR
  END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
FTR_SECTION_ELSE
  BEGIN_FTR_SECTION_NESTED(69)
	li	r10,0
  END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.endif
	.if \ool
	.if !\virt
	b	tramp_real_\name
	.pushsection .text
	TRAMP_REAL_BEGIN(tramp_real_\name)
	.else
	b	tramp_virt_\name
	.pushsection .text
	TRAMP_VIRT_BEGIN(tramp_virt_\name)
	.endif
	.endif
BEGIN_FTR_SECTION
	std	r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	.if ICFAR || ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
	std	r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.endif
	INTERRUPT_TO_KERNEL
	mfctr	r10
	std	r10,IAREA+EX_CTR(r13)
	mfcr	r9
	std	r11,IAREA+EX_R11(r13)		/* save r11 - r12 */
	std	r12,IAREA+EX_R12(r13)

	/*
	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
	 * because a d-side MCE will clobber those registers so is
	 * not recoverable if they are live.
	 */
	GET_SCRATCH0(r10)
	std	r10,IAREA+EX_R13(r13)
	.if IDAR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDAR
	.else
	mfspr	r10,SPRN_DAR
	.endif
	std	r10,IAREA+EX_DAR(r13)
	.endif
	.if IDSISR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDSISR
	.else
	mfspr	r10,SPRN_DSISR
	.endif
	stw	r10,IAREA+EX_DSISR(r13)
	.endif

	.if IHSRR_IF_HVMODE
BEGIN_FTR_SECTION
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
FTR_SECTION_ELSE
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	.endif

	.if IBRANCH_TO_COMMON
	GEN_BRANCH_TO_COMMON \name \virt
	.endif

	.if \ool
	.popsection
	.endif
.endm

/*
 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
 * entry, except in the case of the real-mode handlers which require
 * __GEN_REALMODE_COMMON_ENTRY.
 *
 * This switches to virtual mode and sets MSR[RI].
 */
.macro __GEN_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif

	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	/* MSR[RI] is clear iff using SRR regs */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	xori	r10,r10,MSR_RI
	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
	.elseif ! IHSRR
	xori	r10,r10,MSR_RI
	.endif
	mtmsrd	r10

	.if IVIRT
	.if IKVM_VIRT
	b	1f /* skip the virt test coming from real */
	.endif

	.balign IFETCH_ALIGN_BYTES
DEFINE_FIXED_SYMBOL(\name\()_common_virt, text)
\name\()_common_virt:
	.if IKVM_VIRT
		KVMTEST \name kvm_interrupt
1:
	.endif
	.endif /* IVIRT */
.endm

/*
 * Don't switch to virt mode. Used for early MCE and HMI handlers that
 * want to run in real mode.
 */
.macro __GEN_REALMODE_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif
.endm

.macro __GEN_COMMON_BODY name
	.if IMASK
		.if ! ISTACK
		.error "No support for masked interrupt to use custom stack"
		.endif

		/* If coming from user, skip soft-mask tests. */
		andi.	r10,r12,MSR_PR
		bne	3f

		/*
		 * Kernel code running below __end_soft_masked may be
		 * implicitly soft-masked if it is within the regions
		 * in the soft mask table.
		 */
		LOAD_HANDLER(r10, __end_soft_masked)
		cmpld	r11,r10
		bge+	1f

		/* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
		mtctr	r12
		stw	r9,PACA_EXGEN+EX_CCR(r13)
		SEARCH_SOFT_MASK_TABLE
		cmpdi	r12,0
		mfctr	r12		/* Restore r12 to SRR1 */
		lwz	r9,PACA_EXGEN+EX_CCR(r13)
		beq	1f		/* Not in soft-mask table */
		li	r10,IMASK
		b	2f		/* In soft-mask table, always mask */

		/* Test the soft mask state against our interrupt's bit */
1:		lbz	r10,PACAIRQSOFTMASK(r13)
2:		andi.	r10,r10,IMASK
		/* Associate vector numbers with bits in paca->irq_happened */
		.if IVEC == 0x500 || IVEC == 0xea0
		li	r10,PACA_IRQ_EE
		.elseif IVEC == 0x900
		li	r10,PACA_IRQ_DEC
		.elseif IVEC == 0xa00 || IVEC == 0xe80
		li	r10,PACA_IRQ_DBELL
		.elseif IVEC == 0xe60
		li	r10,PACA_IRQ_HMI
		.elseif IVEC == 0xf00
		li	r10,PACA_IRQ_PMI
		.else
		.abort "Bad maskable vector"
		.endif

		.if IHSRR_IF_HVMODE
		BEGIN_FTR_SECTION
		bne	masked_Hinterrupt
		FTR_SECTION_ELSE
		bne	masked_interrupt
		ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
		.elseif IHSRR
		bne	masked_Hinterrupt
		.else
		bne	masked_interrupt
		.endif
	.endif

	.if ISTACK
	andi.	r10,r12,MSR_PR		/* See if coming from user */
3:	mr	r10,r1			/* Save r1 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack */
	beq-	100f
	ld	r1,PACAKSAVE(r13)	/* kernel stack to use */
100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace */
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
	.endif

	std	r9,_CCR(r1)		/* save CR in stackframe */
	std	r11,_NIP(r1)		/* save SRR0 in stackframe */
	std	r12,_MSR(r1)		/* save SRR1 in stackframe */
	std	r10,0(r1)		/* make stack chain pointer */
	std	r0,GPR0(r1)		/* save r0 in stackframe */
	std	r10,GPR1(r1)		/* save r1 in stackframe */

	/* Mark our [H]SRRs valid for return */
	li	r10,1
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	stb	r10,PACAHSRR_VALID(r13)
	FTR_SECTION_ELSE
	stb	r10,PACASRR_VALID(r13)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	stb	r10,PACAHSRR_VALID(r13)
	.else
	stb	r10,PACASRR_VALID(r13)
	.endif

	.if ISTACK
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1, cr0
	.endif
	beq	101f			/* if from kernel mode */
BEGIN_FTR_SECTION
	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca */
	std	r9,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
	.else
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1
	.endif
	.endif

	/* Save original regs values from save area to stack frame. */
	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe */
	ld	r10,IAREA+EX_R10(r13)
	std	r9,GPR9(r1)
	std	r10,GPR10(r1)
	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe */
	ld	r10,IAREA+EX_R12(r13)
	ld	r11,IAREA+EX_R13(r13)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)

	SAVE_NVGPRS(r1)

	.if IDAR
	.if IISIDE
	ld	r10,_NIP(r1)
	.else
	ld	r10,IAREA+EX_DAR(r13)
	.endif
	std	r10,_DAR(r1)
	.endif

	.if IDSISR
	.if IISIDE
	ld	r10,_MSR(r1)
	lis	r11,DSISR_SRR1_MATCH_64S@h
	and	r10,r10,r11
	.else
	lwz	r10,IAREA+EX_DSISR(r13)
	.endif
	std	r10,_DSISR(r1)
	.endif

BEGIN_FTR_SECTION
	.if ICFAR || ICFAR_IF_HVMODE
	ld	r10,IAREA+EX_CFAR(r13)
	.else
	li	r10,0
	.endif
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,IAREA+EX_CTR(r13)
	std	r10,_CTR(r1)
	std	r2,GPR2(r1)		/* save r2 in stackframe */
	SAVE_GPRS(3, 8, r1)		/* save r3 - r8 in stackframe */
	mflr	r9			/* Get LR, later save to stack */
	LOAD_PACA_TOC()			/* get kernel TOC into r2 */
	std	r9,_LINK(r1)
	lbz	r10,PACAIRQSOFTMASK(r13)
	mfspr	r11,SPRN_XER		/* save XER in stackframe */
	std	r10,SOFTE(r1)
	std	r11,_XER(r1)
	li	r9,IVEC
	std	r9,_TRAP(r1)		/* set trap number */
	li	r10,0
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r10,RESULT(r1)		/* clear regs->result */
	std	r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */
.endm

/*
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
 */
.macro GEN_COMMON name
	__GEN_COMMON_ENTRY \name
	__GEN_COMMON_BODY \name
.endm
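/*
 * Editor's note on the table layout, as read from the search loop below:
 * each restart table entry is three doublewords { start, end, fixup }.
 * r11 holds the interrupted NIP; on exit r12 is the fixup address if
 * start <= r11 < end for some entry, else 0. Clobbers r9, r10, r12.
 */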
.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	LOAD_PACA_TOC()
	LOAD_REG_ADDR(r9, __start___restart_table)
	LOAD_REG_ADDR(r10, __stop___restart_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	ld	r12,16(r9)
	b	303f
301:
	addi	r9,r9,24
	b	300b
302:
	li	r12,0
303:
.endm
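/*
 * Editor's note: same range search as SEARCH_RESTART_TABLE, but over
 * two-doubleword { start, end } entries; r12 is set to 1 if r11 (the
 * interrupted NIP) falls inside a soft-masked range, else 0.
 */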
.macro SEARCH_SOFT_MASK_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	LOAD_PACA_TOC()
	LOAD_REG_ADDR(r9, __start___soft_mask_table)
	LOAD_REG_ADDR(r10, __stop___soft_mask_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	li	r12,1
	b	303f
301:
	addi	r9,r9,16
	b	300b
302:
	li	r12,0
303:
.endm

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 */
.macro EXCEPTION_RESTORE_REGS hsrr=0
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	li	r10,0
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	stb	r10,PACAHSRR_VALID(r13)
	.else
	mtspr	SPRN_SRR1,r9
	stb	r10,PACASRR_VALID(r13)
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	REST_GPRS(2, 13, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm
/*
 * EARLY_BOOT_FIXUP - Fix real-mode interrupt with wrong endian in early boot.
 *
 * There's a short window during boot where although the kernel is running
 * little endian, any exceptions will cause the CPU to switch back to big
 * endian. For example a WARN() boils down to a trap instruction, which will
 * cause a program check, and we end up here but with the CPU in big endian
 * mode. The first instruction of the program check handler (in GEN_INT_ENTRY
 * below) is an mtsprg, which when executed in the wrong endian is an lhzu with
 * a ~3GB displacement from r3. The content of r3 is random, so that is a load
 * from some random location, and depending on the system can easily lead to a
 * checkstop, or an infinitely recursive page fault.
 *
 * So to handle that case we have a trampoline here that can detect we are in
 * the wrong endian and flip us back to the correct endian. We can't flip
 * MSR[LE] using mtmsr, so we have to use rfid. That requires backing up SRR0/1
 * as well as a GPR. To do that we use SPRG0/2/3, as SPRG1 is already used for
 * the paca. SPRG3 is user readable, but this trampoline is only active very
 * early in boot, and SPRG3 will be reinitialised in vdso_getcpu_init() before
 * userspace starts.
 */
.macro EARLY_BOOT_FIXUP
BEGIN_FTR_SECTION
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	tdi	0,0,0x48	// Trap never, or in reverse endian: b . + 8
	b	2f		// Skip trampoline if endian is correct
	.long	0xa643707d	// mtsprg 0, r11	Backup r11
	.long	0xa6027a7d	// mfsrr0 r11
	.long	0xa643727d	// mtsprg 2, r11	Backup SRR0 in SPRG2
	.long	0xa6027b7d	// mfsrr1 r11
	.long	0xa643737d	// mtsprg 3, r11	Backup SRR1 in SPRG3
	.long	0xa600607d	// mfmsr r11
	.long	0x01006b69	// xori r11, r11, 1	Invert MSR[LE]
	.long	0xa6037b7d	// mtsrr1 r11
	/*
	 * This is 'li r11,1f' where 1f is the absolute address of that
	 * label, byteswapped into the SI field of the instruction.
	 */
	.long	0x00006039 | \
		((ABS_ADDR(1f, real_vectors) & 0x00ff) << 24) | \
		((ABS_ADDR(1f, real_vectors) & 0xff00) << 8)
	.long	0xa6037a7d	// mtsrr0 r11
	.long	0x2400004c	// rfid
1:
	mfsprg	r11, 3
	mtsrr1	r11		// Restore SRR1
	mfsprg	r11, 2
	mtsrr0	r11		// Restore SRR0
	mfsprg	r11, 0		// Restore r11
2:
#endif
	/*
	 * program check could hit at any time, and pseries can not block
	 * MSR[ME] in early boot. So check if there is anything useful in r13
	 * yet, and spin forever if not.
	 */
	mtsprg	0, r11
	mfcr	r11
	cmpdi	r13, 0
	beq	.
	mtcr	r11
	mfsprg	r11, 0
END_FTR_SECTION(0, 1)	// nop out after boot
.endm

/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * The scv instructions are a special case. They get a 0x3000 offset applied.
 * scv exceptions have unique reentrancy properties, see below.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x2fff : Real mode trampolines
 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,        0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,    0x1900, 0x3000)
OPEN_FIXED_SECTION(virt_vectors,        0x3000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,    0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,          0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:
/**
 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
 * This is a synchronous interrupt invoked with the "scv" instruction. The
 * system call does not alter the HV bit, so it is directed to the OS.
 *
 * Handling:
 * scv instructions enter the kernel without changing EE, RI, ME, or HV.
 * In particular, this means we can take a maskable interrupt at any point
 * in the scv handler, which is unlike any other interrupt. This is solved
 * by treating the instruction addresses in the handler as being soft-masked,
 * by adding a SOFT_MASK_TABLE entry for them.
 *
 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 * ensure scv is never executed with relocation off, which means AIL-0
 * should never happen.
 *
 * Before leaving the following inside-__end_soft_masked text, at least one of
 * the following must be true:
 * - MSR[PR]=1 (i.e., return to userspace)
 * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
 * - Standard kernel environment is set up (stack, paca, etc)
 *
 * KVM:
 * These interrupts do not elevate HV 0->1, so HV is not involved. PR KVM
 * ensures that FSCR[SCV] is disabled whenever it has to force AIL off.
 *
 * Call convention:
 *
 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
 */
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
	/* SCV 0 */
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_tramp
#else
	b	system_call_vectored_common
#endif
	nop

	/* SCV 1 - 127 */
	.rept	127
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	li	r0,-1 /* cause failure */
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_sigill_tramp
#else
	b	system_call_vectored_sigill
#endif
	.endr
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)

// Treat scv vectors as soft-masked, see comment above.
// Use absolute values rather than labels here, so they don't get relocated,
// because this code runs unrelocated.
SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)

#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_common, virt_trampolines)
	mtctr	r10
	bctr

TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_sigill, virt_trampolines)
	mtctr	r10
	bctr
#endif

/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)
/**
 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 * It is caused by:
 * - Wake from power-saving state, on powernv.
 * - An NMI from another CPU, triggered by firmware or hypercall.
 * - As crash/debug signal injected from BMC, firmware or hypervisor.
 *
 * Handling:
 * Power-save wakeup is the only performance critical path, so this is
 * determined as quickly as possible first. In this case volatile registers
 * can be discarded and SPRs like CFAR don't need to be read.
 *
 * If not a powersave wakeup, then it's run as a regular interrupt, however
 * it uses its own stack and PACA save area to preserve the regular kernel
 * environment for debugging.
 *
 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
 * correct to switch to virtual mode to run the regular interrupt handler
 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 * is clear).
 *
 * FWNMI:
 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 * entry point with a different register set up. Some hypervisors will
 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 *
 * KVM:
 * Unlike most SRR interrupts, this may be taken by the host while executing
 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 * mode and then raise the sreset.
 */
INT_DEFINE_BEGIN(system_reset)
	IVEC=0x100
	IAREA=PACA_EXNMI
	IVIRT=0 /* no virt entry point */
	ISTACK=0
	IKVM_REAL=1
INT_DEFINE_END(system_reset)

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
	 * bits 46:47. A non-0 value indicates that we are coming from a power
	 * saving state. The idle wakeup handler initially runs in real mode,
	 * but we branch to the 0xc000... address so we can turn on relocation
	 * with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	rlwinm.	r5,r3,47-31,30,31
	bne+	system_reset_idle_wake
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	GEN_INT_ENTRY system_reset, virt=0
	/*
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	__LOAD_FAR_HANDLER(r12, DOTSYM(idle_return_gpr_loss), real_trampolines)
	mtctr	r12
	bctr
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	GEN_INT_ENTRY system_reset, virt=0
#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	__GEN_COMMON_ENTRY system_reset
	/*
	 * Increment paca->in_nmi. When the interrupt entry wrapper later
	 * enables MSR_RI, then SLB or MCE will be able to recover, but a
	 * nested NMI will notice in_nmi and not recover because of the use of
	 * the NMI stack. in_nmi reentrancy is tested in system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY system_reset

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	system_reset_exception

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	kuap_kernel_restore r9, r10
	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL
/**
 * Interrupt 0x200 - Machine Check Interrupt (MCE).
 * This is a non-maskable interrupt always taken in real-mode. It can be
 * synchronous or asynchronous, caused by hardware or software, and it may be
 * taken in a power-saving state.
 *
 * Handling:
 * Similarly to system reset, this uses its own stack and PACA save area,
 * the difference is re-entrancy is allowed on the machine check stack.
 *
 * machine_check_early is run in real mode, and carefully decodes the
 * machine check and tries to handle it (e.g., flush the SLB if there was an
 * error detected there), determines if it was recoverable and logs the
 * event.
 *
 * This early code does not "reconcile" irq soft-mask state like SRESET or
 * regular interrupts do, so irqs_disabled() among other things may not work
 * properly (irq disable/enable already doesn't work because irq tracing can
 * not work in real mode).
 *
 * Then, depending on the execution context when the interrupt is taken, there
 * are 3 main actions:
 * - Executing in kernel mode. The event is queued with irq_work, which means
 *   it is handled when it is next safe to do so (i.e., the kernel has enabled
 *   interrupts), which could be immediately when the interrupt returns. This
 *   avoids nasty issues like switching to virtual mode when the MMU is in a
 *   bad state, or when executing OPAL code. (SRESET is exposed to such issues,
 *   but it has different priorities). Check to see if the CPU was in power
 *   save, and return via the wake up code if it was.
 *
 * - Executing in user mode. machine_check_exception is run like a normal
 *   interrupt handler, which processes the data generated by the early handler.
 *
 * - Executing in guest mode. The interrupt is run with its KVM test, and
 *   branches to KVM to deal with. KVM may queue the event for the host
 *   to report later.
 *
 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
 * or SCRATCH0 is in use, it may cause a crash.
 *
 * KVM:
 * See SRESET.
 */
INT_DEFINE_BEGIN(machine_check_early)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IREALMODE_COMMON=1
	ISTACK=0
	IDAR=1
	IDSISR=1
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
INT_DEFINE_END(machine_check_early)

INT_DEFINE_BEGIN(machine_check)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(machine_check)

EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	EARLY_BOOT_FIXUP
	GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)

#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
	/* See comment at machine_check exception, don't turn on RI */
	GEN_INT_ENTRY machine_check_early, virt=0
#endif

#define MACHINE_CHECK_HANDLER_WINDUP \
	/* Clear MSR_RI before setting SRR0 and SRR1. */ \
	li	r9,0; \
	mtmsrd	r9,1;		/* Clear MSR_RI */ \
	/* Decrement paca->in_mce now RI is clear. */ \
	lhz	r12,PACA_IN_MCE(r13); \
	subi	r12,r12,1; \
	sth	r12,PACA_IN_MCE(r13); \
	EXCEPTION_RESTORE_REGS

EXC_COMMON_BEGIN(machine_check_early_common)
	__GEN_REALMODE_COMMON_ENTRY machine_check_early
	/*
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
lhz r10,PACA_IN_MCE(r13)
cmpwi r10,0 /* Are we in nested machine check */
cmpwi cr1,r10,MAX_MCE_DEPTH /* Are we at maximum nesting */
addi r10,r10,1 /* increment paca->in_mce */
sth r10,PACA_IN_MCE(r13)
mr r10,r1 /* Save r1 */
bne 1f
/* First machine check entry */
ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
1: /* Limit nested MCE to level 4 to avoid stack overflow */
bgt cr1,unrecoverable_mce /* Check if we hit limit of 4 */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
__GEN_COMMON_BODY machine_check_early
BEGIN_FTR_SECTION
bl enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_FTR_SECTION
bl machine_check_early_boot
END_FTR_SECTION(0, 1) // nop out after boot
bl machine_check_early
std r3,RESULT(r1) /* Save result */
ld r12,_MSR(r1)
#ifdef CONFIG_PPC_P7_NAP
/*
* Check if thread was in power saving mode. We come here when any
* of the following is true:
* a. thread wasn't in power saving mode
* b. thread was in power saving mode with no state loss,
* supervisor state loss or hypervisor state loss.
*
* Go back to nap/sleep/winkle mode again if (b) is true.
*/
BEGIN_FTR_SECTION
rlwinm. r11,r12,47-31,30,31
bne machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
* Check if we are coming from guest. If yes, then run the normal
* exception handler which will take the
* machine_check_kvm->kvm_interrupt branch to deliver the MC event
* to guest.
*/
lbz r11,HSTATE_IN_GUEST(r13)
cmpwi r11,0 /* Check if coming from guest */
bne mce_deliver /* continue if we are. */
#endif
/*
* Check if we are coming from userspace. If yes, then run the normal
* exception handler which will deliver the MC event to this kernel.
*/
andi. r11,r12,MSR_PR /* See if coming from user. */
bne mce_deliver /* continue in V mode if we are. */
/*
* At this point we are coming from kernel context.
* Queue up the MCE event and return from the interrupt.
* But before that, check if this is an un-recoverable exception.
* If yes, then stay on emergency stack and panic.
*/
andi. r11,r12,MSR_RI
beq unrecoverable_mce
/*
* Check if we have successfully handled/recovered from error, if not
* then stay on emergency stack and panic.
*/
ld r3,RESULT(r1) /* Load result */
cmpdi r3,0 /* see if we handled MCE successfully */
beq unrecoverable_mce /* if !handled then panic */
/*
* Return from MC interrupt.
* Queue up the MCE event so that we can log it later, while
* returning from kernel or opal call.
*/
bl machine_check_queue_event
MACHINE_CHECK_HANDLER_WINDUP
RFI_TO_KERNEL
mce_deliver:
/*
* This is a host user or guest MCE. Restore all registers, then
* run the "late" handler. For host user, this will run the
* machine_check_exception handler in virtual mode like a normal
* interrupt handler. For guest, this will trigger the KVM test
* and branch to the KVM interrupt similarly to other interrupts.
*/
BEGIN_FTR_SECTION
ld r10,ORIG_GPR3(r1)
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
MACHINE_CHECK_HANDLER_WINDUP
GEN_INT_ENTRY machine_check, virt=0
EXC_COMMON_BEGIN(machine_check_common)
/*
* Machine check is different because we use a different
* save area: PACA_EXMC instead of PACA_EXGEN.
*/
GEN_COMMON machine_check
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception_async
b interrupt_return_srr
#ifdef CONFIG_PPC_P7_NAP
/*
* This is an idle wakeup. Low level machine check has already been
* done. Queue the event then call the idle code to do the wake up.
*/
EXC_COMMON_BEGIN(machine_check_idle_common)
bl machine_check_queue_event
/*
* GPR-loss wakeups are relatively straightforward, because the
* idle sleep code has saved all non-volatile registers on its
* own stack, and r1 in PACAR1.
*
* For no-loss wakeups the r1 and lr registers used by the
* early machine check handler have to be restored first. r2 is
* the kernel TOC, so no need to restore it.
*
* Then decrement MCE nesting after finishing with the stack.
*/
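/*
* Illustrative sketch (not from the original source) of the wake-state
* test below: SRR1[46:47] encodes how much state was lost, and values
* below 2 mean no GPR loss.
*
*	unsigned long ws = (srr1 >> 16) & 3;	// SRR1[46:47]
*	if (ws < 2)
*		return;			// no state loss: back to idle caller
*	idle_return_gpr_loss();		// restore NVGPRs saved by idle code
*/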
ld r3,_MSR(r1)
ld r4,_LINK(r1)
ld r1,GPR1(r1)
lhz r11,PACA_IN_MCE(r13)
subi r11,r11,1
sth r11,PACA_IN_MCE(r13)
mtlr r4
rlwinm r10,r3,47-31,30,31
cmpwi cr1,r10,2
bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */
b idle_return_gpr_loss
#endif
EXC_COMMON_BEGIN(unrecoverable_mce)
/*
* We are going down. But there are chances that we might get hit by
* another MCE during panic path and we may run into unstable state
* with no way out. Hence, turn ME bit off while going down, so that
* when another MCE is hit during panic path, system will checkstop
* and hypervisor will get restarted cleanly by SP.
*/
BEGIN_FTR_SECTION
li r10,0 /* clear MSR_RI */
mtmsrd r10,1
bl disable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
ld r10,PACAKMSR(r13)
li r3,MSR_ME
andc r10,r10,r3
mtmsrd r10
lhz r12,PACA_IN_MCE(r13)
subi r12,r12,1
sth r12,PACA_IN_MCE(r13)
/*
* Invoke machine_check_exception to print MCE event and panic.
* This is the NMI version of the handler because we are called from
* the early handler which is a true NMI.
*/
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
/*
* We will not reach here. Even if we did, there is no way out.
* Call unrecoverable_exception and die.
*/
addi r3,r1,STACK_FRAME_OVERHEAD
bl unrecoverable_exception
b .
/**
* Interrupt 0x300 - Data Storage Interrupt (DSI).
* This is a synchronous interrupt generated due to a data access exception,
* e.g., a load or store which does not have a valid page table entry with
* permissions. DAWR matches also fault here, as do RC updates, and minor misc
* errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
*
* Handling:
* - Hash MMU
* Go to do_hash_fault, which attempts to fill the HPT from an entry in the
* Linux page table. Hash faults can hit in kernel mode in a fairly
* arbitrary state (e.g., interrupts disabled, locks held) when accessing
* "non-bolted" regions, e.g., vmalloc space. However these should always be
* backed by Linux page table entries.
*
* If no entry is found the Linux page fault handler is invoked (by
* do_hash_fault). Linux page faults can happen in kernel mode due to user
* copy operations of course.
*
* KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
* MMU context, which may cause a DSI in the host, which must go to the
* KVM handler. MSR[IR] is not enabled, so the real-mode handler will
* always be used regardless of AIL setting.
*
* - Radix MMU
* The hardware loads from the Linux page table directly, so a fault goes
* immediately to Linux page fault.
*
* Conditions like DAWR match are handled on the way in to Linux page fault.
*/
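/*
* Illustrative C sketch (not from the original source) of the dispatch
* implemented below; radix_enabled() stands in for the MMU feature
* section test.
*
*	if (regs->dsisr & DSISR_DABRMATCH)
*		do_break(regs);		// DAWR/DABR watchpoint hit
*	else if (radix_enabled())
*		do_page_fault(regs);	// hardware walked the Linux table
*	else
*		do_hash_fault(regs);	// try HPT refill, else page fault
*/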
INT_DEFINE_BEGIN(data_access)
IVEC=0x300
IDAR=1
IDSISR=1
IKVM_REAL=1
INT_DEFINE_END(data_access)
EXC_REAL_BEGIN(data_access, 0x300, 0x80)
GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
GEN_COMMON data_access
ld r4,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
andis. r0,r4,DSISR_DABRMATCH@h
bne- 1f
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
bl do_hash_fault
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
bl do_page_fault
#endif
b interrupt_return_srr
1: bl do_break
/*
* do_break() may have changed the NV GPRS while handling a breakpoint.
* If so, we need to restore them with their updated values.
*/
REST_NVGPRS(r1)
b interrupt_return_srr
/**
* Interrupt 0x380 - Data Segment Interrupt (DSLB).
* This is a synchronous interrupt in response to an MMU fault missing SLB
* entry for HPT, or an address outside RPT translation range.
*
* Handling:
* - HPT:
* This refills the SLB, or reports an access fault similarly to a bad page
* fault. When coming from user-mode, the SLB handler may access any kernel
* data, though it may itself take a DSLB. When coming from kernel mode,
* recursive faults must be avoided so access is restricted to the kernel
* image text/data, kernel stack, and any data allocated below
* ppc64_bolted_size (first segment). The kernel handler must avoid stomping
* on user-handler data structures.
*
* KVM: Same as 0x300, DSLB must test for KVM guest.
*/
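/*
* Illustrative sketch (not from the original source) of the SLB path
* below:
*
*	long err = do_slb_fault(regs);
*	if (err == 0)
*		return;			// fast path: SLB was refilled
*	// otherwise err (-EFAULT on radix) is saved in RESULT(r1) and
*	do_bad_segment_interrupt(regs);	// reports the access fault
*/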
INT_DEFINE_BEGIN(data_access_slb)
IVEC=0x380
IDAR=1
IKVM_REAL=1
INT_DEFINE_END(data_access_slb)
EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
GEN_COMMON data_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_slb_fault
cmpdi r3,0
bne- 1f
b fast_interrupt_return_srr
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
li r3,-EFAULT
#endif
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_segment_interrupt
b interrupt_return_srr
/**
* Interrupt 0x400 - Instruction Storage Interrupt (ISI).
* This is a synchronous interrupt in response to an MMU fault due to an
* instruction fetch.
*
* Handling:
* Similar to DSI, though in response to fetch. The faulting address is found
* in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
*/
INT_DEFINE_BEGIN(instruction_access)
IVEC=0x400
IISIDE=1
IDAR=1
IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access)
EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
GEN_COMMON instruction_access
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
bl do_hash_fault
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
bl do_page_fault
#endif
b interrupt_return_srr
/**
* Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
* This is a synchronous interrupt in response to an MMU fault due to an
* instruction fetch.
*
* Handling:
* Similar to DSLB, though in response to fetch. The faulting address is found
* in SRR0 (rather than DAR).
*/
INT_DEFINE_BEGIN(instruction_access_slb)
IVEC=0x480
IISIDE=1
IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access_slb)
EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
GEN_COMMON instruction_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_slb_fault
cmpdi r3,0
bne- 1f
b fast_interrupt_return_srr
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
li r3,-EFAULT
#endif
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_segment_interrupt
b interrupt_return_srr
/**
* Interrupt 0x500 - External Interrupt.
* This is an asynchronous maskable interrupt in response to an "external
* exception" from the interrupt controller or hypervisor (e.g., device
* interrupt). It is maskable in hardware by clearing MSR[EE], and
* soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
*
* When running in HV mode, Linux sets up the LPCR[LPES] bit such that
* interrupts are delivered with HSRR registers; guests use SRRs, which
* requires IHSRR_IF_HVMODE.
*
* On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
* external interrupts are delivered as Hypervisor Virtualization Interrupts
* rather than External Interrupts.
*
* Handling:
* This calls into the Linux IRQ handler. NVGPRs are not saved to reduce
* overhead, because registers at the time of the interrupt are not so
* important as it is asynchronous.
*
* If soft masked, the masked handler will note the pending interrupt for
* replay, and clear MSR[EE] in the interrupted context.
*
* CFAR is not required because this is an asynchronous interrupt that in
* general won't have much bearing on the state of the CPU, with the possible
* exception of crash/debug IPIs, but those are generally moving to use SRESET
* IPIs. Unless this is an HV interrupt and KVM HV is possible, in which case
* it may be exiting the guest and need CFAR to be saved.
*/
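/*
* Illustrative sketch (not from the original source): IHSRR_IF_HVMODE
* selects the save/restore register pair at runtime, roughly
*
*	if (cpu_has_feature(CPU_FTR_HVMODE))
*		use_hsrr0_hsrr1();	// bare metal: LPCR[LPES]=0
*	else
*		use_srr0_srr1();	// guest under a hypervisor
*
* (helper names are hypothetical; the asm patches this with feature
* sections rather than a branch)
*/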
INT_DEFINE_BEGIN(hardware_interrupt)
IVEC=0x500
IHSRR_IF_HVMODE=1
IMASK=IRQS_DISABLED
IKVM_REAL=1
IKVM_VIRT=1
ICFAR=0
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ICFAR_IF_HVMODE=1
#endif
INT_DEFINE_END(hardware_interrupt)
EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
GEN_INT_ENTRY hardware_interrupt, virt=0
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
GEN_INT_ENTRY hardware_interrupt, virt=1
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
GEN_COMMON hardware_interrupt
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
BEGIN_FTR_SECTION
b interrupt_return_hsrr
FTR_SECTION_ELSE
b interrupt_return_srr
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
/**
* Interrupt 0x600 - Alignment Interrupt
* This is a synchronous interrupt in response to a data alignment fault.
*/
INT_DEFINE_BEGIN(alignment)
IVEC=0x600
IDAR=1
IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(alignment)
EXC_REAL_BEGIN(alignment, 0x600, 0x100)
GEN_INT_ENTRY alignment, virt=0
EXC_REAL_END(alignment, 0x600, 0x100)
EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
GEN_INT_ENTRY alignment, virt=1
EXC_VIRT_END(alignment, 0x4600, 0x100)
EXC_COMMON_BEGIN(alignment_common)
GEN_COMMON alignment
addi r3,r1,STACK_FRAME_OVERHEAD
bl alignment_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
b interrupt_return_srr
/**
* Interrupt 0x700 - Program Interrupt (program check).
* This is a synchronous interrupt in response to various instruction faults:
* traps, privilege errors, TM errors, floating point exceptions.
*
* Handling:
* This interrupt may use the "emergency stack" in some cases when being taken
* from kernel context, which complicates handling.
*/
INT_DEFINE_BEGIN(program_check)
IVEC=0x700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(program_check)
EXC_REAL_BEGIN(program_check, 0x700, 0x100)
EARLY_BOOT_FIXUP
GEN_INT_ENTRY program_check, virt=0
EXC_REAL_END(program_check, 0x700, 0x100)
EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
GEN_INT_ENTRY program_check, virt=1
EXC_VIRT_END(program_check, 0x4700, 0x100)
EXC_COMMON_BEGIN(program_check_common)
__GEN_COMMON_ENTRY program_check
/*
* It's possible to receive a TM Bad Thing type program check with
* userspace register values (in particular r1), but with SRR1 reporting
* that we came from the kernel. Normally that would confuse the bad
* stack logic, and we would report a bad kernel stack pointer. Instead
* we switch to the emergency stack if we're taking a TM Bad Thing from
* the kernel.
*/
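/*
* Illustrative sketch (not from the original source) of the stack
* selection below. Kernel stack pointers are "negative" addresses, so
* any r1 that does not satisfy r1 < -INT_FRAME_SIZE is untrustworthy:
*
*	if (srr1 & MSR_PR)
*		normal_stack();		// from userspace
*	else if (srr1 & SRR1_PROGTM)
*		emergency_stack();	// TM Bad Thing: r1 may be user's
*	else if ((long)r1 < -INT_FRAME_SIZE)
*		normal_stack();		// plausible kernel stack
*	else
*		emergency_stack();
*/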
andi. r10,r12,MSR_PR
bne .Lnormal_stack /* If userspace, go normal path */
andis. r10,r12,(SRR1_PROGTM)@h
bne .Lemergency_stack /* If TM, emergency */
cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */
blt .Lnormal_stack /* normal path if not */
/* Use the emergency stack */
.Lemergency_stack:
andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
/* 3 in EXCEPTION_PROLOG_COMMON */
mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
__ISTACK(program_check)=0
__GEN_COMMON_BODY program_check
b .Ldo_program_check
.Lnormal_stack:
__ISTACK(program_check)=1
__GEN_COMMON_BODY program_check
.Ldo_program_check:
addi r3,r1,STACK_FRAME_OVERHEAD
bl program_check_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
b interrupt_return_srr
/*
* Interrupt 0x800 - Floating-Point Unavailable Interrupt.
* This is a synchronous interrupt in response to executing an fp instruction
* with MSR[FP]=0.
*
* Handling:
* This will load FP registers and enable the FP bit if coming from userspace,
* otherwise report a bad kernel use of FP.
*/
INT_DEFINE_BEGIN(fp_unavailable)
IVEC=0x800
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(fp_unavailable)
EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
GEN_INT_ENTRY fp_unavailable, virt=0
EXC_REAL_END(fp_unavailable, 0x800, 0x100)
EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
GEN_INT_ENTRY fp_unavailable, virt=1
EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
EXC_COMMON_BEGIN(fp_unavailable_common)
GEN_COMMON fp_unavailable
bne 1f /* if from user, just load it up */
addi r3,r1,STACK_FRAME_OVERHEAD
bl kernel_fp_unavailable_exception
0: trap
EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
/* Test if 2 TM state bits are zero. If non-zero (i.e., userspace was
* in a transaction), go do TM stuff
*/
rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
bne- 2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
bl load_up_fpu
b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl fp_unavailable_tm
b interrupt_return_srr
#endif
/**
* Interrupt 0x900 - Decrementer Interrupt.
* This is an asynchronous interrupt in response to a decrementer exception
* (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing
* MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e.,
* local_irq_disable()).
*
* Handling:
* This calls into the Linux timer handler. NVGPRs are not saved (see 0x500).
*
* If soft masked, the masked handler will note the pending interrupt for
* replay, and bump the decrementer to a high value, leaving MSR[EE] enabled
* in the interrupted context.
* If PPC_WATCHDOG is configured, the soft masked handler will actually set
* things back up to run soft_nmi_interrupt as a regular interrupt handler
* on the emergency stack.
*
* CFAR is not required because this is asynchronous (see hardware_interrupt).
* A watchdog interrupt may like to have CFAR, but usually the interesting
* branch is long gone by that point (e.g., infinite loop).
*/
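/*
* Illustrative sketch (not from the original source) of what the
* masked path does for a soft-masked decrementer (see the
* MASKED_INTERRUPT macro near the end of this file):
*
*	paca->irq_happened |= PACA_IRQ_DEC;	// note it for replay
*	mtspr(SPRN_DEC, 0x7fffffff);		// push DEC far out
*	// MSR[EE] stays set; the replay happens on local_irq_enable()
*/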
INT_DEFINE_BEGIN(decrementer)
IVEC=0x900
IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
ICFAR=0
INT_DEFINE_END(decrementer)
EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
GEN_INT_ENTRY decrementer, virt=0
EXC_REAL_END(decrementer, 0x900, 0x80)
EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
GEN_INT_ENTRY decrementer, virt=1
EXC_VIRT_END(decrementer, 0x4900, 0x80)
EXC_COMMON_BEGIN(decrementer_common)
GEN_COMMON decrementer
addi r3,r1,STACK_FRAME_OVERHEAD
bl timer_interrupt
b interrupt_return_srr
/**
* Interrupt 0x980 - Hypervisor Decrementer Interrupt.
* This is an asynchronous interrupt, similar to 0x900 but for the HDEC
* register.
*
* Handling:
* Linux does not use this outside KVM, where it is used to keep a host timer
* while the guest is given control of DEC. It should normally be caught by
* the KVM test and routed there.
*/
INT_DEFINE_BEGIN(hdecrementer)
IVEC=0x980
IHSRR=1
ISTACK=0
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(hdecrementer)
EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80)
GEN_INT_ENTRY hdecrementer, virt=0
EXC_REAL_END(hdecrementer, 0x980, 0x80)
EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80)
GEN_INT_ENTRY hdecrementer, virt=1
EXC_VIRT_END(hdecrementer, 0x4980, 0x80)
EXC_COMMON_BEGIN(hdecrementer_common)
__GEN_COMMON_ENTRY hdecrementer
/*
* Hypervisor decrementer interrupts not caught by the KVM test
* shouldn't occur but are sometimes left pending on exit from a KVM
* guest. We don't need to do anything to clear them, as they are
* edge-triggered.
*
* Be careful to avoid touching the kernel stack.
*/
li r10,0
stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_CTR(r13)
mtctr r10
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
HRFI_TO_KERNEL
/**
* Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
* This is an asynchronous interrupt in response to a msgsndp doorbell.
* It is maskable in hardware by clearing MSR[EE], and soft-maskable with
* IRQS_DISABLED mask (i.e., local_irq_disable()).
*
* Handling:
* Guests may use this for IPIs between threads in a core if the
* hypervisor supports it. NVGPRS are not saved (see 0x500).
*
* If soft masked, the masked handler will note the pending interrupt for
* replay, leaving MSR[EE] enabled in the interrupted context because the
* doorbells are edge triggered.
*
* CFAR is not required, similarly to hardware_interrupt.
*/
INT_DEFINE_BEGIN(doorbell_super)
IVEC=0xa00
IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
ICFAR=0
INT_DEFINE_END(doorbell_super)
EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
GEN_INT_ENTRY doorbell_super, virt=0
EXC_REAL_END(doorbell_super, 0xa00, 0x100)
EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
GEN_INT_ENTRY doorbell_super, virt=1
EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
EXC_COMMON_BEGIN(doorbell_super_common)
GEN_COMMON doorbell_super
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
bl doorbell_exception
#else
bl unknown_async_exception
#endif
b interrupt_return_srr
EXC_REAL_NONE(0xb00, 0x100)
EXC_VIRT_NONE(0x4b00, 0x100)
/**
* Interrupt 0xc00 - System Call Interrupt (syscall, hcall).
* This is a synchronous interrupt invoked with the "sc" instruction. The
* system call is invoked with "sc 0" and does not alter the HV bit, so it
* is directed to the currently running OS. The hypercall is invoked with
* "sc 1" and it sets HV=1, so it elevates to hypervisor.
*
* In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
* 0x4c00 virtual mode.
*
* Handling:
* If the KVM test fires then it was due to a hypercall and is accordingly
* routed to KVM. Otherwise this executes a normal Linux system call.
*
* Call convention:
*
* syscall and hypercalls register conventions are documented in
* Documentation/powerpc/syscall64-abi.rst and
* Documentation/powerpc/papr_hcalls.rst respectively.
*
* The intersection of volatile registers that don't contain possible
* inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
* without saving, though xer is not a good idea to use, as hardware may
* interpret some bits so it may be costly to change them.
*/
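/*
* Illustrative userspace example (not from the original source),
* following Documentation/powerpc/syscall64-abi.rst: number in r0,
* arguments in r3-r8, return value in r3, error flagged in cr0.SO.
*
*	register long r0 asm("r0") = nr;
*	register long r3 asm("r3") = arg1;
*	asm volatile("sc"
*		     : "+r" (r3), "+r" (r0)
*		     :
*		     : "r4", "r5", "r6", "r7", "r8", "r9", "r10",
*		       "r11", "r12", "cr0", "ctr", "xer", "memory");
*	// if cr0.SO was set, r3 holds a positive errno value
*/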
INT_DEFINE_BEGIN(system_call)
IVEC=0xc00
IKVM_REAL=1
IKVM_VIRT=1
ICFAR=0
INT_DEFINE_END(system_call)
.macro SYSTEM_CALL virt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
* There is a little bit of juggling to get syscall and hcall
* working well. Save r13 in ctr to avoid using SPRG scratch
* register.
*
* Userspace syscalls have already saved the PPR, hcalls must save
* it before setting HMT_MEDIUM.
*/
mtctr r13
GET_PACA(r13)
std r10,PACA_EXGEN+EX_R10(r13)
INTERRUPT_TO_KERNEL
KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
mfctr r9
#else
mr r9,r13
GET_PACA(r13)
INTERRUPT_TO_KERNEL
#endif
#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
BEGIN_FTR_SECTION
cmpdi r0,0x1ebe
beq- 1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
#endif
/* We reach here with PACA in r13, r13 in r9. */
mfspr r11,SPRN_SRR0
mfspr r12,SPRN_SRR1
HMT_MEDIUM
.if ! \virt
__LOAD_HANDLER(r10, system_call_common_real, real_vectors)
mtctr r10
bctr
.else
#ifdef CONFIG_RELOCATABLE
__LOAD_HANDLER(r10, system_call_common, virt_vectors)
mtctr r10
bctr
#else
b system_call_common
#endif
.endif
#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
/* Fast LE/BE switch system call */
1: mfspr r12,SPRN_SRR1
xori r12,r12,MSR_LE
mtspr SPRN_SRR1,r12
mr r13,r9
RFI_TO_USER /* return to userspace */
b . /* prevent speculative execution */
#endif
.endm
EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
SYSTEM_CALL 0
EXC_REAL_END(system_call, 0xc00, 0x100)
EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
SYSTEM_CALL 1
EXC_VIRT_END(system_call, 0x4c00, 0x100)
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
TRAMP_REAL_BEGIN(kvm_hcall)
std r9,PACA_EXGEN+EX_R9(r13)
std r11,PACA_EXGEN+EX_R11(r13)
std r12,PACA_EXGEN+EX_R12(r13)
mfcr r9
mfctr r10
std r10,PACA_EXGEN+EX_R13(r13)
li r10,0
std r10,PACA_EXGEN+EX_CFAR(r13)
std r10,PACA_EXGEN+EX_CTR(r13)
/*
* Save the PPR (on systems that support it) before changing to
* HMT_MEDIUM. That allows the KVM code to save that value into the
* guest state (it is the guest's PPR value).
*/
BEGIN_FTR_SECTION
mfspr r10,SPRN_PPR
std r10,PACA_EXGEN+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
HMT_MEDIUM
#ifdef CONFIG_RELOCATABLE
/*
* Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
* outside the head section.
*/
__LOAD_FAR_HANDLER(r10, kvmppc_hcall, real_trampolines)
mtctr r10
bctr
#else
b kvmppc_hcall
#endif
#endif
/**
* Interrupt 0xd00 - Trace Interrupt.
* This is a synchronous interrupt in response to instruction step or
* breakpoint faults.
*/
INT_DEFINE_BEGIN(single_step)
IVEC=0xd00
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(single_step)
EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
GEN_INT_ENTRY single_step, virt=0
EXC_REAL_END(single_step, 0xd00, 0x100)
EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
GEN_INT_ENTRY single_step, virt=1
EXC_VIRT_END(single_step, 0x4d00, 0x100)
EXC_COMMON_BEGIN(single_step_common)
GEN_COMMON single_step
addi r3,r1,STACK_FRAME_OVERHEAD
bl single_step_exception
b interrupt_return_srr
/**
* Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
* This is a synchronous interrupt in response to an MMU fault caused by a
* guest data access.
*
* Handling:
* This should always get routed to KVM. In radix MMU mode, this is caused
* by a guest nested radix access that can't be performed due to the
* partition scope page table. In hash mode, this can be caused by guests
* running with translation disabled (virtual real mode) or with VPM enabled.
* KVM will update the page table structures or disallow the access.
*/
INT_DEFINE_BEGIN(h_data_storage)
IVEC=0xe00
IHSRR=1
IDAR=1
IDSISR=1
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(h_data_storage)
EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
GEN_INT_ENTRY h_data_storage, virt=0, ool=1
EXC_REAL_END(h_data_storage, 0xe00, 0x20)
EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
GEN_INT_ENTRY h_data_storage, virt=1, ool=1
EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
EXC_COMMON_BEGIN(h_data_storage_common)
GEN_COMMON h_data_storage
addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
bl do_bad_page_fault_segv
MMU_FTR_SECTION_ELSE
bl unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
b interrupt_return_hsrr
/**
* Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
* This is a synchronous interrupt in response to an MMU fault caused by a
* guest instruction fetch, similar to HDSI.
*/
INT_DEFINE_BEGIN(h_instr_storage)
IVEC=0xe20
IHSRR=1
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(h_instr_storage)
EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
EXC_COMMON_BEGIN(h_instr_storage_common)
GEN_COMMON h_instr_storage
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
b interrupt_return_hsrr
/**
* Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
*/
INT_DEFINE_BEGIN(emulation_assist)
IVEC=0xe40
IHSRR=1
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(emulation_assist)
EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
GEN_INT_ENTRY emulation_assist, virt=0, ool=1
EXC_REAL_END(emulation_assist, 0xe40, 0x20)
EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
GEN_INT_ENTRY emulation_assist, virt=1, ool=1
EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
EXC_COMMON_BEGIN(emulation_assist_common)
GEN_COMMON emulation_assist
addi r3,r1,STACK_FRAME_OVERHEAD
bl emulation_assist_interrupt
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
b interrupt_return_hsrr
/**
* Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
* This is an asynchronous interrupt caused by a Hypervisor Maintenance
* Exception. It is always taken in real mode but uses HSRR registers
* unlike SRESET and MCE.
*
* It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
* with IRQS_DISABLED mask (i.e., local_irq_disable()).
*
* Handling:
* This is a special case: it is handled similarly to machine checks, with an
* initial real mode handler that is not soft-masked, which attempts to fix the
* problem. Then a regular handler which is soft-maskable and reports the
* problem.
*
* The emergency stack is used for the early real mode handler.
*
* XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
* either use soft-masking for the MCE, or use irq_work for the HMI.
*
* KVM:
* Unlike MCE, this calls into KVM without calling the real mode handler
* first.
*/
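/*
* Illustrative sketch (not from the original source) of the two-phase
* flow implemented below:
*
*	if (hmi_exception_realmode(regs) == 0)
*		return;		// fully handled in real mode
*	// otherwise re-deliver as a regular, soft-maskable interrupt,
*	// which runs handle_hmi_exception() in virtual mode and pulls
*	// the event details from firmware.
*/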
INT_DEFINE_BEGIN(hmi_exception_early)
IVEC=0xe60
IHSRR=1
IREALMODE_COMMON=1
ISTACK=0
IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
IKVM_REAL=1
INT_DEFINE_END(hmi_exception_early)
INT_DEFINE_BEGIN(hmi_exception)
IVEC=0xe60
IHSRR=1
IMASK=IRQS_DISABLED
IKVM_REAL=1
INT_DEFINE_END(hmi_exception)
EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
EXC_REAL_END(hmi_exception, 0xe60, 0x20)
EXC_VIRT_NONE(0x4e60, 0x20)
EXC_COMMON_BEGIN(hmi_exception_early_common)
__GEN_REALMODE_COMMON_ENTRY hmi_exception_early
mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
__GEN_COMMON_BODY hmi_exception_early
addi r3,r1,STACK_FRAME_OVERHEAD
bl hmi_exception_realmode
cmpdi cr0,r3,0
bne 1f
EXCEPTION_RESTORE_REGS hsrr=1
HRFI_TO_USER_OR_KERNEL
1:
/*
* Go to virtual mode and pull the HMI event information from
* firmware.
*/
EXCEPTION_RESTORE_REGS hsrr=1
GEN_INT_ENTRY hmi_exception, virt=0
EXC_COMMON_BEGIN(hmi_exception_common)
GEN_COMMON hmi_exception
addi r3,r1,STACK_FRAME_OVERHEAD
bl handle_hmi_exception
b interrupt_return_hsrr
/**
* Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
* This is an asynchronous interrupt in response to a msgsnd doorbell.
* Similar to the 0xa00 doorbell but for host rather than guest.
*
* CFAR is not required (similar to doorbell_interrupt), unless KVM HV
* is enabled, in which case it may be a guest exit. Most PowerNV kernels
* include KVM support so it would be nice if this could be dynamically
* patched out if KVM was not currently running any guests.
*/
INT_DEFINE_BEGIN(h_doorbell)
IVEC=0xe80
IHSRR=1
IMASK=IRQS_DISABLED
IKVM_REAL=1
IKVM_VIRT=1
#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ICFAR=0
#endif
INT_DEFINE_END(h_doorbell)
EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
GEN_INT_ENTRY h_doorbell, virt=0, ool=1
EXC_REAL_END(h_doorbell, 0xe80, 0x20)
EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
GEN_INT_ENTRY h_doorbell, virt=1, ool=1
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
EXC_COMMON_BEGIN(h_doorbell_common)
GEN_COMMON h_doorbell
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
bl doorbell_exception
#else
bl unknown_async_exception
#endif
b interrupt_return_hsrr
/**
* Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
* This is an asynchronous interrupt in response to an "external exception".
* Similar to 0x500 but for host only.
*
* Like h_doorbell, CFAR is only required for KVM HV because this can be
* a guest exit.
*/
INT_DEFINE_BEGIN(h_virt_irq)
IVEC=0xea0
IHSRR=1
IMASK=IRQS_DISABLED
IKVM_REAL=1
IKVM_VIRT=1
#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ICFAR=0
#endif
INT_DEFINE_END(h_virt_irq)
EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
EXC_COMMON_BEGIN(h_virt_irq_common)
GEN_COMMON h_virt_irq
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
b interrupt_return_hsrr
EXC_REAL_NONE(0xec0, 0x20)
EXC_VIRT_NONE(0x4ec0, 0x20)
EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)
/*
* Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
* This is an asynchronous interrupt in response to a PMU exception.
* It is maskable in hardware by clearing MSR[EE], and soft-maskable with
* IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
*
* Handling:
* This calls into the perf subsystem.
*
* Like the watchdog soft-nmi, it appears to Linux as an NMI interrupt, in
* that it runs under local_irq_disable. However it may be soft-masked in
* powerpc-specific code.
*
* If soft masked, the masked handler will note the pending interrupt for
* replay, and clear MSR[EE] in the interrupted context.
*
* CFAR is not used by perf interrupts so not required.
*/
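/*
* Illustrative sketch (not from the original source) of the soft-mask
* test performed below:
*
*	if (paca->irq_soft_mask == IRQS_ENABLED)
*		performance_monitor_exception_async(regs);  // normal IRQ
*	else
*		performance_monitor_exception_nmi(regs);    // NMI-style
*/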
INT_DEFINE_BEGIN(performance_monitor)
IVEC=0xf00
IMASK=IRQS_PMI_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
ICFAR=0
INT_DEFINE_END(performance_monitor)
EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
GEN_INT_ENTRY performance_monitor, virt=0, ool=1
EXC_REAL_END(performance_monitor, 0xf00, 0x20)
EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
GEN_INT_ENTRY performance_monitor, virt=1, ool=1
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
EXC_COMMON_BEGIN(performance_monitor_common)
GEN_COMMON performance_monitor
addi r3,r1,STACK_FRAME_OVERHEAD
lbz r4,PACAIRQSOFTMASK(r13)
cmpdi r4,IRQS_ENABLED
bne 1f
bl performance_monitor_exception_async
b interrupt_return_srr
1:
bl performance_monitor_exception_nmi
/* Clear MSR_RI before setting SRR0 and SRR1. */
li r9,0
mtmsrd r9,1
kuap_kernel_restore r9, r10
EXCEPTION_RESTORE_REGS hsrr=0
RFI_TO_KERNEL
/**
* Interrupt 0xf20 - Vector Unavailable Interrupt.
* This is a synchronous interrupt in response to
* executing a vector (or altivec) instruction with MSR[VEC]=0.
* Similar to FP unavailable.
*/
INT_DEFINE_BEGIN(altivec_unavailable)
IVEC=0xf20
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(altivec_unavailable)
EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1
EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1
EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
EXC_COMMON_BEGIN(altivec_unavailable_common)
GEN_COMMON altivec_unavailable
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
beq 1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION_NESTED(69)
/* Test if 2 TM state bits are zero. If non-zero (i.e., userspace was
* in a transaction), go do TM stuff
*/
rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
bne- 2f
END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
bl load_up_altivec
b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_tm
b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_exception
b interrupt_return_srr
/**
* Interrupt 0xf40 - VSX Unavailable Interrupt.
* This is a synchronous interrupt in response to
* executing a VSX instruction with MSR[VSX]=0.
* Similar to FP unavailable.
*/
INT_DEFINE_BEGIN(vsx_unavailable)
IVEC=0xf40
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(vsx_unavailable)
EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1
EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1
EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
EXC_COMMON_BEGIN(vsx_unavailable_common)
GEN_COMMON vsx_unavailable
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
beq 1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION_NESTED(69)
/* Test if 2 TM state bits are zero. If non-zero (i.e., userspace was
* in a transaction), go do TM stuff
*/
rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
bne- 2f
END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
b load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_tm
b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_exception
b interrupt_return_srr
/**
* Interrupt 0xf60 - Facility Unavailable Interrupt.
* This is a synchronous interrupt in response to
* executing an instruction without access to the facility that can be
* resolved by the OS (e.g., FSCR, MSR).
* Similar to FP unavailable.
*/
INT_DEFINE_BEGIN(facility_unavailable)
IVEC=0xf60
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(facility_unavailable)
EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
EXC_COMMON_BEGIN(facility_unavailable_common)
GEN_COMMON facility_unavailable
addi r3,r1,STACK_FRAME_OVERHEAD
bl facility_unavailable_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
b interrupt_return_srr
/**
* Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
* This is a synchronous interrupt in response to
* executing an instruction without access to the facility that can only
* be resolved in HV mode (e.g., HFSCR).
* Similar to FP unavailable.
*/
INT_DEFINE_BEGIN(h_facility_unavailable)
IVEC=0xf80
IHSRR=1
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(h_facility_unavailable)
EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
EXC_COMMON_BEGIN(h_facility_unavailable_common)
GEN_COMMON h_facility_unavailable
addi r3,r1,STACK_FRAME_OVERHEAD
bl facility_unavailable_exception
REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
b interrupt_return_hsrr
EXC_REAL_NONE(0xfa0, 0x20)
EXC_VIRT_NONE(0x4fa0, 0x20)
EXC_REAL_NONE(0xfc0, 0x20)
EXC_VIRT_NONE(0x4fc0, 0x20)
EXC_REAL_NONE(0xfe0, 0x20)
EXC_VIRT_NONE(0x4fe0, 0x20)
EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)
#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_system_error)
IVEC=0x1200
IHSRR=1
INT_DEFINE_END(cbe_system_error)
EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
GEN_INT_ENTRY cbe_system_error, virt=0
EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
EXC_COMMON_BEGIN(cbe_system_error_common)
GEN_COMMON cbe_system_error
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_system_error_exception
b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif
/**
* Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
* This interrupt was removed from the ISA before 2.01, which is the earliest
* 64-bit BookS ISA supported; however, the G5 / 970 implements it as a
* non-architected feature available through the support processor
* interface.
*/
INT_DEFINE_BEGIN(instruction_breakpoint)
IVEC=0x1300
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_breakpoint)
EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
GEN_INT_ENTRY instruction_breakpoint, virt=0
EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
GEN_INT_ENTRY instruction_breakpoint, virt=1
EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
EXC_COMMON_BEGIN(instruction_breakpoint_common)
GEN_COMMON instruction_breakpoint
addi r3,r1,STACK_FRAME_OVERHEAD
bl instruction_breakpoint_exception
b interrupt_return_srr
EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)
/**
* Interrupt 0x1500 - Soft Patch Interrupt
*
* Handling:
* This is an implementation specific interrupt which can be used for a
* range of exceptions.
*
* This interrupt handler is unique in that it runs the denormal assist
* code even for guests (and even in guest context) without going to KVM,
* for speed. POWER9 does not raise denorm exceptions, so this special case
* could be phased out in future to reduce special cases.
*/
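/*
* Illustrative sketch (not from the original source) of the assist
* below: each register is copied to itself, which normalises any
* denormal value, then HSRR0 is wound back so the faulting
* instruction executes again:
*
*	for (i = 0; i < nregs; i++)
*		reg[i] = reg[i];	// fmr / XVCPSGNDP to self
*	hsrr0 -= 4;			// retry the original instruction
*/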
INT_DEFINE_BEGIN(denorm_exception)
IVEC=0x1500
IHSRR=1
IBRANCH_TO_COMMON=0
IKVM_REAL=1
INT_DEFINE_END(denorm_exception)
EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100)
GEN_INT_ENTRY denorm_exception, virt=0
#ifdef CONFIG_PPC_DENORMALISATION
andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
bne+ denorm_assist
#endif
GEN_BRANCH_TO_COMMON denorm_exception, virt=0
EXC_REAL_END(denorm_exception, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
GEN_INT_ENTRY denorm_exception, virt=1
andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
bne+ denorm_assist
GEN_BRANCH_TO_COMMON denorm_exception, virt=1
EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
#else
EXC_VIRT_NONE(0x5500, 0x100)
#endif
#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
/*
* To denormalise we need to move a copy of the register to itself.
* For POWER6 do that here for all FP regs.
*/
mfmsr r10
ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
xori r10,r10,(MSR_FE0|MSR_FE1)
mtmsrd r10
sync
.Lreg=0
.rept 32
fmr .Lreg,.Lreg
.Lreg=.Lreg+1
.endr
FTR_SECTION_ELSE
/*
* To denormalise we need to move a copy of the register to itself.
* For POWER7 do that here for the first 32 VSX registers only.
*/
mfmsr r10
oris r10,r10,MSR_VSX@h
mtmsrd r10
sync
.Lreg=0
.rept 32
XVCPSGNDP(.Lreg,.Lreg,.Lreg)
.Lreg=.Lreg+1
.endr
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
b denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
* To denormalise we need to move a copy of the register to itself.
* For POWER8 we need to do that for all 64 VSX registers
*/
.Lreg=32
.rept 32
XVCPSGNDP(.Lreg,.Lreg,.Lreg)
.Lreg=.Lreg+1
.endr
denorm_done:
mfspr r11,SPRN_HSRR0
subi r11,r11,4
mtspr SPRN_HSRR0,r11
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
BEGIN_FTR_SECTION
ld r10,PACA_EXGEN+EX_PPR(r13)
mtspr SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
ld r10,PACA_EXGEN+EX_CFAR(r13)
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
li r10,0
stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
HRFI_TO_UNKNOWN
b .
#endif
EXC_COMMON_BEGIN(denorm_exception_common)
GEN_COMMON denorm_exception
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
b interrupt_return_hsrr
#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_maintenance)
IVEC=0x1600
IHSRR=1
INT_DEFINE_END(cbe_maintenance)
EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
GEN_INT_ENTRY cbe_maintenance, virt=0
EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
EXC_COMMON_BEGIN(cbe_maintenance_common)
GEN_COMMON cbe_maintenance
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_maintenance_exception
b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
#endif
INT_DEFINE_BEGIN(altivec_assist)
IVEC=0x1700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(altivec_assist)
EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100)
GEN_INT_ENTRY altivec_assist, virt=0
EXC_REAL_END(altivec_assist, 0x1700, 0x100)
EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
GEN_INT_ENTRY altivec_assist, virt=1
EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
EXC_COMMON_BEGIN(altivec_assist_common)
GEN_COMMON altivec_assist
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
bl altivec_assist_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
#else
bl unknown_exception
#endif
b interrupt_return_srr
#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_thermal)
IVEC=0x1800
IHSRR=1
INT_DEFINE_END(cbe_thermal)
EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
GEN_INT_ENTRY cbe_thermal, virt=0
EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
EXC_COMMON_BEGIN(cbe_thermal_common)
GEN_COMMON cbe_thermal
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_thermal_exception
b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
#endif
#ifdef CONFIG_PPC_WATCHDOG
INT_DEFINE_BEGIN(soft_nmi)
IVEC=0x900
ISTACK=0
ICFAR=0
INT_DEFINE_END(soft_nmi)
/*
* Branch to soft_nmi_interrupt using the emergency stack. The emergency
* stack is one that is usable by maskable interrupts so long as MSR_EE
* remains off. It is used for recovery when something has corrupted the
* normal kernel stack, for example. The "soft NMI" must not use the process
* stack because we want irq disabled sections to avoid touching the stack
* at all (other than PMU interrupts), so use the emergency stack for this,
* and run it entirely with interrupts hard disabled.
*/
EXC_COMMON_BEGIN(soft_nmi_common)
mr r10,r1
ld r1,PACAEMERGSP(r13)
subi r1,r1,INT_FRAME_SIZE
__GEN_COMMON_BODY soft_nmi
addi r3,r1,STACK_FRAME_OVERHEAD
bl soft_nmi_interrupt
/* Clear MSR_RI before setting SRR0 and SRR1. */
li r9,0
mtmsrd r9,1
kuap_kernel_restore r9, r10
EXCEPTION_RESTORE_REGS hsrr=0
RFI_TO_KERNEL
#endif /* CONFIG_PPC_WATCHDOG */
/*
* An interrupt came in while soft-disabled. We set paca->irq_happened, then:
* - If it was a decrementer interrupt, we bump the dec to max and return.
* - If it was a doorbell we return immediately since doorbells are edge
* triggered and won't automatically refire.
* - If it was an HMI we return immediately since we handled it in realmode
* and it won't refire.
* - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
* This is called with r10 containing the value to OR to the paca field.
*/
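/*
* Illustrative sketch (not from the original source) of the replay
* half of this scheme, which lives in the irq soft-mask code rather
* than in this file:
*
*	// on soft-enable (local_irq_enable() and friends):
*	unsigned char happened = local_paca->irq_happened;
*	if (happened & PACA_IRQ_DEC)
*		replay_timer_interrupt();	// hypothetical helper
*	if (happened & PACA_IRQ_EE)
*		replay_external_interrupt();	// hypothetical helper
*	// and so on for doorbells, HMIs, etc.
*/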
  2579. .macro MASKED_INTERRUPT hsrr=0
  2580. .if \hsrr
  2581. masked_Hinterrupt:
  2582. .else
  2583. masked_interrupt:
  2584. .endif
  2585. stw r9,PACA_EXGEN+EX_CCR(r13)
  2586. #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
  2587. /*
  2588. * Ensure there was no previous MUST_HARD_MASK interrupt or
  2589. * HARD_DIS setting. If this does fire, the interrupt is still
  2590. * masked and MSR[EE] will be cleared on return, so no need to
  2591. * panic, but somebody probably enabled MSR[EE] under
  2592. * PACA_IRQ_HARD_DIS, mtmsr(mfmsr() | MSR_x) being a common
  2593. * cause.
  2594. */
  2595. lbz r9,PACAIRQHAPPENED(r13)
  2596. andi. r9,r9,(PACA_IRQ_MUST_HARD_MASK|PACA_IRQ_HARD_DIS)
  2597. 0: tdnei r9,0
  2598. EMIT_WARN_ENTRY 0b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
  2599. #endif
  2600. lbz r9,PACAIRQHAPPENED(r13)
  2601. or r9,r9,r10
  2602. stb r9,PACAIRQHAPPENED(r13)
  2603. .if ! \hsrr
  2604. cmpwi r10,PACA_IRQ_DEC
  2605. bne 1f
  2606. LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
  2607. mtspr SPRN_DEC,r9
  2608. #ifdef CONFIG_PPC_WATCHDOG
  2609. lwz r9,PACA_EXGEN+EX_CCR(r13)
  2610. b soft_nmi_common
  2611. #else
  2612. b 2f
  2613. #endif
  2614. .endif
  2615. 1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
  2616. beq 2f
  2617. xori r12,r12,MSR_EE /* clear MSR_EE */
  2618. .if \hsrr
  2619. mtspr SPRN_HSRR1,r12
  2620. .else
  2621. mtspr SPRN_SRR1,r12
  2622. .endif
  2623. ori r9,r9,PACA_IRQ_HARD_DIS
  2624. stb r9,PACAIRQHAPPENED(r13)
  2625. 2: /* done */
  2626. li r9,0
  2627. .if \hsrr
  2628. stb r9,PACAHSRR_VALID(r13)
  2629. .else
  2630. stb r9,PACASRR_VALID(r13)
  2631. .endif
  2632. SEARCH_RESTART_TABLE
  2633. cmpdi r12,0
  2634. beq 3f
  2635. .if \hsrr
  2636. mtspr SPRN_HSRR0,r12
  2637. .else
  2638. mtspr SPRN_SRR0,r12
  2639. .endif
  2640. 3:
  2641. ld r9,PACA_EXGEN+EX_CTR(r13)
  2642. mtctr r9
  2643. lwz r9,PACA_EXGEN+EX_CCR(r13)
  2644. mtcrf 0x80,r9
  2645. std r1,PACAR1(r13)
  2646. ld r9,PACA_EXGEN+EX_R9(r13)
  2647. ld r10,PACA_EXGEN+EX_R10(r13)
  2648. ld r11,PACA_EXGEN+EX_R11(r13)
  2649. ld r12,PACA_EXGEN+EX_R12(r13)
  2650. ld r13,PACA_EXGEN+EX_R13(r13)
  2651. /* May return to masked low address where r13 is not set up */
  2652. .if \hsrr
  2653. HRFI_TO_KERNEL
  2654. .else
  2655. RFI_TO_KERNEL
  2656. .endif
  2657. b .
  2658. .endm
TRAMP_REAL_BEGIN(stf_barrier_fallback)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	sync
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ori	31,31,0
	.rept 14
	b	1f
1:
	.endr
	blr

/* Clobbers r10, r11, ctr */
.macro L1D_DISPLACEMENT_FLUSH
	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
	mtctr	r11
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

	/* order ld/st prior to dcbt stop all streams with flushing */
	sync

	/*
	 * The load addresses are at staggered offsets within cachelines,
	 * which suits some pipelines better (on others it should not
	 * hurt).
	 */
1:
	ld	r11,(0x80 + 8)*0(r10)
	ld	r11,(0x80 + 8)*1(r10)
	ld	r11,(0x80 + 8)*2(r10)
	ld	r11,(0x80 + 8)*3(r10)
	ld	r11,(0x80 + 8)*4(r10)
	ld	r11,(0x80 + 8)*5(r10)
	ld	r11,(0x80 + 8)*6(r10)
	ld	r11,(0x80 + 8)*7(r10)
	addi	r10,r10,0x80*8
	bdnz	1b
.endm
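/*
 * Illustrative arithmetic for the macro above, assuming an example flush
 * size of 64 KiB (the real value comes from PACA_L1D_FLUSH_SIZE):
 *
 *	iterations = 65536 >> (7 + 3);	// 65536 / (128 * 8) = 64
 *	// Each of the 8 unrolled loads uses stride 0x80 + 8 = 136, so
 *	// load n hits cacheline n at intra-line offset 8*n (the
 *	// "staggered offsets"), and the base then advances 0x80 * 8 =
 *	// 1024 bytes, covering 64 KiB after 64 iterations.
 */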
TRAMP_REAL_BEGIN(entry_flush_fallback)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	L1D_DISPLACEMENT_FLUSH
	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ld	r11,PACA_EXRFI+EX_R11(r13)
	blr

/*
 * The SCV entry flush happens with interrupts enabled, so it must disable
 * them to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common).
 * r10 (containing LR) does not need to be preserved here because scv entry
 * puts 0 in the pt_regs, CTR can be clobbered for the same reason.
 */
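/*
 * Illustrative only: in rough C, the sequence below is (mtmsrd_l1() is a
 * hypothetical shorthand for the "mtmsrd rX,1" form, which updates only
 * MSR[EE] and MSR[RI]; the other names are from this file):
 *
 *	mtmsrd_l1(0);				// EE=0, RI=0
 *	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 *	l1d_displacement_flush();		// no NMI can clobber EX_R11 now
 *	mtmsrd_l1(MSR_RI);			// RI=1, EE stays 0
 */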
TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
	li	r10,0
	mtmsrd	r10,1
	lbz	r10,PACAIRQHAPPENED(r13)
	ori	r10,r10,PACA_IRQ_HARD_DIS
	stb	r10,PACAIRQHAPPENED(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	L1D_DISPLACEMENT_FLUSH
	ld	r11,PACA_EXRFI+EX_R11(r13)
	li	r10,MSR_RI
	mtmsrd	r10,1
	blr
TRAMP_REAL_BEGIN(rfi_flush_fallback)
	SET_SCRATCH0(r13);
	GET_PACA(r13);
	std	r1,PACA_EXRFI+EX_R12(r13)
	ld	r1,PACAKSAVE(r13)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	L1D_DISPLACEMENT_FLUSH
	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ld	r11,PACA_EXRFI+EX_R11(r13)
	ld	r1,PACA_EXRFI+EX_R12(r13)
	GET_SCRATCH0(r13);
	rfid
TRAMP_REAL_BEGIN(hrfi_flush_fallback)
	SET_SCRATCH0(r13);
	GET_PACA(r13);
	std	r1,PACA_EXRFI+EX_R12(r13)
	ld	r1,PACAKSAVE(r13)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	L1D_DISPLACEMENT_FLUSH
	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ld	r11,PACA_EXRFI+EX_R11(r13)
	ld	r1,PACA_EXRFI+EX_R12(r13)
	GET_SCRATCH0(r13);
	hrfid
TRAMP_REAL_BEGIN(rfscv_flush_fallback)
	/* system call volatile */
	mr	r7,r13
	GET_PACA(r13);
	mr	r8,r1
	ld	r1,PACAKSAVE(r13)
	mfctr	r9
	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
	mtctr	r11
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

	/* order ld/st prior to dcbt stop all streams with flushing */
	sync
	/*
	 * The load addresses are at staggered offsets within cachelines,
	 * which suits some pipelines better (on others it should not
	 * hurt).
	 */
1:
	ld	r11,(0x80 + 8)*0(r10)
	ld	r11,(0x80 + 8)*1(r10)
	ld	r11,(0x80 + 8)*2(r10)
	ld	r11,(0x80 + 8)*3(r10)
	ld	r11,(0x80 + 8)*4(r10)
	ld	r11,(0x80 + 8)*5(r10)
	ld	r11,(0x80 + 8)*6(r10)
	ld	r11,(0x80 + 8)*7(r10)
	addi	r10,r10,0x80*8
	bdnz	1b

	mtctr	r9
	li	r9,0
	li	r10,0
	li	r11,0
	mr	r1,r8
	mr	r13,r7
	RFSCV
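/*
 * Note: the flush loop above open-codes L1D_DISPLACEMENT_FLUSH rather
 * than invoking the macro, since r9-r11 are "system call volatile" here
 * and are simply clobbered instead of being saved to the EXRFI area;
 * they are then zeroed before RFSCV, presumably so no stale kernel
 * values are visible after the return to userspace.
 */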
USE_TEXT_SECTION()

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvm_interrupt:
	/*
	 * The conditional branch in KVMTEST can't reach all the way to
	 * kvmppc_interrupt, so make a nearby stub it can reach; the
	 * unconditional branch here has the range to finish the trip.
	 */
	b	kvmppc_interrupt
#endif
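/*
 * Illustrative reach arithmetic (architectural constants, not taken from
 * this file): a conditional branch encodes a 14-bit signed word
 * displacement, an unconditional "b" a 24-bit one:
 *
 *	cond_reach   = (1 << 13) * 4;	// +/- 32 KiB
 *	uncond_reach = (1 << 23) * 4;	// +/- 32 MiB
 */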
_GLOBAL(do_uaccess_flush)
UACCESS_FLUSH_FIXUP_SECTION
	nop
	nop
	nop
	blr
	L1D_DISPLACEMENT_FLUSH
	blr
_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
EXPORT_SYMBOL(do_uaccess_flush)

MASKED_INTERRUPT
MASKED_INTERRUPT hsrr=1

USE_FIXED_SECTION(virt_trampolines)
/*
 * All code below __end_soft_masked is treated as soft-masked. If
 * any code runs here with MSR[EE]=1, it must then cope with a pending
 * soft interrupt being raised (i.e., by ensuring it is replayed).
 *
 * The __end_interrupts marker must be past the out-of-line (OOL)
 * handlers, so that they are copied to real address 0x100 when running
 * a relocatable kernel. This ensures they can be reached from the short
 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
 * directly, without using LOAD_HANDLER().
 */
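/*
 * Illustrative only: "coping with a pending soft interrupt" amounts to
 * roughly the following C (a sketch; replay_soft_interrupts() is the
 * kernel's replay entry point, the guard shown here is schematic):
 *
 *	if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS)
 *		replay_soft_interrupts();
 */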
	.align	7
	.globl	__end_interrupts
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts, virt_trampolines)

CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
CLOSE_FIXED_SECTION(virt_vectors);
CLOSE_FIXED_SECTION(virt_trampolines);

USE_TEXT_SECTION()
/* MSR[RI] should be clear because this uses SRR[01] */
_GLOBAL(enable_machine_check)
	mflr	r0
	bcl	20,31,$+4
0:	mflr	r3
	addi	r3,r3,(1f - 0b)
	mtspr	SPRN_SRR0,r3
	mfmsr	r3
	ori	r3,r3,MSR_ME
	mtspr	SPRN_SRR1,r3
	RFI_TO_KERNEL
1:	mtlr	r0
	blr
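/*
 * Illustrative only: conceptually the above is mtmsr(mfmsr() | MSR_ME),
 * but MSR[ME] cannot be changed by mtmsrd outside hypervisor state, so
 * the new value goes through SRR1 and an rfid-class return. The
 * "bcl 20,31,$+4" loads LR with the next instruction's address, giving a
 * PC-relative SRR0 target. Rough C rendering (rfid_to() is hypothetical):
 *
 *	rfid_to(label_1, mfmsr() | MSR_ME);	// sets SRR0/SRR1, then rfid
 */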
/* MSR[RI] should be clear because this uses SRR[01] */
disable_machine_check:
	mflr	r0
	bcl	20,31,$+4
0:	mflr	r3
	addi	r3,r3,(1f - 0b)
	mtspr	SPRN_SRR0,r3
	mfmsr	r3
	li	r4,MSR_ME
	andc	r3,r3,r4
	mtspr	SPRN_SRR1,r3
	RFI_TO_KERNEL
1:	mtlr	r0
	blr