/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * System call entry code / Linux gateway page
 * Copyright (c) Matthew Wilcox 1999 <[email protected]>
 * Licensed under the GNU GPL.
 * thanks to Philipp Rumpf, Mike Shaver and various others
 * sorry about the wall, puffin..
 */
/*
How does the Linux gateway page on PA-RISC work?
------------------------------------------------
The Linux gateway page on PA-RISC is "special".
It actually has PAGE_GATEWAY bits set (this is linux terminology; in parisc
terminology it's Execute, promote to PL0) in the page map.  So anything
executing on this page executes with kernel level privilege (there's more to it
than that: to have this happen, you also have to use a branch with a ,gate
completer to activate the privilege promotion).  The upshot is that everything
that runs on the gateway page runs at kernel privilege but with the current
user process address space (although you have access to kernel space via %sr2).
For the 0x100 syscall entry, we redo the space registers to point to the kernel
address space (preserving the user address space in %sr3), move to wide mode if
required, save the user registers and branch into the kernel syscall entry
point.  For all the other functions, we execute at kernel privilege but don't
flip address spaces.  The basic upshot of this is that these code snippets are
executed atomically (because the kernel can't be pre-empted) and they may
perform architecturally forbidden (to PL3) operations (like setting control
registers).
*/
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/page.h>
#include <asm/psw.h>
#include <asm/thread_info.h>
#include <asm/assembly.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <linux/linkage.h>

	/* We fill the empty parts of the gateway page with
	 * something that will kill the kernel or a
	 * userspace application.
	 */
#define KILL_INSN	break	0,0

	.level	PA_ASM_LEVEL
	.macro	lws_pagefault_disable reg1,reg2
	mfctl	%cr30, \reg2
	ldo	TASK_PAGEFAULT_DISABLED(\reg2), \reg2
	ldw	0(%sr2,\reg2), \reg1
	ldo	1(\reg1), \reg1
	stw	\reg1, 0(%sr2,\reg2)
	.endm

	.macro	lws_pagefault_enable reg1,reg2
	mfctl	%cr30, \reg2
	ldo	TASK_PAGEFAULT_DISABLED(\reg2), \reg2
	ldw	0(%sr2,\reg2), \reg1
	ldo	-1(\reg1), \reg1
	stw	\reg1, 0(%sr2,\reg2)
	.endm
	.text

	.import syscall_exit,code
	.import syscall_exit_rfi,code

	/* Linux gateway page is aliased to virtual page 0 in the kernel
	 * address space. Since it is a gateway page it cannot be
	 * dereferenced, so null pointers will still fault. We start
	 * the actual entry point at 0x100. We put break instructions
	 * at the beginning of the page to trap null indirect function
	 * pointers.
	 */
	.align PAGE_SIZE
ENTRY(linux_gateway_page)

	/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
	.rept 44
	KILL_INSN
	.endr

	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
	/* Light-weight-syscall entry must always be located at 0xb0 */
	/* WARNING: Keep this number updated with table size changes */
#define __NR_lws_entries (5)

lws_entry:
	gate	lws_start, %r0		/* increase privilege */
	depi	PRIV_USER, 31, 2, %r31	/* Ensure we return into user mode. */

	/* Fill from 0xb8 to 0xe0 */
	.rept 10
	KILL_INSN
	.endr

	/* This function MUST be located at 0xe0 for glibc's threading
	   mechanism to work. DO NOT MOVE THIS CODE EVER! */
set_thread_pointer:
	gate	.+8, %r0		/* increase privilege */
	depi	PRIV_USER, 31, 2, %r31	/* Ensure we return into user mode. */
	be	0(%sr7,%r31)		/* return to user space */
	mtctl	%r26, %cr27		/* move arg0 to the control register */

	/* Increase the chance of trapping if random jumps occur to this
	   address, fill from 0xf0 to 0x100 */
	.rept 4
	KILL_INSN
	.endr
	/* This address must remain fixed at 0x100 for glibc's syscalls to work */
	.align LINUX_GATEWAY_ADDR
linux_gateway_entry:
	gate	.+8, %r0			/* become privileged */
	mtsp	%r0,%sr4			/* get kernel space into sr4 */
	mtsp	%r0,%sr5			/* get kernel space into sr5 */
	mtsp	%r0,%sr6			/* get kernel space into sr6 */

#ifdef CONFIG_64BIT
	/* Store W bit on entry to the syscall in case it's a wide userland
	 * process. */
	ssm	PSW_SM_W, %r1
	extrd,u	%r1,PSW_W_BIT,1,%r1
	/* sp must be aligned on 4, so deposit the W bit setting into
	 * the bottom of sp temporarily */
	or,ev	%r1,%r30,%r30
	b,n	1f

	/* The top halves of argument registers must be cleared on syscall
	 * entry from narrow executable.
	 */
	depdi	0, 31, 32, %r26
	depdi	0, 31, 32, %r25
	depdi	0, 31, 32, %r24
	depdi	0, 31, 32, %r23
	depdi	0, 31, 32, %r22
	depdi	0, 31, 32, %r21

1:
#endif
	/* We use a rsm/ssm pair to prevent sr3 from being clobbered
	 * by external interrupts.
	 */
	mfsp	%sr7,%r1			/* save user sr7 */
	rsm	PSW_SM_I, %r0			/* disable interrupts */
	mtsp	%r1,%sr3			/* and store it in sr3 */

	mfctl	%cr30,%r1
	xor	%r1,%r30,%r30			/* ye olde xor trick */
	xor	%r1,%r30,%r1
	xor	%r1,%r30,%r30

	LDREG	TASK_STACK(%r30),%r30		/* set up kernel stack */
	ldo	FRAME_SIZE(%r30),%r30
	/* N.B.: It is critical that we don't set sr7 to 0 until r30
	 *       contains a valid kernel stack pointer. It is also
	 *       critical that we don't start using the kernel stack
	 *       until after sr7 has been set to 0.
	 */

	mtsp	%r0,%sr7			/* get kernel space into sr7 */
	ssm	PSW_SM_I, %r0			/* enable interrupts */
	STREGM	%r1,FRAME_SIZE(%r30)		/* save r1 (usp) here for now */
	mfctl	%cr30,%r1			/* get task ptr in %r1 */

	/* Save some registers for sigcontext and potential task
	   switch (see entry.S for the details of which ones are
	   saved/restored).  TASK_PT_PSW is zeroed so we can see whether
	   a process is on a syscall or not.  For an interrupt the real
	   PSW value is stored.  This is needed for gdb and sys_ptrace. */
	STREG	%r0,  TASK_PT_PSW(%r1)
	STREG	%r2,  TASK_PT_GR2(%r1)		/* preserve rp */
	STREG	%r19, TASK_PT_GR19(%r1)

	LDREGM	-FRAME_SIZE(%r30), %r2		/* get users sp back */
#ifdef CONFIG_64BIT
	extrd,u	%r2,63,1,%r19			/* W hidden in bottom bit */
#if 0
	xor	%r19,%r2,%r2			/* clear bottom bit */
	depd,z	%r19,1,1,%r19
	std	%r19,TASK_PT_PSW(%r1)
#endif
#endif
	STREG	%r2,  TASK_PT_GR30(%r1)		/* ... and save it */

	STREG	%r20, TASK_PT_GR20(%r1)		/* Syscall number */
	STREG	%r21, TASK_PT_GR21(%r1)
	STREG	%r22, TASK_PT_GR22(%r1)
	STREG	%r23, TASK_PT_GR23(%r1)		/* 4th argument */
	STREG	%r24, TASK_PT_GR24(%r1)		/* 3rd argument */
	STREG	%r25, TASK_PT_GR25(%r1)		/* 2nd argument */
	STREG	%r26, TASK_PT_GR26(%r1)		/* 1st argument */
	STREG	%r27, TASK_PT_GR27(%r1)		/* user dp */
	STREG	%r28, TASK_PT_GR28(%r1)		/* return value 0 */
	STREG	%r0,  TASK_PT_ORIG_R28(%r1)	/* don't prohibit restarts */
	STREG	%r29, TASK_PT_GR29(%r1)		/* return value 1 */
	STREG	%r31, TASK_PT_GR31(%r1)		/* preserve syscall return ptr */

	ldo	TASK_PT_FR0(%r1), %r27		/* save fpregs from the kernel */
	save_fp	%r27				/* or potential task switch */

	mfctl	%cr11, %r27			/* i.e. SAR */
	STREG	%r27, TASK_PT_SAR(%r1)

	loadgp

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
	copy	%r19,%r2			/* W bit back to r2 */
#else
	/* no need to save these on stack in wide mode because the first 8
	 * args are passed in registers */
	stw	%r22, -52(%r30)			/* 5th argument */
	stw	%r21, -56(%r30)			/* 6th argument */
#endif

	/* Are we being ptraced? */
	mfctl	%cr30, %r1
	LDREG	TASK_TI_FLAGS(%r1),%r1
	ldi	_TIF_SYSCALL_TRACE_MASK, %r19
	and,COND(=) %r1, %r19, %r0
	b,n	.Ltracesys
	/* Note!  We cannot use the syscall table that is mapped
	   nearby since the gateway page is mapped execute-only. */

#ifdef CONFIG_64BIT
	ldil	L%sys_call_table, %r1
	or,=	%r2,%r2,%r2
	addil	L%(sys_call_table64-sys_call_table), %r1
	ldo	R%sys_call_table(%r1), %r19
	or,=	%r2,%r2,%r2
	ldo	R%sys_call_table64(%r1), %r19
#else
	load32	sys_call_table, %r19
#endif
	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
	b,n	.Lsyscall_nosys

	LDREGX	%r20(%r19), %r19

	/* If this is a sys_rt_sigreturn call, and the signal was received
	 * when not in_syscall, then we want to return via syscall_exit_rfi,
	 * not syscall_exit.  Signal no. in r20, in_syscall in r25 (see
	 * trampoline code in signal.c).
	 */
	ldi	__NR_rt_sigreturn,%r2
	comb,=	%r2,%r20,.Lrt_sigreturn
.Lin_syscall:
	ldil	L%syscall_exit,%r2
	be	0(%sr7,%r19)
	ldo	R%syscall_exit(%r2),%r2
.Lrt_sigreturn:
	comib,<> 0,%r25,.Lin_syscall
	ldil	L%syscall_exit_rfi,%r2
	be	0(%sr7,%r19)
	ldo	R%syscall_exit_rfi(%r2),%r2

	/* Note!  Because we are not running where we were linked, any
	   calls to functions external to this file must be indirect.  To
	   be safe, we apply the opposite rule to functions within this
	   file, with local labels given to them to ensure correctness. */
.Lsyscall_nosys:
syscall_nosys:
	ldil	L%syscall_exit,%r1
	be	R%syscall_exit(%sr7,%r1)
	ldo	-ENOSYS(%r0),%r28		/* set errno */

	/* Warning! This trace code is a virtual duplicate of the code above so be
	 * sure to maintain both! */
.Ltracesys:
tracesys:
	/* Need to save more registers so the debugger can see where we
	 * are.  This saves only the lower 8 bits of PSW, so that the C
	 * bit is still clear on syscalls, and the D bit is set if this
	 * full register save path has been executed.  We check the D
	 * bit on syscall_return_rfi to determine which registers to
	 * restore.  An interrupt results in a full PSW saved with the
	 * C bit set, a non-straced syscall entry results in C and D clear
	 * in the saved PSW.
	 */
	mfctl	%cr30,%r1			/* get task ptr */
	ssm	0,%r2
	STREG	%r2,TASK_PT_PSW(%r1)		/* Lower 8 bits only!! */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)
	mfsp	%sr2,%r2
	STREG	%r2,TASK_PT_SR2(%r1)
	mfsp	%sr3,%r2
	STREG	%r2,TASK_PT_SR3(%r1)
	STREG	%r2,TASK_PT_SR4(%r1)
	STREG	%r2,TASK_PT_SR5(%r1)
	STREG	%r2,TASK_PT_SR6(%r1)
	STREG	%r2,TASK_PT_SR7(%r1)
	STREG	%r2,TASK_PT_IASQ0(%r1)
	STREG	%r2,TASK_PT_IASQ1(%r1)
	LDREG	TASK_PT_GR31(%r1),%r2
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	ldo	TASK_REGS(%r1),%r2
	/* reg_save %r2 */
	STREG	%r3,PT_GR3(%r2)
	STREG	%r4,PT_GR4(%r2)
	STREG	%r5,PT_GR5(%r2)
	STREG	%r6,PT_GR6(%r2)
	STREG	%r7,PT_GR7(%r2)
	STREG	%r8,PT_GR8(%r2)
	STREG	%r9,PT_GR9(%r2)
	STREG	%r10,PT_GR10(%r2)
	STREG	%r11,PT_GR11(%r2)
	STREG	%r12,PT_GR12(%r2)
	STREG	%r13,PT_GR13(%r2)
	STREG	%r14,PT_GR14(%r2)
	STREG	%r15,PT_GR15(%r2)
	STREG	%r16,PT_GR16(%r2)
	STREG	%r17,PT_GR17(%r2)
	STREG	%r18,PT_GR18(%r2)
	/* Finished saving things for the debugger */

	copy	%r2,%r26
	ldil	L%do_syscall_trace_enter,%r1
	ldil	L%tracesys_next,%r2
	be	R%do_syscall_trace_enter(%sr7,%r1)
	ldo	R%tracesys_next(%r2),%r2
tracesys_next:
	/* do_syscall_trace_enter either returned the syscallno, or -1L,
	 * so we skip restoring the PT_GR20 below, since we pulled it from
	 * task->thread.regs.gr[20] above.
	 */
	copy	%ret0,%r20

	mfctl	%cr30,%r1			/* get task ptr */
	LDREG	TASK_PT_GR28(%r1), %r28		/* Restore return value */
	LDREG	TASK_PT_GR26(%r1), %r26		/* Restore the users args */
	LDREG	TASK_PT_GR25(%r1), %r25
	LDREG	TASK_PT_GR24(%r1), %r24
	LDREG	TASK_PT_GR23(%r1), %r23
	LDREG	TASK_PT_GR22(%r1), %r22
	LDREG	TASK_PT_GR21(%r1), %r21
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#else
	stw	%r22, -52(%r30)			/* 5th argument */
	stw	%r21, -56(%r30)			/* 6th argument */
#endif

	cmpib,COND(=),n -1,%r20,tracesys_exit	/* seccomp may have returned -1 */
	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
	b,n	.Ltracesys_nosys
	/* Note!  We cannot use the syscall table that is mapped
	   nearby since the gateway page is mapped execute-only. */

#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR30(%r1), %r19		/* get users sp back */
	extrd,u	%r19,63,1,%r2			/* W hidden in bottom bit */

	ldil	L%sys_call_table, %r1
	or,=	%r2,%r2,%r2
	addil	L%(sys_call_table64-sys_call_table), %r1
	ldo	R%sys_call_table(%r1), %r19
	or,=	%r2,%r2,%r2
	ldo	R%sys_call_table64(%r1), %r19
#else
	load32	sys_call_table, %r19
#endif

	LDREGX	%r20(%r19), %r19

	/* If this is a sys_rt_sigreturn call, and the signal was received
	 * when not in_syscall, then we want to return via syscall_exit_rfi,
	 * not syscall_exit.  Signal no. in r20, in_syscall in r25 (see
	 * trampoline code in signal.c).
	 */
	ldi	__NR_rt_sigreturn,%r2
	comb,=	%r2,%r20,.Ltrace_rt_sigreturn
.Ltrace_in_syscall:
	ldil	L%tracesys_exit,%r2
	be	0(%sr7,%r19)
	ldo	R%tracesys_exit(%r2),%r2

.Ltracesys_nosys:
	ldo	-ENOSYS(%r0),%r28		/* set errno */

	/* Do *not* call this function on the gateway page, because it
	   makes a direct call to syscall_trace. */

tracesys_exit:
	mfctl	%cr30,%r1			/* get task ptr */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	ldo	TASK_REGS(%r1),%r26
	BL	do_syscall_trace_exit,%r2
	STREG	%r28,TASK_PT_GR28(%r1)		/* save return value now */
	mfctl	%cr30,%r1			/* get task ptr */
	LDREG	TASK_PT_GR28(%r1), %r28		/* Restore return val. */

	ldil	L%syscall_exit,%r1
	be,n	R%syscall_exit(%sr7,%r1)

.Ltrace_rt_sigreturn:
	comib,<> 0,%r25,.Ltrace_in_syscall
	ldil	L%tracesys_sigexit,%r2
	be	0(%sr7,%r19)
	ldo	R%tracesys_sigexit(%r2),%r2

tracesys_sigexit:
	mfctl	%cr30,%r1			/* get task ptr */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	BL	do_syscall_trace_exit,%r2
	ldo	TASK_REGS(%r1),%r26

	ldil	L%syscall_exit_rfi,%r1
	be,n	R%syscall_exit_rfi(%sr7,%r1)

	/*********************************************************
		32/64-bit Light-Weight-Syscall ABI

		* - Indicates a hint for userspace inline asm
		    implementations.

		Syscall number (caller-saves)
		- %r20
		* In asm clobber.

		Argument registers (caller-saves)
		- %r26, %r25, %r24, %r23, %r22
		* In asm input.

		Return registers (caller-saves)
		- %r28 (return), %r21 (errno)
		* In asm output.

		Caller-saves registers
		- %r1, %r27, %r29
		- %r2 (return pointer)
		- %r31 (ble link register)
		* In asm clobber.

		Callee-saves registers
		- %r3-%r18
		- %r30 (stack pointer)
		* Not in asm clobber.

		If userspace is 32-bit:
		Callee-saves registers
		- %r19 (32-bit PIC register)

		Differences from 32-bit calling convention:
		- Syscall number in %r20
		- Additional argument register %r22 (arg4)
		- Callee-saves %r19.

		If userspace is 64-bit:
		Callee-saves registers
		- %r27 (64-bit PIC register)

		Differences from 64-bit calling convention:
		- Syscall number in %r20
		- Additional argument register %r22 (arg4)
		- Callee-saves %r27.

		Error codes returned by entry path:
		ENOSYS - r20 was an invalid LWS number.

	*********************************************************/
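
	/* As a concrete illustration of the ABI above, a minimal userspace
	 * wrapper for the 32-bit CAS entry (LWS index 0 in the lws_table
	 * below) might look like the sketch that follows.  It is NOT part
	 * of the kernel: it merely follows the register assignments listed
	 * above and glibc's convention of branching to gateway offset 0xb0
	 * through %sr2, with the ldi in the delay slot selecting the LWS
	 * index; the 32-bit clobber list is used (callee-saved %r19 left
	 * alone, %r27/dp not listed).  The returned value in %r21 is 0 on
	 * success or a negative errno.  Treat all names as illustrative.
	 *
	 *	static inline long lws_cas32(int *mem, int oldval, int newval,
	 *				     int *prev)
	 *	{
	 *		register long          err asm("r21");
	 *		register unsigned long ret asm("r28");
	 *		register unsigned long a0  asm("r26") = (unsigned long)mem;
	 *		register long          a1  asm("r25") = oldval;
	 *		register long          a2  asm("r24") = newval;
	 *
	 *		asm volatile(
	 *			"ble	0xb0(%%sr2, %%r0)\n\t"
	 *			"ldi	0, %%r20"
	 *			: "=r" (ret), "=r" (err),
	 *			  "+r" (a0), "+r" (a1), "+r" (a2)
	 *			:
	 *			: "r1", "r2", "r20", "r22", "r23",
	 *			  "r29", "r31", "memory");
	 *
	 *		*prev = (int)ret;
	 *		return err;
	 *	}
	 */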
lws_start:

#ifdef CONFIG_64BIT
	ssm	PSW_SM_W, %r1
	extrd,u	%r1,PSW_W_BIT,1,%r1
	/* sp must be aligned on 4, so deposit the W bit setting into
	 * the bottom of sp temporarily */
	or,od	%r1,%r30,%r30

	/* Clip LWS number to a 32-bit value for 32-bit processes */
	depdi	0, 31, 32, %r20
#endif

	/* Is the lws entry number valid? */
	comiclr,>>	__NR_lws_entries, %r20, %r0
	b,n	lws_exit_nosys

	/* Load table start */
	ldil	L%lws_table, %r1
	ldo	R%lws_table(%r1), %r28	/* Scratch use of r28 */
	LDREGX	%r20(%sr2,r28), %r21	/* Scratch use of r21 */

	/* Jump to lws, lws table pointers already relocated */
	be,n	0(%sr2,%r21)

lws_exit_noerror:
	lws_pagefault_enable	%r1,%r21
	stw,ma	%r20, 0(%sr2,%r20)
	ssm	PSW_SM_I, %r0
	b	lws_exit
	copy	%r0, %r21

lws_wouldblock:
	ssm	PSW_SM_I, %r0
	ldo	2(%r0), %r28
	b	lws_exit
	ldo	-EAGAIN(%r0), %r21

lws_pagefault:
	lws_pagefault_enable	%r1,%r21
	stw,ma	%r20, 0(%sr2,%r20)
	ssm	PSW_SM_I, %r0
	ldo	3(%r0),%r28
	b	lws_exit
	ldo	-EAGAIN(%r0),%r21

lws_fault:
	ldo	1(%r0),%r28
	b	lws_exit
	ldo	-EFAULT(%r0),%r21

lws_exit_nosys:
	ldo	-ENOSYS(%r0),%r21
	/* Fall through: Return to userspace */

lws_exit:
#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr7, %r31)

	/***************************************************
		Implementing 32bit CAS as an atomic operation:

		%r26 - Address to examine
		%r25 - Old value to check (old)
		%r24 - New value to set (new)
		%r28 - Return prev through this register.
		%r21 - Kernel error code

		%r21 returns the following error codes:
		EAGAIN - CAS is busy, ldcw failed, try again.
		EFAULT - Read or write failed.

		If EAGAIN is returned, %r28 indicates the busy reason:
		r28 == 1 - CAS is busy. lock contended.
		r28 == 2 - CAS is busy. ldcw failed.
		r28 == 3 - CAS is busy. page fault.

		Scratch: r20, r28, r1

	****************************************************/
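
	/* -EAGAIN only means the operation could not be completed atomically
	 * right now, so userspace is expected to simply retry.  A sketch,
	 * using the hypothetical lws_cas32() wrapper shown earlier (addr,
	 * old and new being the caller's values):
	 *
	 *	long err;
	 *	int prev;
	 *	do {
	 *		err = lws_cas32(addr, old, new, &prev);
	 *	} while (err == -EAGAIN);
	 */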
	/* ELF64 Process entry path */
lws_compare_and_swap64:
#ifdef CONFIG_64BIT
	b,n	lws_compare_and_swap
#else
	/* If we are not a 64-bit kernel, then we don't
	 * have 64-bit input registers, and calling
	 * the 64-bit LWS CAS returns ENOSYS.
	 */
	b,n	lws_exit_nosys
#endif

	/* ELF32/ELF64 Process entry path */
lws_compare_and_swap32:
#ifdef CONFIG_64BIT
	/* Wide mode user process? */
	bb,<,n	%sp, 31, lws_compare_and_swap

	/* Clip all the input registers for 32-bit processes */
	depdi	0, 31, 32, %r26
	depdi	0, 31, 32, %r25
	depdi	0, 31, 32, %r24
#endif

lws_compare_and_swap:
	/* Trigger memory reference interruptions without writing to memory */
1:	ldw	0(%r26), %r28
2:	stbys,e	%r0, 0(%r26)

	/* Calculate 8-bit hash index from virtual address */
	extru_safe	%r26, 27, 8, %r20

	/* Load start of lock table */
	ldil	L%lws_lock_start, %r28
	ldo	R%lws_lock_start(%r28), %r28

	/* Find lock to use, the hash index is one of 0 to
	   255, multiplied by 16 (keep it 16-byte aligned)
	   and add to the lock table offset. */
	shlw	%r20, 4, %r20
	add	%r20, %r28, %r20

	rsm	PSW_SM_I, %r0			/* Disable interrupts */

	/* Try to acquire the lock */
	LDCW	0(%sr2,%r20), %r28
	comclr,<>	%r0, %r28, %r0
	b,n	lws_wouldblock

	/* Disable page faults to prevent sleeping in critical region */
	lws_pagefault_disable	%r21,%r28

	/*
		prev = *addr;
		if ( prev == old )
		  *addr = new;
		return prev;
	*/

	/* NOTES:
		This all works because intr_do_signal
		and schedule both check the return iasq
		and see that we are on the kernel page
		so this process is never scheduled off
		or is ever sent any signal of any sort,
		thus it is wholly atomic from userspace's
		perspective
	*/
	/* The load and store could fail */
3:	ldw	0(%r26), %r28
	sub,<>	%r28, %r25, %r0
4:	stw	%r24, 0(%r26)
	b,n	lws_exit_noerror

	/* A fault occurred on load or stbys,e store */
5:	b,n	lws_fault
	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 5b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 5b-linux_gateway_page)

	/* A page fault occurred in critical region */
6:	b,n	lws_pagefault
	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 6b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 6b-linux_gateway_page)

	/***************************************************
		New CAS implementation which uses pointers and variable size
		information. The value pointed to by old and new MUST NOT change
		while performing CAS. The lock only protects the value at %r26.

		%r26 - Address to examine
		%r25 - Pointer to the value to check (old)
		%r24 - Pointer to the value to set (new)
		%r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
		%r28 - Return non-zero on failure
		%r21 - Kernel error code

		%r21 returns the following error codes:
		EAGAIN - CAS is busy, ldcw failed, try again.
		EFAULT - Read or write failed.

		If EAGAIN is returned, %r28 indicates the busy reason:
		r28 == 1 - CAS is busy. lock contended.
		r28 == 2 - CAS is busy. ldcw failed.
		r28 == 3 - CAS is busy. page fault.

		Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)

	****************************************************/
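
	/* Design note: old and new are passed by address here (unlike the
	 * 32-bit CAS above) so that even a narrow process can request an
	 * 8-, 16-, 32- or 64-bit CAS through this single entry, which is
	 * LWS index 2 in the lws_table below.  A hypothetical call shape,
	 * derived only from the register list above:
	 *
	 *	r26 = addr, r25 = &old, r24 = &new, r23 = size code (3 = 64 bit),
	 *	r20 = 2, then branch to the gateway as in the lws_cas32() sketch;
	 *	r28 != 0 means *addr did not match old, r21 carries the errno.
	 */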
lws_compare_and_swap_2:
#ifdef CONFIG_64BIT
	/* Wide mode user process? */
	bb,<,n	%sp, 31, cas2_begin

	/* Clip the input registers for 32-bit processes. We don't
	   need to clip %r23 as we only use it for word operations */
	depdi	0, 31, 32, %r26
	depdi	0, 31, 32, %r25
	depdi	0, 31, 32, %r24
#endif

cas2_begin:
	/* Check the validity of the size pointer */
	subi,>>= 3, %r23, %r0
	b,n	lws_exit_nosys

	/* Jump to the functions which will load the old and new values into
	   registers depending on their size */
	shlw	%r23, 2, %r29
	blr	%r29, %r0
	nop

	/* 8-bit load */
1:	ldb	0(%r25), %r25
	b	cas2_lock_start
2:	ldb	0(%r24), %r24
	nop
	nop
	nop
	nop
	nop

	/* 16-bit load */
3:	ldh	0(%r25), %r25
	b	cas2_lock_start
4:	ldh	0(%r24), %r24
	nop
	nop
	nop
	nop
	nop

	/* 32-bit load */
5:	ldw	0(%r25), %r25
	b	cas2_lock_start
6:	ldw	0(%r24), %r24
	nop
	nop
	nop
	nop
	nop

	/* 64-bit load */
#ifdef CONFIG_64BIT
7:	ldd	0(%r25), %r25
8:	ldd	0(%r24), %r24
#else
	/* Load old value into r22/r23 - high/low */
7:	ldw	0(%r25), %r22
8:	ldw	4(%r25), %r23
	/* Load new value into fr4 for atomic store later */
9:	flddx	0(%r24), %fr4
#endif

cas2_lock_start:
	/* Trigger memory reference interruptions without writing to memory */
	copy	%r26, %r28
	depi_safe	0, 31, 2, %r28
10:	ldw	0(%r28), %r1
11:	stbys,e	%r0, 0(%r28)

	/* Calculate 8-bit hash index from virtual address */
	extru_safe	%r26, 27, 8, %r20

	/* Load start of lock table */
	ldil	L%lws_lock_start, %r28
	ldo	R%lws_lock_start(%r28), %r28

	/* Find lock to use, the hash index is one of 0 to
	   255, multiplied by 16 (keep it 16-byte aligned)
	   and add to the lock table offset. */
	shlw	%r20, 4, %r20
	add	%r20, %r28, %r20

	rsm	PSW_SM_I, %r0			/* Disable interrupts */

	/* Try to acquire the lock */
	LDCW	0(%sr2,%r20), %r28
	comclr,<>	%r0, %r28, %r0
	b,n	lws_wouldblock

	/* Disable page faults to prevent sleeping in critical region */
	lws_pagefault_disable	%r21,%r28

	/*
		prev = *addr;
		if ( prev == old )
		  *addr = new;
		return prev;
	*/

	/* NOTES:
		This all works because intr_do_signal
		and schedule both check the return iasq
		and see that we are on the kernel page
		so this process is never scheduled off
		or is ever sent any signal of any sort,
		thus it is wholly atomic from userspace's
		perspective
	*/

	/* Jump to the correct function */
	blr	%r29, %r0
	/* Set %r28 as non-zero for now */
	ldo	1(%r0),%r28

	/* 8-bit CAS */
12:	ldb	0(%r26), %r29
	sub,=	%r29, %r25, %r0
	b,n	lws_exit_noerror
13:	stb	%r24, 0(%r26)
	b	lws_exit_noerror
	copy	%r0, %r28
	nop
	nop

	/* 16-bit CAS */
14:	ldh	0(%r26), %r29
	sub,=	%r29, %r25, %r0
	b,n	lws_exit_noerror
15:	sth	%r24, 0(%r26)
	b	lws_exit_noerror
	copy	%r0, %r28
	nop
	nop

	/* 32-bit CAS */
16:	ldw	0(%r26), %r29
	sub,=	%r29, %r25, %r0
	b,n	lws_exit_noerror
17:	stw	%r24, 0(%r26)
	b	lws_exit_noerror
	copy	%r0, %r28
	nop
	nop

	/* 64-bit CAS */
#ifdef CONFIG_64BIT
18:	ldd	0(%r26), %r29
	sub,*=	%r29, %r25, %r0
	b,n	lws_exit_noerror
19:	std	%r24, 0(%r26)
	copy	%r0, %r28
#else
	/* Compare first word */
18:	ldw	0(%r26), %r29
	sub,=	%r29, %r22, %r0
	b,n	lws_exit_noerror
	/* Compare second word */
19:	ldw	4(%r26), %r29
	sub,=	%r29, %r23, %r0
	b,n	lws_exit_noerror
	/* Perform the store */
20:	fstdx	%fr4, 0(%r26)
	copy	%r0, %r28
#endif
	b	lws_exit_noerror
	copy	%r0, %r28

	/* A fault occurred on load or stbys,e store */
30:	b,n	lws_fault
	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 30b-linux_gateway_page)
#ifndef CONFIG_64BIT
	ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 30b-linux_gateway_page)
#endif
	ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 30b-linux_gateway_page)

	/* A page fault occurred in critical region */
31:	b,n	lws_pagefault
	ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 31b-linux_gateway_page)
#ifndef CONFIG_64BIT
	ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 31b-linux_gateway_page)
#endif

	/***************************************************
		LWS atomic exchange.

		%r26 - Exchange address
		%r25 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
		%r24 - Address of new value
		%r23 - Address of old value
		%r28 - Return non-zero on failure
		%r21 - Kernel error code

		%r21 returns the following error codes:
		EAGAIN - CAS is busy, ldcw failed, try again.
		EFAULT - Read or write failed.

		If EAGAIN is returned, %r28 indicates the busy reason:
		r28 == 1 - CAS is busy. lock contended.
		r28 == 2 - CAS is busy. ldcw failed.
		r28 == 3 - CAS is busy. page fault.

		Scratch: r20, r1

	****************************************************/
lws_atomic_xchg:
#ifdef CONFIG_64BIT
	/* Wide mode user process? */
	bb,<,n	%sp, 31, atomic_xchg_begin

	/* Clip the input registers for 32-bit processes. We don't
	   need to clip %r23 as we only use it for word operations */
	depdi	0, 31, 32, %r26
	depdi	0, 31, 32, %r25
	depdi	0, 31, 32, %r24
	depdi	0, 31, 32, %r23
#endif

atomic_xchg_begin:
	/* Check the validity of the size pointer */
	subi,>>= 3, %r25, %r0
	b,n	lws_exit_nosys

	/* Jump to the functions which will load the old and new values into
	   registers depending on their size */
	shlw	%r25, 2, %r1
	blr	%r1, %r0
	nop

	/* Perform exception checks */

	/* 8-bit exchange */
1:	ldb	0(%r24), %r20
	copy	%r23, %r20
	depi_safe	0, 31, 2, %r20
	b	atomic_xchg_start
2:	stbys,e	%r0, 0(%r20)
	nop
	nop
	nop

	/* 16-bit exchange */
3:	ldh	0(%r24), %r20
	copy	%r23, %r20
	depi_safe	0, 31, 2, %r20
	b	atomic_xchg_start
4:	stbys,e	%r0, 0(%r20)
	nop
	nop
	nop

	/* 32-bit exchange */
5:	ldw	0(%r24), %r20
	b	atomic_xchg_start
6:	stbys,e	%r0, 0(%r23)
	nop
	nop
	nop
	nop
	nop

	/* 64-bit exchange */
#ifdef CONFIG_64BIT
7:	ldd	0(%r24), %r20
8:	stdby,e	%r0, 0(%r23)
#else
7:	ldw	0(%r24), %r20
8:	ldw	4(%r24), %r20
	copy	%r23, %r20
	depi_safe	0, 31, 2, %r20
9:	stbys,e	%r0, 0(%r20)
10:	stbys,e	%r0, 4(%r20)
#endif

atomic_xchg_start:
	/* Trigger memory reference interruptions without writing to memory */
	copy	%r26, %r28
	depi_safe	0, 31, 2, %r28
11:	ldw	0(%r28), %r1
12:	stbys,e	%r0, 0(%r28)

	/* Calculate 8-bit hash index from virtual address */
	extru_safe	%r26, 27, 8, %r20

	/* Load start of lock table */
	ldil	L%lws_lock_start, %r28
	ldo	R%lws_lock_start(%r28), %r28

	/* Find lock to use, the hash index is one of 0 to
	   255, multiplied by 16 (keep it 16-byte aligned)
	   and add to the lock table offset. */
	shlw	%r20, 4, %r20
	add	%r20, %r28, %r20

	rsm	PSW_SM_I, %r0			/* Disable interrupts */

	/* Try to acquire the lock */
	LDCW	0(%sr2,%r20), %r28
	comclr,<>	%r0, %r28, %r0
	b,n	lws_wouldblock

	/* Disable page faults to prevent sleeping in critical region */
	lws_pagefault_disable	%r21,%r28

	/* NOTES:
		This all works because intr_do_signal
		and schedule both check the return iasq
		and see that we are on the kernel page
		so this process is never scheduled off
		or is ever sent any signal of any sort,
		thus it is wholly atomic from userspace's
		perspective
	*/

	/* Jump to the correct function */
	blr	%r1, %r0
	/* Set %r28 as non-zero for now */
	ldo	1(%r0),%r28

	/* 8-bit exchange */
14:	ldb	0(%r26), %r1
15:	stb	%r1, 0(%r23)
16:	ldb	0(%r24), %r1
17:	stb	%r1, 0(%r26)
	b	lws_exit_noerror
	copy	%r0, %r28
	nop
	nop

	/* 16-bit exchange */
18:	ldh	0(%r26), %r1
19:	sth	%r1, 0(%r23)
20:	ldh	0(%r24), %r1
21:	sth	%r1, 0(%r26)
	b	lws_exit_noerror
	copy	%r0, %r28
	nop
	nop

	/* 32-bit exchange */
22:	ldw	0(%r26), %r1
23:	stw	%r1, 0(%r23)
24:	ldw	0(%r24), %r1
25:	stw	%r1, 0(%r26)
	b	lws_exit_noerror
	copy	%r0, %r28
	nop
	nop

	/* 64-bit exchange */
#ifdef CONFIG_64BIT
26:	ldd	0(%r26), %r1
27:	std	%r1, 0(%r23)
28:	ldd	0(%r24), %r1
29:	std	%r1, 0(%r26)
#else
26:	flddx	0(%r26), %fr4
27:	fstdx	%fr4, 0(%r23)
28:	flddx	0(%r24), %fr4
29:	fstdx	%fr4, 0(%r26)
#endif
	b	lws_exit_noerror
	copy	%r0, %r28

	/* A fault occurred on load or stbys,e store */
30:	b,n	lws_fault
	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 30b-linux_gateway_page)
#ifndef CONFIG_64BIT
	ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 30b-linux_gateway_page)
#endif
	ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 30b-linux_gateway_page)

	/* A page fault occurred in critical region */
31:	b,n	lws_pagefault
	ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(22b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(23b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(24b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(25b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(26b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(27b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(28b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(29b-linux_gateway_page, 31b-linux_gateway_page)

	/***************************************************
		LWS atomic store.

		%r26 - Address to store
		%r25 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
		%r24 - Address of value to store
		%r28 - Return non-zero on failure
		%r21 - Kernel error code

		%r21 returns the following error codes:
		EAGAIN - CAS is busy, ldcw failed, try again.
		EFAULT - Read or write failed.

		If EAGAIN is returned, %r28 indicates the busy reason:
		r28 == 1 - CAS is busy. lock contended.
		r28 == 2 - CAS is busy. ldcw failed.
		r28 == 3 - CAS is busy. page fault.

		Scratch: r20, r1

	****************************************************/
lws_atomic_store:
#ifdef CONFIG_64BIT
	/* Wide mode user process? */
	bb,<,n	%sp, 31, atomic_store_begin

	/* Clip the input registers for 32-bit processes. We don't
	   need to clip %r23 as we only use it for word operations */
	depdi	0, 31, 32, %r26
	depdi	0, 31, 32, %r25
	depdi	0, 31, 32, %r24
#endif

atomic_store_begin:
	/* Check the validity of the size pointer */
	subi,>>= 3, %r25, %r0
	b,n	lws_exit_nosys

	shlw	%r25, 1, %r1
	blr	%r1, %r0
	nop

	/* Perform exception checks */

	/* 8-bit store */
1:	ldb	0(%r24), %r20
	b,n	atomic_store_start
	nop
	nop

	/* 16-bit store */
2:	ldh	0(%r24), %r20
	b,n	atomic_store_start
	nop
	nop

	/* 32-bit store */
3:	ldw	0(%r24), %r20
	b,n	atomic_store_start
	nop
	nop

	/* 64-bit store */
#ifdef CONFIG_64BIT
4:	ldd	0(%r24), %r20
#else
4:	ldw	0(%r24), %r20
5:	ldw	4(%r24), %r20
#endif

atomic_store_start:
	/* Trigger memory reference interruptions without writing to memory */
	copy	%r26, %r28
	depi_safe	0, 31, 2, %r28
6:	ldw	0(%r28), %r1
7:	stbys,e	%r0, 0(%r28)

	/* Calculate 8-bit hash index from virtual address */
	extru_safe	%r26, 27, 8, %r20

	/* Load start of lock table */
	ldil	L%lws_lock_start, %r28
	ldo	R%lws_lock_start(%r28), %r28

	/* Find lock to use, the hash index is one of 0 to
	   255, multiplied by 16 (keep it 16-byte aligned)
	   and add to the lock table offset. */
	shlw	%r20, 4, %r20
	add	%r20, %r28, %r20

	rsm	PSW_SM_I, %r0			/* Disable interrupts */

	/* Try to acquire the lock */
	LDCW	0(%sr2,%r20), %r28
	comclr,<>	%r0, %r28, %r0
	b,n	lws_wouldblock

	/* Disable page faults to prevent sleeping in critical region */
	lws_pagefault_disable	%r21,%r28

	/* NOTES:
		This all works because intr_do_signal
		and schedule both check the return iasq
		and see that we are on the kernel page
		so this process is never scheduled off
		or is ever sent any signal of any sort,
		thus it is wholly atomic from userspace's
		perspective
	*/

	/* Jump to the correct function */
	blr	%r1, %r0
	/* Set %r28 as non-zero for now */
	ldo	1(%r0),%r28

	/* 8-bit store */
9:	ldb	0(%r24), %r1
10:	stb	%r1, 0(%r26)
	b	lws_exit_noerror
	copy	%r0, %r28

	/* 16-bit store */
11:	ldh	0(%r24), %r1
12:	sth	%r1, 0(%r26)
	b	lws_exit_noerror
	copy	%r0, %r28

	/* 32-bit store */
13:	ldw	0(%r24), %r1
14:	stw	%r1, 0(%r26)
	b	lws_exit_noerror
	copy	%r0, %r28

	/* 64-bit store */
#ifdef CONFIG_64BIT
15:	ldd	0(%r24), %r1
16:	std	%r1, 0(%r26)
#else
15:	flddx	0(%r24), %fr4
16:	fstdx	%fr4, 0(%r26)
#endif
	b	lws_exit_noerror
	copy	%r0, %r28

	/* A fault occurred on load or stbys,e store */
30:	b,n	lws_fault
	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
#ifndef CONFIG_64BIT
	ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
#endif
	ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)

	/* A page fault occurred in critical region */
31:	b,n	lws_pagefault
	ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)

	/* Make sure nothing else is placed on this page */
	.align PAGE_SIZE
END(linux_gateway_page)
ENTRY(end_linux_gateway_page)

	/* Relocate symbols assuming linux_gateway_page is mapped
	   to virtual address 0x0 */

#define LWS_ENTRY(_name_) ASM_ULONG_INSN	(lws_##_name_ - linux_gateway_page)

	.section .rodata,"a"

	.align 8
	/* Light-weight-syscall table */
	/* Start of lws table. */
ENTRY(lws_table)
	LWS_ENTRY(compare_and_swap32)		/* 0 - ELF32 Atomic 32bit CAS */
	LWS_ENTRY(compare_and_swap64)		/* 1 - ELF64 Atomic 32bit CAS */
	LWS_ENTRY(compare_and_swap_2)		/* 2 - Atomic 64bit CAS */
	LWS_ENTRY(atomic_xchg)			/* 3 - Atomic Exchange */
	LWS_ENTRY(atomic_store)			/* 4 - Atomic Store */
END(lws_table)
	/* End of lws table */

#ifdef CONFIG_64BIT
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, compat)
#else
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
#endif
#define __SYSCALL(nr, entry)	ASM_ULONG_INSN entry
	.align 8
ENTRY(sys_call_table)
	.export sys_call_table,data
#include <asm/syscall_table_32.h>	/* 32-bit syscalls */
END(sys_call_table)

#ifdef CONFIG_64BIT
	.align 8
ENTRY(sys_call_table64)
#include <asm/syscall_table_64.h>	/* 64-bit syscalls */
END(sys_call_table64)
#endif

	/*
		All light-weight-syscall atomic operations
		will use this set of locks

		NOTE: The lws_lock_start symbol must be
		at least 16-byte aligned for safe use
		with ldcw.
	*/
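
	/* Which of these locks a given LWS operation takes is decided by the
	 * hash computed above (extru_safe %r26,27,8 followed by shlw ...,4).
	 * In rough C, and only as a sketch of that arithmetic:
	 *
	 *	lock = lws_lock_start + 16 * ((addr >> 4) & 0xff);
	 */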
	.section .data

	.align	L1_CACHE_BYTES
ENTRY(lws_lock_start)
	/* lws locks */
	.rept 256
	/* Keep locks aligned at 16-bytes */
	.word 1
	.word 0
	.word 0
	.word 0
	.endr
END(lws_lock_start)
	.previous

	.end