/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
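
/*
 * Illustrative sketch (not part of the original source): extracting byte 1
 * of a word in an endianness-independent way. On little-endian this expands
 * to "mov r3, r2, lsr #8"; on big-endian to "mov r3, r2, lsr #16".
 *
 *	mov	r3, r2, get_byte_1	@ shift byte 1 down to bits 7:0
 *	and	r3, r3, #255		@ mask off the remaining bytes
 */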

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
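
/*
 * Illustrative sketch (not in the original source): a copy loop can hint
 * the prefetcher without breaking pre-v5 builds, where PLD() expands to
 * nothing:
 *
 *	PLD(	pld	[r1, #32]	)	@ prefetch one cache line ahead
 */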

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory. Experiments on StrongARM and
 * XScale didn't show this to be worthwhile when the cache is not set to
 * write-allocate (this would need further testing on XScale when WA is
 * used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...)	code
#else
#define CALGN(code...)
#endif
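
/*
 * Illustrative sketch (not in the original source): copy loops wrap their
 * destination-alignment preamble in CALGN() so it only costs instructions
 * on CPUs that benefit, e.g.:
 *
 *	CALGN(	ands	ip, r0, #31	)	@ destination misalignment
 *	CALGN(	rsb	r3, ip, #32	)	@ bytes to the next cache line
 */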

#define IMM12_MASK	0xfff

/* the frame pointer used for stack unwinding */
ARM(	fpreg	.req	r11	)
THUMB(	fpreg	.req	r7	)

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

#if __LINUX_ARM_ARCH__ < 7
	.macro	dsb, args
	mcr	p15, 0, r0, c7, c10, 4
	.endm

	.macro	isb, args
	mcr	p15, 0, r0, c7, c5, 4
	.endm
#endif

	.macro	asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if	\save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
/*
 * The registers should really be pushed and popped conditionally, but
 * after the bl the flags are certainly clobbered, so that is not possible
 * here.
 */
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if	\save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm

/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm
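
/*
 * Illustrative sketch (not in the original source): the save/restore pair
 * brackets a critical section while preserving the caller's IRQ state:
 *
 *	save_and_disable_irqs r4	@ r4 := old state, IRQs now masked
 *	@ ... critical section ...
 *	restore_irqs r4			@ re-enable IRQs only if they were on
 */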

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr
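
/*
 * Illustrative sketch (not in the original source; the target label is
 * hypothetical): badr keeps bit 0 of the address correct for interworking,
 * so the result can be used as a return address in either instruction set:
 *
 *	badr	lr, 1f		@ in Thumb-2 kernels this is "adr lr, 1f + 1"
 *	b	some_routine	@ which may return with "bx lr"
 * 1:
 */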

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
	/* thread_info is the first member of struct task_struct */
	get_current \rd
	.endm

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

#define USERL(l, x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,l;			\
	.popsection

#define USER(x...)	USERL(9001f, x)
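
/*
 * Illustrative sketch (not in the original source): USER() records the
 * wrapped instruction in the exception table, so a fault on the user
 * access jumps to the local 9001 fixup label instead of oopsing:
 *
 * USER(	ldrt	r3, [r0]	)	@ may fault on a bad user pointer
 *	...
 * 9001:	@ fixup code runs here after a user access fault
 */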

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.align	2						;\
	.long	9998b - .					;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)						\
	.pushsection ".alt.smp.init", "a"			;\
	.align	2						;\
	.long	9998b - .					;\
	W(b)	. + (label - 9998b)				;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
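
/*
 * Illustrative sketch (not in the original source): an SMP kernel booting
 * on a uniprocessor machine patches each ALT_SMP() site with the paired
 * ALT_UP() instruction at boot, e.g.:
 *
 *	ALT_SMP(mrc	p15, 0, r0, c13, c0, 4)	@ read per-CPU offset on SMP
 *	ALT_UP(mov	r0, #0)			@ the offset is always 0 on UP
 */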

/*
 * this_cpu_offset - load the per-CPU offset of this CPU into
 * 		     register 'rd'
 */
	.macro	this_cpu_offset, rd:req
#ifdef CONFIG_SMP
	ALT_SMP(mrc	p15, 0, \rd, c13, c0, 4)
#ifdef CONFIG_CPU_V6
	ALT_UP_B(.L1_\@)
.L0_\@:
	.subsection	1
.L1_\@:	ldr_va	\rd, __per_cpu_offset
	b	.L0_\@
	.previous
#endif
#else
	mov	\rd, #0
#endif
	.endm

/*
 * set_current - store the task pointer of this CPU's current task
 */
	.macro	set_current, rn:req, tmp:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998:	mcr	p15, 0, \rn, c13, c0, 3		@ set TPIDRURO register
#ifdef CONFIG_CPU_V6
	ALT_UP_B(.L0_\@)
	.subsection	1
.L0_\@:	str_va	\rn, __current, \tmp
	b	.L1_\@
	.previous
.L1_\@:
#endif
#else
	str_va	\rn, __current, \tmp
#endif
	.endm

/*
 * get_current - load the task pointer of this CPU's current task
 */
	.macro	get_current, rd:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998:	mrc	p15, 0, \rd, c13, c0, 3		@ get TPIDRURO register
#ifdef CONFIG_CPU_V6
	ALT_UP_B(.L0_\@)
	.subsection	1
.L0_\@:	ldr_va	\rd, __current
	b	.L1_\@
	.previous
.L1_\@:
#endif
#else
	ldr_va	\rd, __current
#endif
	.endm

/*
 * reload_current - reload the task pointer of this CPU's current task
 *		    into the TLS register
 */
	.macro	reload_current, t1:req, t2:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
#ifdef CONFIG_CPU_V6
	ALT_SMP(nop)
	ALT_UP_B(.L0_\@)
#endif
	ldr_this_cpu \t1, __entry_task, \t1, \t2
	mcr	p15, 0, \t1, c13, c0, 3		@ store in TPIDRURO
.L0_\@:
#endif
	.endm

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
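
/*
 * Illustrative sketch (not in the original source): the \mode argument
 * only selects the encoding width; code that always assembles as ARM
 * passes "arm" so the ALT_UP() replacement is a 4-byte nop:
 *
 *	smp_dmb	arm		@ full barrier on SMP, patched to nop on UP
 */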

/*
 * Raw SMP data memory barrier
 */
	.macro	__smp_dmb mode
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	dmb	ish
	.else
	W(dmb)	ish
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#else
	.error "Incompatible SMP platform"
#endif
	.endm

#if defined(CONFIG_CPU_V7M)
/*
 * setmode is used to assert that the CPU is in SVC mode during boot.
 * For v7-M this is done in __v7m_setup, so setmode can be empty here.
 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * You cannot return to the original mode.
 */
	.macro	safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
	.endm
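
/*
 * Illustrative sketch (not in the original source): early boot code calls
 * this once, before any mode-dependent state is set up, with any free
 * register as scratch:
 *
 *	safe_svcmode_maskall r9	@ leave HYP if entered there, mask IRQ/FIQ
 */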

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL
	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
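
/*
 * Illustrative sketch (not in the original source): copy a word from a
 * user pointer in r1, with the post-increment and exception-table entry
 * handled by the macro; a fault branches to the default 9001 label:
 *
 *	ldrusr	r3, r1, 4	@ r3 := *r1 (user), r1 += 4, may fault
 *	...
 * 9001:	@ fault fixup
 */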

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
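
/*
 * Illustrative sketch (not in the original source): emits a sized,
 * NUL-terminated string object, e.g.
 *
 *	string	cpu_arch_name, "armv7"
 */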

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm
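
/*
 * Illustrative sketch (not in the original source): "ret lr" assembles to
 * "bx lr" on v6+ (interworking) and to "mov pc, lr" on older cores; the
 * conditional variants generated by the .irp above work the same way:
 *
 *	reteq	lr		@ return if Z is set
 */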

	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm
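
/*
 * Illustrative sketch (not in the original source; the message and line
 * number are hypothetical): emits an undefined instruction that the trap
 * handler matches against __bug_table:
 *
 *	bug	"unexpected state", 42
 */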

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif

	.macro		__adldst_l, op, reg, sym, tmp, c
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\c		\tmp, .La\@
	.subsection	1
	.align		2
.La\@:	.long		\sym - .Lpc\@
	.previous
	.else
	.ifnb		\c
 THUMB(	ittt		\c			)
	.endif
	movw\c		\tmp, #:lower16:\sym - .Lpc\@
	movt\c		\tmp, #:upper16:\sym - .Lpc\@
	.endif
#ifndef CONFIG_THUMB2_KERNEL
	.set		.Lpc\@, . + 8		// PC bias
	.ifc		\op, add
	add\c		\reg, \tmp, pc
	.else
	\op\c		\reg, [pc, \tmp]
	.endif
#else
.Lb\@:	add\c		\tmp, \tmp, pc
	/*
	 * In Thumb-2 builds, the PC bias depends on whether we are currently
	 * emitting into a .arm or a .thumb section. The size of the add opcode
	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
	 * emitting in ARM mode, so let's use this to account for the bias.
	 */
	.set		.Lpc\@, . + (. - .Lb\@)

	.ifnc		\op, add
	\op\c		\reg, [\tmp]
	.endif
#endif
	.endm

/*
 * mov_l - move a constant value or [relocated] address into a register
 */
	.macro		mov_l, dst:req, imm:req, cond
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\cond	\dst, =\imm
	.else
	movw\cond	\dst, #:lower16:\imm
	movt\cond	\dst, #:upper16:\imm
	.endif
	.endm
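
/*
 * Illustrative sketch (not in the original source): loads a full 32-bit
 * value in at most two instructions, unlike a plain mov, whose immediate
 * must be an 8-bit value rotated by an even amount:
 *
 *	mov_l	r0, 0x12345678	@ movw/movt pair on v7+, literal load before
 */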

/*
 * adr_l - adr pseudo-op with unlimited range
 *
 * @dst: destination register
 * @sym: name of the symbol
 * @cond: conditional opcode suffix
 */
	.macro		adr_l, dst:req, sym:req, cond
	__adldst_l	add, \dst, \sym, \dst, \cond
	.endm

/*
 * ldr_l - ldr <literal> pseudo-op with unlimited range
 *
 * @dst: destination register
 * @sym: name of the symbol
 * @cond: conditional opcode suffix
 */
	.macro		ldr_l, dst:req, sym:req, cond
	__adldst_l	ldr, \dst, \sym, \dst, \cond
	.endm

/*
 * str_l - str <literal> pseudo-op with unlimited range
 *
 * @src: source register
 * @sym: name of the symbol
 * @tmp: mandatory scratch register
 * @cond: conditional opcode suffix
 */
	.macro		str_l, src:req, sym:req, tmp:req, cond
	__adldst_l	str, \src, \sym, \tmp, \cond
	.endm
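
/*
 * Illustrative sketch (not in the original source; "some_symbol" is a
 * placeholder): unlike plain adr/ldr, these are not limited to nearby
 * PC-relative targets:
 *
 *	adr_l	r0, some_symbol		@ r0 := &some_symbol
 *	ldr_l	r1, some_symbol		@ r1 := some_symbol
 *	str_l	r1, some_symbol, r2	@ some_symbol := r1, r2 clobbered
 */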

	.macro		__ldst_va, op, reg, tmp, sym, cond, offset
#if __LINUX_ARM_ARCH__ >= 7 || \
    !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
	mov_l		\tmp, \sym, \cond
#else
	/*
	 * Avoid a literal load by emitting a sequence of ADD/LDR instructions
	 * with the appropriate relocations. The combined sequence has a range
	 * of -/+ 256 MiB, which should be sufficient for the core kernel and
	 * for modules loaded into the module region.
	 */
	.globl		\sym
	.reloc		.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
	.reloc		.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
	.reloc		.L2_\@, R_ARM_LDR_PC_G2, \sym
.L0_\@: sub\cond	\tmp, pc, #8 - \offset
.L1_\@: sub\cond	\tmp, \tmp, #4 - \offset
.L2_\@:
#endif
	\op\cond	\reg, [\tmp, #\offset]
	.endm

/*
 * ldr_va - load a 32-bit word from the virtual address of \sym
 */
	.macro		ldr_va, rd:req, sym:req, cond, tmp, offset=0
	.ifnb		\tmp
	__ldst_va	ldr, \rd, \tmp, \sym, \cond, \offset
	.else
	__ldst_va	ldr, \rd, \rd, \sym, \cond, \offset
	.endif
	.endm

/*
 * str_va - store a 32-bit word to the virtual address of \sym
 */
	.macro		str_va, rn:req, sym:req, tmp:req, cond
	__ldst_va	str, \rn, \tmp, \sym, \cond, 0
	.endm

/*
 * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
 *			without using a temp register. Supported in ARM mode
 *			only.
 */
	.macro		ldr_this_cpu_armv6, rd:req, sym:req
	this_cpu_offset	\rd
	.globl		\sym
	.reloc		.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
	.reloc		.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
	.reloc		.L2_\@, R_ARM_LDR_PC_G2, \sym
	add		\rd, \rd, pc
.L0_\@: sub		\rd, \rd, #4
.L1_\@: sub		\rd, \rd, #0
.L2_\@: ldr		\rd, [\rd, #4]
	.endm

/*
 * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
 *		  into register 'rd', which may be the stack pointer,
 *		  using 't1' and 't2' as general temp registers. These
 *		  are permitted to overlap with 'rd' if != sp
 */
	.macro		ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
#ifndef CONFIG_SMP
	ldr_va		\rd, \sym, tmp=\t1
#elif __LINUX_ARM_ARCH__ >= 7 || \
      !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
      (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
	this_cpu_offset	\t1
	mov_l		\t2, \sym
	ldr		\rd, [\t1, \t2]
#else
	ldr_this_cpu_armv6 \rd, \sym
#endif
	.endm

/*
 * rev_l - byte-swap a 32-bit value
 *
 * @val: source/destination register
 * @tmp: scratch register
 */
	.macro		rev_l, val:req, tmp:req
	.if		__LINUX_ARM_ARCH__ < 6
	eor		\tmp, \val, \val, ror #16
	bic		\tmp, \tmp, #0x00ff0000
	mov		\val, \val, ror #8
	eor		\val, \val, \tmp, lsr #8
	.else
	rev		\val, \val
	.endif
	.endm
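
/*
 * Illustrative worked example (not in the original source): on the pre-v6
 * path, starting with \val = 0xAABBCCDD the eor/bic/ror/eor sequence
 * yields 0xDDCCBBAA in four instructions, without a rev instruction:
 *
 *	rev_l	r0, r3		@ r0 := byte-swapped r0, r3 clobbered
 */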

/*
 * bl_r - branch and link to register
 *
 * @dst: target to branch to
 * @c: conditional opcode suffix
 */
	.macro		bl_r, dst:req, c
	.if		__LINUX_ARM_ARCH__ < 6
	mov\c		lr, pc
	mov\c		pc, \dst
	.else
	blx\c		\dst
	.endif
	.endm
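
/*
 * Illustrative sketch (not in the original source): calls through a
 * function pointer held in a register; on pre-v6, "mov lr, pc" captures
 * the return address because the PC reads as the mov's address + 8:
 *
 *	bl_r	r4		@ call the routine whose address is in r4
 */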

#endif	/* __ASM_ASSEMBLER_H__ */