umulsidi3.S

/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>

#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32 || XCHAL_HAVE_MAC16
#define XCHAL_NO_MUL 0
#else
#define XCHAL_NO_MUL 1
#endif

ENTRY(__umulsidi3)

#ifdef __XTENSA_CALL0_ABI__
        abi_entry(32)
        s32i    a12, sp, 16
        s32i    a13, sp, 20
        s32i    a14, sp, 24
        s32i    a15, sp, 28
#elif XCHAL_NO_MUL
        /* This is not really a leaf function; allocate enough stack space
           to allow CALL12s to a helper function.  */
        abi_entry(32)
#else
        abi_entry_default
#endif

#ifdef __XTENSA_EB__
#define wh a2
#define wl a3
#else
#define wh a3
#define wl a2
#endif /* __XTENSA_EB__ */

        /* This code is taken from the mulsf3 routine in ieee754-sf.S.
           See more comments there.  */

#if XCHAL_HAVE_MUL32_HIGH
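        /* MULL yields the low 32 bits and MULUH the high 32 bits of
           the unsigned 64-bit product, so the result can be formed
           directly from two multiplies.  */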
        mull    a6, a2, a3
        muluh   wh, a2, a3
        mov     wl, a6

#else /* ! MUL32_HIGH */

#if defined(__XTENSA_CALL0_ABI__) && XCHAL_NO_MUL
        /* a0 and a8 will be clobbered by calling the multiply function
           but a8 is not used here and need not be saved.  */
        s32i    a0, sp, 0
#endif

#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32

#define a2h a4
#define a3h a5

        /* Get the high halves of the inputs into registers.  */
        srli    a2h, a2, 16
        srli    a3h, a3, 16

#define a2l a2
#define a3l a3

#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
        /* Clear the high halves of the inputs.  This does not matter
           for MUL16 because the high bits are ignored.  */
        extui   a2, a2, 0, 16
        extui   a3, a3, 0, 16
#endif
#endif /* MUL16 || MUL32 */

#if XCHAL_HAVE_MUL16

#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
        mul16u  dst, xreg ## xhalf, yreg ## yhalf

#elif XCHAL_HAVE_MUL32

#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
        mull    dst, xreg ## xhalf, yreg ## yhalf

#elif XCHAL_HAVE_MAC16

/* The preprocessor insists on inserting a space when concatenating after
   a period in the definition of do_mul below.  These macros are a
   workaround using underscores instead of periods when doing the
   concatenation.  */
#define umul_aa_ll umul.aa.ll
#define umul_aa_lh umul.aa.lh
#define umul_aa_hl umul.aa.hl
#define umul_aa_hh umul.aa.hh

#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
        umul_aa_ ## xhalf ## yhalf      xreg, yreg; \
        rsr     dst, ACCLO
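
/* For example, do_mul(a6, a2, h, a3, l) expands to:

        umul.aa.hl      a2, a3
        rsr     a6, ACCLO

   i.e. the MAC16 unit multiplies the chosen 16-bit halves into the
   accumulator and the low accumulator word is then read back.  */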

#else /* no multiply hardware */

#define set_arg_l(dst, src) \
        extui   dst, src, 0, 16
#define set_arg_h(dst, src) \
        srli    dst, src, 16

#ifdef __XTENSA_CALL0_ABI__
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
        set_arg_ ## xhalf (a13, xreg); \
        set_arg_ ## yhalf (a14, yreg); \
        call0   .Lmul_mulsi3; \
        mov     dst, a12
#else
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
        set_arg_ ## xhalf (a14, xreg); \
        set_arg_ ## yhalf (a15, yreg); \
        call12  .Lmul_mulsi3; \
        mov     dst, a14
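/* With CALL12 the register window rotates by twelve, so the callee's
   a2/a3 are the caller's a14/a15: the operands are staged there, and
   the product comes back in a14 (the callee's a2).  */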
#endif /* __XTENSA_CALL0_ABI__ */
#endif /* no multiply hardware */
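
        /* Form the 64-bit product from four 16 x 16 partial products.
           Writing each operand as x = (xh << 16) + xl:

             x * y = (xh*yh << 32) + ((xh*yl + xl*yh) << 16) + xl*yl

           pp0 = xl*yl, pp1 = xl*yh, pp2 = xh*yl and pp3 = xh*yh below.  */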
        /* Add pp1 and pp2 into a6 with carry-out in a9.  */
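        /* The addition is unsigned, so a carry out of bit 31 occurred
           iff the 32-bit sum is less than either addend; hence the
           bgeu test below.  */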
        do_mul(a6, a2, l, a3, h)        /* pp 1 */
        do_mul(a11, a2, h, a3, l)       /* pp 2 */
        movi    a9, 0
        add     a6, a6, a11
        bgeu    a6, a11, 1f
        addi    a9, a9, 1
1:
        /* Shift the high half of a9/a6 into position in a9.  Note that
           this value can be safely incremented without any carry-outs.  */
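        /* With SAR set to 16 by the SSAI, the SRC computes
           a9 = (a9 << 16) | (a6 >> 16), i.e. bits 16..47 of the
           64-bit pair {a9, a6}.  */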
        ssai    16
        src     a9, a9, a6
        /* Compute the low word into a6.  */
        do_mul(a11, a2, l, a3, l)       /* pp 0 */
        sll     a6, a6
        add     a6, a6, a11
        bgeu    a6, a11, 1f
        addi    a9, a9, 1
1:
        /* Compute the high word into wh.  */
        do_mul(wh, a2, h, a3, h)        /* pp 3 */
        add     wh, wh, a9
        mov     wl, a6

#endif /* !MUL32_HIGH */

#if defined(__XTENSA_CALL0_ABI__) && XCHAL_NO_MUL
        /* Restore the original return address.  */
        l32i    a0, sp, 0
#endif
#ifdef __XTENSA_CALL0_ABI__
        l32i    a12, sp, 16
        l32i    a13, sp, 20
        l32i    a14, sp, 24
        l32i    a15, sp, 28
        abi_ret(32)
#else
        abi_ret_default
#endif

#if XCHAL_NO_MUL
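
        /* ADDX2/4/8 compute (as << 1/2/3) + at; fall back to an
           explicit slli/add pair when the ADDX option is absent.  */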
        .macro  do_addx2 dst, as, at, tmp
#if XCHAL_HAVE_ADDX
        addx2   \dst, \as, \at
#else
        slli    \tmp, \as, 1
        add     \dst, \tmp, \at
#endif
        .endm

        .macro  do_addx4 dst, as, at, tmp
#if XCHAL_HAVE_ADDX
        addx4   \dst, \as, \at
#else
        slli    \tmp, \as, 2
        add     \dst, \tmp, \at
#endif
        .endm

        .macro  do_addx8 dst, as, at, tmp
#if XCHAL_HAVE_ADDX
        addx8   \dst, \as, \at
#else
        slli    \tmp, \as, 3
        add     \dst, \tmp, \at
#endif
        .endm
        /* For Xtensa processors with no multiply hardware, this
           simplified version of _mulsi3 is used for multiplying the
           16-bit chunks of the operands.  When using CALL0, this
           function uses a custom ABI: the inputs are passed in a13
           and a14, the result is returned in a12, and a8 and a15 are
           clobbered.  */
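        /* A C sketch of mul_mulsi3_body below (illustrative only):
           four multiplier bits are consumed per iteration, each one
           conditionally adding a scaled copy of the multiplicand:

                uint32_t r = 0;
                do {
                        if (x & 1) r += y;
                        if (x & 2) r += y << 1;
                        if (x & 4) r += y << 2;
                        if (x & 8) r += y << 3;
                        x >>= 4;
                        y <<= 4;
                } while (x);

           The movnez instructions perform the conditional adds
           without branches.  */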
        .align  4
.Lmul_mulsi3:
        abi_entry_default

        .macro  mul_mulsi3_body dst, src1, src2, tmp1, tmp2
        movi    \dst, 0
1:      add     \tmp1, \src2, \dst
        extui   \tmp2, \src1, 0, 1
        movnez  \dst, \tmp1, \tmp2

        do_addx2 \tmp1, \src2, \dst, \tmp1
        extui   \tmp2, \src1, 1, 1
        movnez  \dst, \tmp1, \tmp2

        do_addx4 \tmp1, \src2, \dst, \tmp1
        extui   \tmp2, \src1, 2, 1
        movnez  \dst, \tmp1, \tmp2

        do_addx8 \tmp1, \src2, \dst, \tmp1
        extui   \tmp2, \src1, 3, 1
        movnez  \dst, \tmp1, \tmp2

        srli    \src1, \src1, 4
        slli    \src2, \src2, 4
        bnez    \src1, 1b
        .endm

#ifdef __XTENSA_CALL0_ABI__
        mul_mulsi3_body a12, a13, a14, a15, a8
#else
        /* The result will be written into a2, so save that argument in a4.  */
        mov     a4, a2
        mul_mulsi3_body a2, a4, a3, a5, a6
#endif
        abi_ret_default

#endif /* XCHAL_NO_MUL */

ENDPROC(__umulsidi3)