memcpy_power7.S

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *
 * Copyright (C) IBM Corporation, 2012
 *
 * Author: Anton Blanchard <[email protected]>
 */
#include <asm/ppc_asm.h>

#ifndef SELFTEST_CASE
/* 0 == don't use VMX, 1 == use VMX */
#define SELFTEST_CASE	0
#endif

#ifdef __BIG_ENDIAN__
#define LVS(VRT,RA,RB)		lvsl	VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC)	vperm	VRT,VRA,VRB,VRC
#else
#define LVS(VRT,RA,RB)		lvsr	VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC)	vperm	VRT,VRB,VRA,VRC
#endif
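
/*
 * On little endian the permute control vector is built with lvsr rather
 * than lvsl and the vperm source operands are swapped, so the same
 * shift-and-merge sequence in the unaligned loop works for both byte
 * orders.
 */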

_GLOBAL(memcpy_power7)
	cmpldi	r5,16
	cmpldi	cr1,r5,4096
	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	blt	.Lshort_copy

#ifdef CONFIG_ALTIVEC
test_feature = SELFTEST_CASE
BEGIN_FTR_SECTION
	bgt	cr1, .Lvmx_copy
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif

.Lnonvmx_copy:
	/* Get the source 8B aligned */
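	/*
	 * r6 = -src; mtocrf 0x01 copies its low nibble into CR7, so each
	 * bf below tests one bit of the misalignment and copies 1, 2 and
	 * then 4 bytes as needed until the source is 8B aligned.
	 */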
	neg	r6,r4
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-3)
	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	sub	r5,r5,r6
	cmpldi	r5,128
	blt	5f

	mflr	r0
	stdu	r1,-STACKFRAMESIZE(r1)
	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)
	std	r17,STK_REG(R17)(r1)
	std	r18,STK_REG(R18)(r1)
	std	r19,STK_REG(R19)(r1)
	std	r20,STK_REG(R20)(r1)
	std	r21,STK_REG(R21)(r1)
	std	r22,STK_REG(R22)(r1)
	std	r0,STACKFRAMESIZE+16(r1)

	srdi	r6,r5,7
	mtctr	r6

	/* Now do cacheline (128B) sized loads and stores. */
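	/*
	 * Sixteen GPRs (r0, r6-r12 and the saved nonvolatiles r14-r21)
	 * carry one full 128B cacheline per iteration; CTR holds the
	 * number of whole cachelines left to copy.
	 */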
	.align	5
4:
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	ld	r9,32(r4)
	ld	r10,40(r4)
	ld	r11,48(r4)
	ld	r12,56(r4)
	ld	r14,64(r4)
	ld	r15,72(r4)
	ld	r16,80(r4)
	ld	r17,88(r4)
	ld	r18,96(r4)
	ld	r19,104(r4)
	ld	r20,112(r4)
	ld	r21,120(r4)
	addi	r4,r4,128
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	std	r9,32(r3)
	std	r10,40(r3)
	std	r11,48(r3)
	std	r12,56(r3)
	std	r14,64(r3)
	std	r15,72(r3)
	std	r16,80(r3)
	std	r17,88(r3)
	std	r18,96(r3)
	std	r19,104(r3)
	std	r20,112(r3)
	std	r21,120(r3)
	addi	r3,r3,128
	bdnz	4b

	clrldi	r5,r5,(64-7)

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)
	ld	r17,STK_REG(R17)(r1)
	ld	r18,STK_REG(R18)(r1)
	ld	r19,STK_REG(R19)(r1)
	ld	r20,STK_REG(R20)(r1)
	ld	r21,STK_REG(R21)(r1)
	ld	r22,STK_REG(R22)(r1)
	addi	r1,r1,STACKFRAMESIZE

	/* Up to 127B to go */
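	/*
	 * r5 >> 4 lands in CR7: cr7*4+1 selects a 64B chunk, cr7*4+2 a
	 * 32B chunk and cr7*4+3 a 16B chunk; the final 0-15 bytes fall
	 * through to .Lshort_copy.
	 */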
5:	srdi	r6,r5,4
	mtocrf	0x01,r6

6:	bf	cr7*4+1,7f
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	ld	r9,32(r4)
	ld	r10,40(r4)
	ld	r11,48(r4)
	ld	r12,56(r4)
	addi	r4,r4,64
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	std	r9,32(r3)
	std	r10,40(r3)
	std	r11,48(r3)
	std	r12,56(r3)
	addi	r3,r3,64

	/* Up to 63B to go */
7:	bf	cr7*4+2,8f
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	addi	r4,r4,32
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	addi	r3,r3,32

	/* Up to 31B to go */
8:	bf	cr7*4+3,9f
	ld	r0,0(r4)
	ld	r6,8(r4)
	addi	r4,r4,16
	std	r0,0(r3)
	std	r6,8(r3)
	addi	r3,r3,16

9:	clrldi	r5,r5,(64-4)

	/* Up to 15B to go */
.Lshort_copy:
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r6,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r6,4(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	blr

.Lunwind_stack_nonvmx_copy:
	addi	r1,r1,STACKFRAMESIZE
	b	.Lnonvmx_copy

.Lvmx_copy:
#ifdef CONFIG_ALTIVEC
	mflr	r0
	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
	std	r0,16(r1)
	stdu	r1,-STACKFRAMESIZE(r1)
	bl	enter_vmx_ops
	cmpwi	cr1,r3,0
	ld	r0,STACKFRAMESIZE+16(r1)
	ld	r3,STK_REG(R31)(r1)
	ld	r4,STK_REG(R30)(r1)
	ld	r5,STK_REG(R29)(r1)
	mtlr	r0
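	/*
	 * cr1 still holds the enter_vmx_ops result: zero means VMX cannot
	 * be used here, and we fall back to the scalar loop once the
	 * prefetch streams below have been kicked off.
	 */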
	/*
	 * We prefetch both the source and destination using enhanced touch
	 * instructions. We use a stream ID of 0 for the load side and
	 * 1 for the store side.
	 */
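	/*
	 * r6/r9 carry the stream start addresses (TH=0b01000), r7/r10 the
	 * capped length, depth and stream ID (TH=0b01010), and the final
	 * dcbt with the GO bit set starts both streams.
	 */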
	clrrdi	r6,r4,7
	clrrdi	r9,r3,7
	ori	r9,r9,1		/* stream=1 */

	srdi	r7,r5,7		/* length in cachelines, capped at 0x3FF */
	cmpldi	r7,0x3FF
	ble	1f
	li	r7,0x3FF

1:	lis	r0,0x0E00	/* depth=7 */
	sldi	r7,r7,7
	or	r7,r7,r0
	ori	r10,r7,1	/* stream=1 */

	lis	r8,0x8000	/* GO=1 */
	clrldi	r8,r8,32

	dcbt	0,r6,0b01000
	dcbt	0,r7,0b01010
	dcbtst	0,r9,0b01000
	dcbtst	0,r10,0b01010
	eieio
	dcbt	0,r8,0b01010	/* GO */

	beq	cr1,.Lunwind_stack_nonvmx_copy

	/*
	 * If source and destination are not relatively aligned we use a
	 * slower permute loop.
	 */
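	/*
	 * "Relatively aligned" means the low four bits of source and
	 * destination match, so 16B vector loads and stores line up
	 * without needing a permute.
	 */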
	xor	r6,r4,r3
	rldicl.	r6,r6,0,(64-4)
	bne	.Lvmx_unaligned_copy

	/* Get the destination 16B aligned */
	neg	r6,r3
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-4)

	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	bf	cr7*4+0,4f
	ld	r0,0(r4)
	addi	r4,r4,8
	std	r0,0(r3)
	addi	r3,r3,8

4:	sub	r5,r5,r6

	/* Get the destination 128B aligned */
	neg	r6,r3
	srdi	r7,r6,4
	mtocrf	0x01,r7
	clrldi	r6,r6,(64-7)

	li	r9,16
	li	r10,32
	li	r11,48

	bf	cr7*4+3,5f
	lvx	v1,0,r4
	addi	r4,r4,16
	stvx	v1,0,r3
	addi	r3,r3,16

5:	bf	cr7*4+2,6f
	lvx	v1,0,r4
	lvx	v0,r4,r9
	addi	r4,r4,32
	stvx	v1,0,r3
	stvx	v0,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
	lvx	v3,0,r4
	lvx	v2,r4,r9
	lvx	v1,r4,r10
	lvx	v0,r4,r11
	addi	r4,r4,64
	stvx	v3,0,r3
	stvx	v2,r3,r9
	stvx	v1,r3,r10
	stvx	v0,r3,r11
	addi	r3,r3,64

7:	sub	r5,r5,r6
	srdi	r6,r5,7

	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)

	li	r12,64
	li	r14,80
	li	r15,96
	li	r16,112

	mtctr	r6

	/*
	 * Now do cacheline sized loads and stores. By this stage the
	 * cacheline stores are also cacheline aligned.
	 */
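	/*
	 * r9-r11 and r12, r14-r16 hold the quadword offsets 16..112, so
	 * each iteration moves eight 16B vectors, one full cacheline.
	 */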
	.align	5
8:
	lvx	v7,0,r4
	lvx	v6,r4,r9
	lvx	v5,r4,r10
	lvx	v4,r4,r11
	lvx	v3,r4,r12
	lvx	v2,r4,r14
	lvx	v1,r4,r15
	lvx	v0,r4,r16
	addi	r4,r4,128
	stvx	v7,0,r3
	stvx	v6,r3,r9
	stvx	v5,r3,r10
	stvx	v4,r3,r11
	stvx	v3,r3,r12
	stvx	v2,r3,r14
	stvx	v1,r3,r15
	stvx	v0,r3,r16
	addi	r3,r3,128
	bdnz	8b

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)

	/* Up to 127B to go */
	clrldi	r5,r5,(64-7)
	srdi	r6,r5,4
	mtocrf	0x01,r6

	bf	cr7*4+1,9f
	lvx	v3,0,r4
	lvx	v2,r4,r9
	lvx	v1,r4,r10
	lvx	v0,r4,r11
	addi	r4,r4,64
	stvx	v3,0,r3
	stvx	v2,r3,r9
	stvx	v1,r3,r10
	stvx	v0,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
	lvx	v1,0,r4
	lvx	v0,r4,r9
	addi	r4,r4,32
	stvx	v1,0,r3
	stvx	v0,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
	lvx	v1,0,r4
	addi	r4,r4,16
	stvx	v1,0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
11:	clrldi	r5,r5,(64-4)
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	ld	r0,0(r4)
	addi	r4,r4,8
	std	r0,0(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	addi	r1,r1,STACKFRAMESIZE
	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
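	/*
	 * exit_vmx_ops() takes the original destination in r3 and returns
	 * it, so branching to it rather than calling it lets its blr go
	 * straight back to memcpy's caller with the right return value.
	 */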
	b	exit_vmx_ops		/* tail call optimise */

.Lvmx_unaligned_copy:
	/* Get the destination 16B aligned */
	neg	r6,r3
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-4)

	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	bf	cr7*4+0,4f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r7,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r7,4(r3)
	addi	r3,r3,8

4:	sub	r5,r5,r6

	/* Get the destination 128B aligned */
	neg	r6,r3
	srdi	r7,r6,4
	mtocrf	0x01,r7
	clrldi	r6,r6,(64-7)

	li	r9,16
	li	r10,32
	li	r11,48

	LVS(v16,0,r4)		/* Setup permute control vector */
	lvx	v0,0,r4
	addi	r4,r4,16
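	/*
	 * v0 always holds the previous source quadword, so each VPERM
	 * merges it with the newly loaded one through the control vector
	 * in v16. The source pointer therefore runs 16 bytes ahead of
	 * what has been stored; label 11 unwinds that offset before the
	 * scalar tail.
	 */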
	bf	cr7*4+3,5f
	lvx	v1,0,r4
	VPERM(v8,v0,v1,v16)
	addi	r4,r4,16
	stvx	v8,0,r3
	addi	r3,r3,16
	vor	v0,v1,v1

5:	bf	cr7*4+2,6f
	lvx	v1,0,r4
	VPERM(v8,v0,v1,v16)
	lvx	v0,r4,r9
	VPERM(v9,v1,v0,v16)
	addi	r4,r4,32
	stvx	v8,0,r3
	stvx	v9,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
	lvx	v3,0,r4
	VPERM(v8,v0,v3,v16)
	lvx	v2,r4,r9
	VPERM(v9,v3,v2,v16)
	lvx	v1,r4,r10
	VPERM(v10,v2,v1,v16)
	lvx	v0,r4,r11
	VPERM(v11,v1,v0,v16)
	addi	r4,r4,64
	stvx	v8,0,r3
	stvx	v9,r3,r9
	stvx	v10,r3,r10
	stvx	v11,r3,r11
	addi	r3,r3,64

7:	sub	r5,r5,r6
	srdi	r6,r5,7

	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)

	li	r12,64
	li	r14,80
	li	r15,96
	li	r16,112

	mtctr	r6

	/*
	 * Now do cacheline sized loads and stores. By this stage the
	 * cacheline stores are also cacheline aligned.
	 */
	.align	5
8:
	lvx	v7,0,r4
	VPERM(v8,v0,v7,v16)
	lvx	v6,r4,r9
	VPERM(v9,v7,v6,v16)
	lvx	v5,r4,r10
	VPERM(v10,v6,v5,v16)
	lvx	v4,r4,r11
	VPERM(v11,v5,v4,v16)
	lvx	v3,r4,r12
	VPERM(v12,v4,v3,v16)
	lvx	v2,r4,r14
	VPERM(v13,v3,v2,v16)
	lvx	v1,r4,r15
	VPERM(v14,v2,v1,v16)
	lvx	v0,r4,r16
	VPERM(v15,v1,v0,v16)
	addi	r4,r4,128
	stvx	v8,0,r3
	stvx	v9,r3,r9
	stvx	v10,r3,r10
	stvx	v11,r3,r11
	stvx	v12,r3,r12
	stvx	v13,r3,r14
	stvx	v14,r3,r15
	stvx	v15,r3,r16
	addi	r3,r3,128
	bdnz	8b

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)

	/* Up to 127B to go */
	clrldi	r5,r5,(64-7)
	srdi	r6,r5,4
	mtocrf	0x01,r6

	bf	cr7*4+1,9f
	lvx	v3,0,r4
	VPERM(v8,v0,v3,v16)
	lvx	v2,r4,r9
	VPERM(v9,v3,v2,v16)
	lvx	v1,r4,r10
	VPERM(v10,v2,v1,v16)
	lvx	v0,r4,r11
	VPERM(v11,v1,v0,v16)
	addi	r4,r4,64
	stvx	v8,0,r3
	stvx	v9,r3,r9
	stvx	v10,r3,r10
	stvx	v11,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
	lvx	v1,0,r4
	VPERM(v8,v0,v1,v16)
	lvx	v0,r4,r9
	VPERM(v9,v1,v0,v16)
	addi	r4,r4,32
	stvx	v8,0,r3
	stvx	v9,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
	lvx	v1,0,r4
	VPERM(v8,v0,v1,v16)
	addi	r4,r4,16
	stvx	v8,0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
11:	clrldi	r5,r5,(64-4)
	addi	r4,r4,-16	/* Unwind the +16 load offset */
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r6,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r6,4(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	addi	r1,r1,STACKFRAMESIZE
	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	b	exit_vmx_ops		/* tail call optimise */
#endif /* CONFIG_ALTIVEC */