/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/lib/ev6-copy_page.S
 *
 * Copy an entire page.
 */
/* The following comparison of this routine vs the normal copy_page.S
   was written by an unnamed ev6 hardware designer and forwarded to me
   via Steven Hobbs <[email protected]>.

   First Problem: STQ overflows.
   -----------------------------

   It would be nice if EV6 handled every resource overflow efficiently,
   but for some, including store queue overflows, it doesn't: the
   overflow causes a trap and a restart of the pipe.

   To get around this we sometimes use (to borrow a term from a VSSAD
   researcher) "aeration".  The idea is to slow the rate at which the
   processor receives valid instructions by inserting nops in the fetch
   path.  In doing so, you can prevent the overflow and actually make
   the code run faster.  You can, of course, take advantage of the fact
   that the processor can fetch at most 4 aligned instructions per cycle.

   I inserted enough nops to force it to take 10 cycles to fetch the
   loop code.  In theory, EV6 should be able to execute this loop in
   9 cycles, but I was not able to get it to run that fast -- the initial
   conditions were such that I could not reach this optimum rate on
   (chaotic) EV6.  I wrote the code such that everything would issue
   in order.

   Second Problem: Dcache index matches.
   -------------------------------------

   If you are going to use this routine on random aligned pages, there
   is a 25% chance that the pages will land at the same dcache indices.
   Without care, this results in many nasty memory traps.

   The solution is to schedule the prefetches to avoid the memory
   conflicts.  I schedule the wh64 prefetches farther ahead of the
   read prefetches to avoid this problem.

   Third Problem: Needs more prefetching.
   --------------------------------------

   In order to improve the code I added deeper prefetching to take the
   most advantage of EV6's bandwidth.

   I also prefetched the read stream.  Note that adding the read prefetch
   forced me to add another cycle to the inner-most kernel -- up to 11
   from the original 8 cycles per iteration.  We could improve performance
   further by unrolling the loop and doing multiple prefetches per cycle.

   I think that the code below will be very robust and fast for the
   purpose of copying aligned pages.  It is slower when both the source
   and destination pages are in the dcache, but my guess is that this is
   less important than the dcache-miss case.  (The iteration-count
   arithmetic behind the loops is spelled out in the note below.) */
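/* A note on the loop accounting (a sketch added for clarity, not part
   of the original commentary): an Alpha page is 8 KB and a cache line
   is 64 bytes, so a page is 128 lines.  The main loop below runs 118
   iterations and the cleanup loop 10, covering all 128.  The main loop
   body is 44 instructions; at 4 aligned instructions fetched per
   cycle, that is the 11 fetch cycles per iteration mentioned above.

   A rough C analogue of the overall shape -- illustrative only, with
   made-up names (copy_page_sketch, LINE, LINES); the real work is done
   by the assembly below:

	#include <string.h>

	#define LINE	64		// cache-line size in bytes
	#define LINES	(8192 / LINE)	// 128 lines per 8 KB page

	static void copy_page_sketch(char *dst, const char *src)
	{
		int i;

		// Main loop (1:): one line per iteration, with the read
		// stream prefetched ~5 lines ahead and the write stream
		// hinted ~10 lines ahead in the real code.
		for (i = 0; i < LINES - 10; i++, dst += LINE, src += LINE)
			memcpy(dst, src, LINE);

		// Cleanup loop (2:): the final 10 lines, no hinting.
		for (i = 0; i < 10; i++, dst += LINE, src += LINE)
			memcpy(dst, src, LINE);
	}
*/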
#include <asm/export.h>
	.text
	.align 4
	.global copy_page
	.ent copy_page
copy_page:
	.prologue 0
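	/* Per the Alpha calling convention, $16 (a0) is the destination
	   page and $17 (a1) is the source page. */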
	/* Prefetch 5 read cache lines; write-hint 10 cache lines. */
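	/* A note on the idioms below: a load whose destination is $31
	   (the always-zero register) is a software prefetch on Alpha,
	   and wh64 hints that an aligned 64-byte block is about to be
	   entirely overwritten, so its old contents need not be read. */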
	wh64 ($16)
	ldl $31,0($17)
	ldl $31,64($17)
	lda $1,1*64($16)
	wh64 ($1)
	ldl $31,128($17)
	ldl $31,192($17)
	lda $1,2*64($16)
	wh64 ($1)
	ldl $31,256($17)
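	/* Main-loop trip count: an 8 KB page is 128 64-byte lines, of
	   which the cleanup loop below handles the final 10. */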
	lda $18,118
	lda $1,3*64($16)
	wh64 ($1)
	nop
	lda $1,4*64($16)
	lda $2,5*64($16)
	wh64 ($1)
	wh64 ($2)
	lda $1,6*64($16)
	lda $2,7*64($16)
	wh64 ($1)
	wh64 ($2)
	lda $1,8*64($16)
	lda $2,9*64($16)
	wh64 ($1)
	wh64 ($2)
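	/* $19 is the write-hint pointer; it runs 10 lines (640 bytes)
	   ahead of the store stream for the whole main loop. */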
	lda $19,10*64($16)
	nop
	/* Main prefetching/write-hinting loop. */
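	/* Each iteration copies one 64-byte line (8 quadwords) while
	   prefetching the read stream 5 lines ahead (ldl $31,320($17))
	   and write-hinting 10 lines ahead (wh64 ($19)). */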
1:	ldq $0,0($17)
	ldq $1,8($17)
	unop
	unop
	unop
	unop
	ldq $2,16($17)
	ldq $3,24($17)
	ldq $4,32($17)
	ldq $5,40($17)
	unop
	unop
	unop
	unop
	ldq $6,48($17)
	ldq $7,56($17)
	ldl $31,320($17)
	unop
	unop
	unop
	/* This gives the extra cycle of aeration above the minimum. */
	unop
	unop
	unop
	unop
	wh64 ($19)
	unop
	unop
	unop
	stq $0,0($16)
	subq $18,1,$18
	stq $1,8($16)
	unop
	unop
	stq $2,16($16)
	addq $17,64,$17
	stq $3,24($16)
	stq $4,32($16)
	stq $5,40($16)
	addq $19,64,$19
	unop
	stq $6,48($16)
	stq $7,56($16)
	addq $16,64,$16
	bne $18, 1b
	/* Prefetch the final 5 cache lines of the read stream. */
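	/* The lda below also reloads the trip count: 10 lines remain
	   for the cleanup loop. */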
	lda $18,10
	ldl $31,320($17)
	ldl $31,384($17)
	ldl $31,448($17)
	ldl $31,512($17)
	ldl $31,576($17)
	nop
	nop
	/* Non-prefetching, non-write-hinting cleanup loop for the
	   final 10 cache lines. */
2:	ldq $0,0($17)
	ldq $1,8($17)
	ldq $2,16($17)
	ldq $3,24($17)
	ldq $4,32($17)
	ldq $5,40($17)
	ldq $6,48($17)
	ldq $7,56($17)
	stq $0,0($16)
	subq $18,1,$18
	stq $1,8($16)
	addq $17,64,$17
	stq $2,16($16)
	stq $3,24($16)
	stq $4,32($16)
	stq $5,40($16)
	stq $6,48($16)
	stq $7,56($16)
	addq $16,64,$16
	bne $18, 2b
	ret
	nop
	unop
	nop
	.end copy_page
	EXPORT_SYMBOL(copy_page)