coherency_ll.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Coherency fabric: low level functions
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <[email protected]>
 *
 * This file implements the assembly function to add a CPU to the
 * coherency fabric. This function is called by each of the secondary
 * CPUs during their early boot in an SMP kernel, which is why this
 * function has to be callable from assembly. It can also be called
 * by a primary CPU from C code during its boot.
 */
  15. #include <linux/linkage.h>
  16. #define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
  17. #define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4
  18. #include <asm/assembler.h>
  19. #include <asm/cp15.h>
  20. .text
  21. /*
  22. * Returns the coherency base address in r1 (r0 is untouched), or 0 if
  23. * the coherency fabric is not enabled.
  24. */
  25. ENTRY(ll_get_coherency_base)
  26. mrc p15, 0, r1, c1, c0, 0
  27. tst r1, #CR_M @ Check MMU bit enabled
  28. bne 1f
  29. /*
  30. * MMU is disabled, use the physical address of the coherency
  31. * base address, (or 0x0 if the coherency fabric is not mapped)
  32. */
  33. adr r1, 3f
  34. ldr r3, [r1]
  35. ldr r1, [r1, r3]
  36. b 2f
  37. 1:
  38. /*
  39. * MMU is enabled, use the virtual address of the coherency
  40. * base address.
  41. */
  42. ldr r1, =coherency_base
  43. ldr r1, [r1]
  44. 2:
  45. ret lr
  46. ENDPROC(ll_get_coherency_base)
  47. /*
  48. * Returns the coherency CPU mask in r3 (r0 is untouched). This
  49. * coherency CPU mask can be used with the coherency fabric
  50. * configuration and control registers. Note that the mask is already
  51. * endian-swapped as appropriate so that the calling functions do not
  52. * have to care about endianness issues while accessing the coherency
  53. * fabric registers
  54. */
  55. ENTRY(ll_get_coherency_cpumask)
  56. mrc p15, 0, r3, cr0, cr0, 5
  57. and r3, r3, #15
  58. mov r2, #(1 << 24)
  59. lsl r3, r2, r3
  60. ARM_BE8(rev r3, r3)
  61. ret lr
  62. ENDPROC(ll_get_coherency_cpumask)
  63. /*
  64. * ll_add_cpu_to_smp_group(), ll_enable_coherency() and
  65. * ll_disable_coherency() use the strex/ldrex instructions while the
  66. * MMU can be disabled. The Armada XP SoC has an exclusive monitor
  67. * that tracks transactions to Device and/or SO memory and thanks to
  68. * that, exclusive transactions are functional even when the MMU is
  69. * disabled.
  70. */
  71. ENTRY(ll_add_cpu_to_smp_group)
  72. /*
  73. * As r0 is not modified by ll_get_coherency_base() and
  74. * ll_get_coherency_cpumask(), we use it to temporarly save lr
  75. * and avoid it being modified by the branch and link
  76. * calls. This function is used very early in the secondary
  77. * CPU boot, and no stack is available at this point.
  78. */
  79. mov r0, lr
  80. bl ll_get_coherency_base
  81. /* Bail out if the coherency is not enabled */
  82. cmp r1, #0
  83. reteq r0
  84. bl ll_get_coherency_cpumask
  85. mov lr, r0
  86. add r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
  87. 1:
  88. ldrex r2, [r0]
  89. orr r2, r2, r3
  90. strex r1, r2, [r0]
  91. cmp r1, #0
  92. bne 1b
  93. ret lr
  94. ENDPROC(ll_add_cpu_to_smp_group)
  95. ENTRY(ll_enable_coherency)
  96. /*
  97. * As r0 is not modified by ll_get_coherency_base() and
  98. * ll_get_coherency_cpumask(), we use it to temporarly save lr
  99. * and avoid it being modified by the branch and link
  100. * calls. This function is used very early in the secondary
  101. * CPU boot, and no stack is available at this point.
  102. */
  103. mov r0, lr
  104. bl ll_get_coherency_base
  105. /* Bail out if the coherency is not enabled */
  106. cmp r1, #0
  107. reteq r0
  108. bl ll_get_coherency_cpumask
  109. mov lr, r0
  110. add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
  111. 1:
  112. ldrex r2, [r0]
  113. orr r2, r2, r3
  114. strex r1, r2, [r0]
  115. cmp r1, #0
  116. bne 1b
  117. dsb
  118. mov r0, #0
  119. ret lr
  120. ENDPROC(ll_enable_coherency)
  121. ENTRY(ll_disable_coherency)
  122. /*
  123. * As r0 is not modified by ll_get_coherency_base() and
  124. * ll_get_coherency_cpumask(), we use it to temporarly save lr
  125. * and avoid it being modified by the branch and link
  126. * calls. This function is used very early in the secondary
  127. * CPU boot, and no stack is available at this point.
  128. */
  129. mov r0, lr
  130. bl ll_get_coherency_base
  131. /* Bail out if the coherency is not enabled */
  132. cmp r1, #0
  133. reteq r0
  134. bl ll_get_coherency_cpumask
  135. mov lr, r0
  136. add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
  137. 1:
  138. ldrex r2, [r0]
  139. bic r2, r2, r3
  140. strex r1, r2, [r0]
  141. cmp r1, #0
  142. bne 1b
  143. dsb
  144. ret lr
  145. ENDPROC(ll_disable_coherency)
  146. .align 2
  147. 3:
  148. .long coherency_phys_base - .