/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2013-2021, Arm Limited.
 *
 * Adapted from the original at:
 * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/memcmp.S
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

/* Assumptions:
 *
 * ARMv8-a, AArch64, unaligned accesses.
 */

/* Expand to a non-exported GAS local label, e.g. L(less8) -> .Lless8.  */
#define L(label) .L ## label

/* Parameters and result (AAPCS64: args in x0-x2, return in w0).  */
#define src1		x0	/* first buffer; advanced as bytes are consumed */
#define src2		x1	/* second buffer; advanced in lockstep with src1 */
#define limit		x2	/* byte count; repeatedly decremented below */
#define result		w0	/* int return value: negative, zero or positive */

/* Internal variables.  */
#define data1		x3	/* low doubleword loaded from src1 */
#define data1w		w3	/* 32-bit view of data1 (word/byte loads) */
#define data1h		x4	/* high doubleword of an ldp from src1 */
#define data2		x5	/* low doubleword loaded from src2 */
#define data2w		w5	/* 32-bit view of data2 (word/byte loads) */
#define data2h		x6	/* high doubleword of an ldp from src2 */
#define tmp1		x7	/* scratch: src1 misalignment within 16 bytes */
#define tmp2		x8	/* scratch (not referenced in this routine) */
/*
 * int memcmp(const void *src1, const void *src2, size_t limit)
 *
 * Returns 0 if the buffers are equal over 'limit' bytes, otherwise a
 * negative/positive value ordered by the first differing byte compared
 * as unsigned (see L(return): on little-endian the doublewords are
 * byte-reversed so an unsigned compare orders by the lowest-addressed
 * differing byte).
 */
SYM_FUNC_START(__pi_memcmp)
	subs	limit, limit, 8
	b.lo	L(less8)		/* total length < 8 bytes */

	/* Compare the first 8 bytes.  */
	ldr	data1, [src1], 8
	ldr	data2, [src2], 8
	cmp	data1, data2
	b.ne	L(return)

	subs	limit, limit, 8
	b.gt	L(more16)

	/* Total length 8-16 bytes: limit is now in [-8..0], so these
	   (possibly overlapping) loads end exactly at the buffer end.  */
	ldr	data1, [src1, limit]
	ldr	data2, [src2, limit]
	b	L(return)

L(more16):
	/* More than 16 bytes: compare bytes 8-15.  */
	ldr	data1, [src1], 8
	ldr	data2, [src2], 8
	cmp	data1, data2
	bne	L(return)

	/* Jump directly to comparing the last 16 bytes for 32 byte (or less)
	   strings.  */
	subs	limit, limit, 16
	b.ls	L(last_bytes)

	/* We overlap loads between 0-32 bytes at either side of SRC1 when we
	   try to align, so limit it only to strings larger than 128 bytes.  */
	cmp	limit, 96
	b.ls	L(loop16)

	/* Align src1 and adjust src2 with bytes not yet done.  Both pointers
	   step back by src1's misalignment; limit grows to compensate, so the
	   already-compared bytes are simply re-compared once.  */
	and	tmp1, src1, 15
	add	limit, limit, tmp1
	sub	src1, src1, tmp1
	sub	src2, src2, tmp1

	/* Loop performing 16 bytes per iteration using aligned src1.
	   Limit is pre-decremented by 16 and must be larger than zero.
	   Exit if <= 16 bytes left to do or if the data is not equal.  */
	.p2align 4
L(loop16):
	ldp	data1, data1h, [src1], 16
	ldp	data2, data2h, [src2], 16
	subs	limit, limit, 16
	ccmp	data1, data2, 0, hi	/* >16 bytes remained? compare low halves;
					   else force NE to leave the loop */
	ccmp	data1h, data2h, 0, eq	/* lows equal? compare high halves;
					   else keep NE */
	b.eq	L(loop16)		/* 32 bytes equal and more data to do */

	/* Loop exited: either a mismatch, or <= 16 bytes left.  Find which
	   doubleword (if any) differs before handling the tail.  */
	cmp	data1, data2
	bne	L(return)
	mov	data1, data1h
	mov	data2, data2h
	cmp	data1, data2
	bne	L(return)

	/* Compare last 1-16 bytes using unaligned access.  limit is in
	   [-16..0], so loading at src + limit covers exactly the tail.  */
L(last_bytes):
	add	src1, src1, limit
	add	src2, src2, limit
	ldp	data1, data1h, [src1]
	ldp	data2, data2h, [src2]
	cmp	data1, data2
	bne	L(return)
	mov	data1, data1h
	mov	data2, data2h
	cmp	data1, data2

	/* Compare data bytes and set return value to 0, -1 or 1.  */
L(return):
#ifndef __AARCH64EB__
	/* Little-endian: byte-reverse both doublewords so the unsigned
	   compare below orders by the first differing byte in memory.  */
	rev	data1, data1
	rev	data2, data2
#endif
	cmp	data1, data2
L(ret_eq):
	cset	result, ne		/* result = equal ? 0 : 1 */
	cneg	result, result, lo	/* ... negated to -1 if data1 < data2 */
	ret

	.p2align 4
	/* Compare up to 8 bytes.  Limit is [-8..-1].  */
L(less8):
	adds	limit, limit, 4
	b.lo	L(less4)		/* fewer than 4 bytes in total */
	ldr	data1w, [src1], 4
	ldr	data2w, [src2], 4
	cmp	data1w, data2w
	b.ne	L(return)
	sub	limit, limit, 4

	/* Compare up to 4 bytes.  Limit is [-4..-1].  */
L(less4):
	adds	limit, limit, 4
	beq	L(ret_eq)		/* no bytes left: buffers are equal */
L(byte_loop):
	ldrb	data1w, [src1], 1
	ldrb	data2w, [src2], 1
	subs	limit, limit, 1
	ccmp	data1w, data2w, 0, ne	/* NZCV = 0b0000.  */
	b.eq	L(byte_loop)		/* bytes equal and more remaining */
	sub	result, data1w, data2w	/* difference of the unsigned bytes */
	ret
SYM_FUNC_END(__pi_memcmp)

SYM_FUNC_ALIAS_WEAK(memcmp, __pi_memcmp)
EXPORT_SYMBOL_NOKASAN(memcmp)