cmpxchg.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <[email protected]>
 */

#include <linux/bitops.h>
#include <asm/cmpxchg.h>
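
/*
 * These helpers emulate sub-word (1 & 2 byte) atomic exchange and
 * compare-exchange in terms of the native 4 byte arch_cmpxchg(), by
 * operating on the naturally aligned 32-bit word that contains the
 * sub-word value.
 */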

unsigned long __xchg_small(volatile void *ptr, unsigned long val,
                           unsigned int size)
{
        u32 old32, new32, load32, mask;
        volatile u32 *ptr32;
        unsigned int shift;

        /* Check that ptr is naturally aligned */
        WARN_ON((unsigned long)ptr & (size - 1));

        /* Mask value to the correct size. */
        mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
        val &= mask;

        /*
         * Calculate a shift & mask that correspond to the value we wish to
         * exchange within the naturally aligned 4 byte integer that
         * includes it.
         */
        shift = (unsigned long)ptr & 0x3;
        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                shift ^= sizeof(u32) - size;
        shift *= BITS_PER_BYTE;
        mask <<= shift;
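
        /*
         * Worked example (illustrative): exchanging a u8 at an address
         * with (ptr & 0x3) == 1 gives shift = 8 on little-endian, i.e.
         * bits 8-15 of the containing word. On big-endian the XOR with
         * (sizeof(u32) - size) == 3 remaps the byte index, giving
         * shift = 16, since byte 1 of a big-endian word holds bits 16-23.
         */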

        /*
         * Calculate a pointer to the naturally aligned 4 byte integer that
         * includes our byte of interest, and load its value.
         */
        ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
        load32 = *ptr32;
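
        /*
         * Retry loop: arch_cmpxchg() returns the value it actually found
         * at ptr32. If that matches old32 then new32 was stored; otherwise
         * another CPU modified some byte of the word concurrently, so we
         * retry against the freshly observed value.
         */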
        do {
                old32 = load32;
                new32 = (load32 & ~mask) | (val << shift);
                load32 = arch_cmpxchg(ptr32, old32, new32);
        } while (load32 != old32);

        return (load32 & mask) >> shift;
}
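
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * callers normally reach __xchg_small() via the generic xchg() macro in
 * <asm/cmpxchg.h>, but a direct call for a u8 would look like this.
 */
#if 0
static u8 example_byte;

static void example_xchg(void)
{
        /* Atomically store 0x5a and retrieve the previous byte. */
        u8 prev = __xchg_small(&example_byte, 0x5a, sizeof(example_byte));

        (void)prev;
}
#endif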

unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
                              unsigned long new, unsigned int size)
{
        u32 mask, old32, new32, load32, load;
        volatile u32 *ptr32;
        unsigned int shift;

        /* Check that ptr is naturally aligned */
        WARN_ON((unsigned long)ptr & (size - 1));

        /* Mask inputs to the correct size. */
        mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
        old &= mask;
        new &= mask;

        /*
         * Calculate a shift & mask that correspond to the value we wish to
         * compare & exchange within the naturally aligned 4 byte integer
         * that includes it.
         */
        shift = (unsigned long)ptr & 0x3;
        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                shift ^= sizeof(u32) - size;
        shift *= BITS_PER_BYTE;
        mask <<= shift;

        /*
         * Calculate a pointer to the naturally aligned 4 byte integer that
         * includes our byte of interest, and load its value.
         */
        ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
        load32 = *ptr32;

        while (true) {
                /*
                 * Ensure the byte we want to exchange matches the expected
                 * old value, and if not then bail.
                 */
                load = (load32 & mask) >> shift;
                if (load != old)
                        return load;

                /*
                 * Calculate the old & new values of the naturally aligned
                 * 4 byte integer that include the byte we want to exchange.
                 * Attempt to exchange the old value for the new value, and
                 * return if we succeed.
                 */
                old32 = (load32 & ~mask) | (old << shift);
                new32 = (load32 & ~mask) | (new << shift);
                load32 = arch_cmpxchg(ptr32, old32, new32);
                if (load32 == old32)
                        return old;
        }
}
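
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * callers normally reach __cmpxchg_small() via the generic cmpxchg()
 * macro; a direct call for a u16 would look like this. Like cmpxchg(),
 * it returns the value actually found, so the operation succeeded iff
 * the return value equals the expected old value.
 */
#if 0
static u16 example_state;

static bool example_cmpxchg(u16 expected, u16 next)
{
        return __cmpxchg_small(&example_state, expected, next,
                               sizeof(example_state)) == expected;
}
#endif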