// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <[email protected]>
 */

#include <linux/bitops.h>
#include <asm/cmpxchg.h>

unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
{
	u32 old32, new32, load32, mask;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that includes
	 * it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;
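
	/*
	 * For example: a 1-byte exchange at an address with (ptr & 0x3) == 1
	 * on a little-endian CPU gives shift == 8 and
	 * mask == 0xff << 8 == 0xff00, selecting bits 8-15 of the containing
	 * 32-bit word.
	 */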

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;
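
	/*
	 * Splice the value into the loaded word and attempt the full 4-byte
	 * exchange. If arch_cmpxchg() returns anything other than old32 then
	 * another CPU modified the word concurrently, so retry with the
	 * freshly returned value.
	 */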
	do {
		old32 = load32;
		new32 = (load32 & ~mask) | (val << shift);
		load32 = arch_cmpxchg(ptr32, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> shift;
}

unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
{
	u32 mask, old32, new32, load32, load;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;
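
	/*
	 * For example: a 2-byte compare & exchange at an address with
	 * (ptr & 0x3) == 0 on a big-endian CPU gives
	 * shift == (0 ^ (4 - 2)) * 8 == 16 and mask == 0xffff << 16, since
	 * big-endian byte order places the halfword at offset 0 in the most
	 * significant bits of the containing 32-bit word.
	 */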

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

	while (true) {
		/*
		 * Ensure the byte we want to exchange matches the expected
		 * old value, and if not then bail.
		 */
		load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		/*
		 * Calculate the old & new values of the naturally aligned
		 * 4 byte integer that include the byte we want to exchange.
		 * Attempt to exchange the old value for the new value, and
		 * return if we succeed.
		 */
		old32 = (load32 & ~mask) | (old << shift);
		new32 = (load32 & ~mask) | (new << shift);
		load32 = arch_cmpxchg(ptr32, old32, new32);
		if (load32 == old32)
			return old;
	}
}
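
/*
 * Usage sketch (illustrative assumption: the size-based dispatch lives in
 * asm/cmpxchg.h): a sub-word operation such as
 *
 *	u8 flag = 0;
 *	u8 seen = cmpxchg(&flag, 0, 1);
 *
 * is routed to __cmpxchg_small() for sizes 1 and 2, which emulates it with
 * a full 4-byte arch_cmpxchg() on the aligned word containing the value.
 */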