
/* SPDX-License-Identifier: GPL-2.0-only
 * Copyright (C) 2020 Marvell.
 */

#ifndef __SOC_OTX2_ASM_H
#define __SOC_OTX2_ASM_H

#include <linux/types.h>

#if defined(CONFIG_ARM64)
/*
 * otx2_lmt_flush is used for the LMT store operation.
 * On the octeontx2 platform, CPT instruction enqueue and
 * NIX packet send are only possible via LMTST operations,
 * which use the LDEOR instruction targeting the
 * coprocessor address.
 */
#define otx2_lmt_flush(ioaddr)                        \
({                                                    \
        u64 result = 0;                               \
        __asm__ volatile(".cpu generic+lse\n"         \
                         "ldeor xzr, %x[rf], [%[rs]]" \
                         : [rf] "=r" (result)         \
                         : [rs] "r" (ioaddr));        \
        (result);                                     \
})
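
/*
 * Illustrative caller pattern, a sketch only: the octeontx2 NIC driver
 * copies a send descriptor into the per-CPU LMT region and retries the
 * flush until the LMTST is accepted (a zero result means it did not
 * complete). The names 'lmt_addr', 'io_addr', 'sqe' and 'size' are
 * hypothetical here.
 *
 *      u64 status;
 *
 *      do {
 *              memcpy(lmt_addr, sqe, size);
 *              status = otx2_lmt_flush(io_addr);
 *      } while (status == 0);
 */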

/*
 * STEORL is a store to memory with release semantics.
 * Using it avoids a DMB barrier after each LMTST
 * operation.
 */
#define cn10k_lmt_flush(val, addr)                    \
({                                                    \
        __asm__ volatile(".cpu generic+lse\n"         \
                         "steorl %x[rf],[%[rs]]"      \
                         : [rf] "+r" (val)            \
                         : [rs] "r" (addr));          \
})
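
/*
 * Illustrative use, a sketch with hypothetical names: on CN10K the
 * descriptor lines are first written into the LMT region; 'val' then
 * encodes the LMT ID and burst size (exact bit layout omitted here),
 * and the single release store triggers the send without a trailing
 * DMB.
 *
 *      memcpy(lmt_addr, sqe, size);
 *      cn10k_lmt_flush(val, io_addr);
 */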

static inline u64 otx2_atomic64_fetch_add(u64 incr, u64 *ptr)
{
        u64 result;

        asm volatile (".cpu generic+lse\n"
                      "ldadda %x[i], %x[r], [%[b]]"
                      : [r] "=r" (result), "+m" (*ptr)
                      : [i] "r" (incr), [b] "r" (ptr)
                      : "memory");
        return result;
}
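
/*
 * Illustrative use, a sketch with a hypothetical 'cursor': LDADDA
 * returns the value held in memory before the add, so a caller can
 * atomically claim a range of indices from a shared counter:
 *
 *      u64 first = otx2_atomic64_fetch_add(n, &cursor);
 *
 * The caller then owns slots [first, first + n), and the acquire
 * semantics of LDADDA order the claim against later accesses.
 */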

#else
#define otx2_lmt_flush(ioaddr)                  ({ 0; })
#define cn10k_lmt_flush(val, addr)              ({ addr = val; })
#define otx2_atomic64_fetch_add(incr, ptr)      ({ incr; })
#endif

#endif /* __SOC_OTX2_ASM_H */