// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <[email protected]>, IBM
 * Copyright (C) 2001 Anton Blanchard <[email protected]>, IBM
 * Copyright (C) 2002 Dave Engebretsen <[email protected]>, IBM
 *   Rework to support virtual processors
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/smp.h>

/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR)
#include <asm/hvcall.h>
#include <asm/smp.h>
/*
 * splpar_spin_yield - confer our timeslice to a preempted spinlock holder.
 * @lock: the contended spinlock we are spinning on.
 *
 * On a shared-processor LPAR the virtual cpu holding @lock may have been
 * preempted by the hypervisor; spinning would then burn our timeslice for
 * nothing.  If the holder is preempted, ask the hypervisor to run it
 * instead.  No-op if the lock is free or the holder is currently running.
 */
void splpar_spin_yield(arch_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	/* Snapshot the lock word; zero means the lock is (now) free. */
	lock_value = lock->slock;
	if (lock_value == 0)
		return;
	/* Low 16 bits of a held lock word encode the holder's cpu id. */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	/* An odd yield_count means that vcpu is preempted right now. */
	yield_count = yield_count_of(holder_cpu);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	/*
	 * Order the yield_count read before re-reading the lock word, so
	 * we only yield to a holder we observed as preempted while it was
	 * still holding this same lock instance.
	 */
	rmb();
	if (lock->slock != lock_value)
		return;		/* something has changed */
	yield_to_preempted(holder_cpu, yield_count);
}
EXPORT_SYMBOL_GPL(splpar_spin_yield);
/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */

/*
 * splpar_rw_yield - confer our timeslice to a preempted rwlock writer.
 * @rw: the contended rwlock we are spinning on.
 *
 * Mirrors splpar_spin_yield() for rwlocks: a negative lock word means the
 * lock is write-held, and only then do we know which cpu to yield to.
 * Read-held or free locks are left alone.
 */
void splpar_rw_yield(arch_rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;

	/* Snapshot the lock word; >= 0 means no writer holds it. */
	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	/* Low 16 bits of a write-held lock word encode the writer's cpu. */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	/* An odd yield_count means that vcpu is preempted right now. */
	yield_count = yield_count_of(holder_cpu);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	/*
	 * Order the yield_count read before re-reading the lock word, so
	 * we only yield to a writer we observed as preempted while it was
	 * still holding this same lock instance.
	 */
	rmb();
	if (rw->lock != lock_value)
		return;		/* something has changed */
	yield_to_preempted(holder_cpu, yield_count);
}
#endif