mmap_unlock_work.h

/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021 Facebook
 */
#ifndef __MMAP_UNLOCK_WORK_H__
#define __MMAP_UNLOCK_WORK_H__
#include <linux/irq_work.h>

/* irq_work used to run mmap_read_unlock() once irqs are enabled again */
struct mmap_unlock_irq_work {
	struct irq_work irq_work;
	struct mm_struct *mm;
};

DECLARE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
/*
 * We cannot call mmap_read_unlock() while irqs are disabled, because of
 * the risk of deadlocking on rq_lock. To look up a vma while irqs are
 * disabled, we instead run mmap_read_unlock() from irq_work, using a
 * per-CPU variable to hold the work item. If that irq_work is already in
 * use by another lookup, we fall back and skip taking the lock.
 */
static inline bool bpf_mmap_unlock_get_irq_work(struct mmap_unlock_irq_work **work_ptr)
{
	struct mmap_unlock_irq_work *work = NULL;
	bool irq_work_busy = false;

	if (irqs_disabled()) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			work = this_cpu_ptr(&mmap_unlock_work);
			if (irq_work_is_busy(&work->irq_work)) {
				/* cannot queue another up_read(), fall back */
				irq_work_busy = true;
			}
		} else {
			/*
			 * PREEMPT_RT does not allow trylocking the mmap
			 * sem in interrupt-disabled context. Force the
			 * fallback path.
			 */
			irq_work_busy = true;
		}
	}

	*work_ptr = work;
	return irq_work_busy;
}
static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
{
	if (!work) {
		mmap_read_unlock(mm);
	} else {
		work->mm = mm;

		/* The lock will be released once we're out of interrupt
		 * context. Tell lockdep that we've released it now so
		 * it doesn't complain that we forgot to release it.
		 */
		rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
		irq_work_queue(&work->irq_work);
	}
}
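
/*
 * Illustrative sketch, not part of the original header: how a caller is
 * expected to pair the two helpers above. This mirrors the pattern used by
 * in-tree users such as kernel/bpf/task_iter.c and kernel/bpf/stackmap.c;
 * example_addr_is_mapped() is a hypothetical name, and linux/mm.h is
 * assumed for mmap_read_trylock() and find_vma().
 */
static inline bool example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct mmap_unlock_irq_work *work = NULL;
	struct vm_area_struct *vma;
	bool mapped = false;

	/* Reserve the per-CPU irq_work if irqs are disabled. */
	if (bpf_mmap_unlock_get_irq_work(&work))
		return false;	/* irq_work busy: fall back without the lock */

	/* Only trylock: sleeping on mmap_lock is not allowed here. */
	if (!mmap_read_trylock(mm))
		return false;

	vma = find_vma(mm, addr);
	mapped = vma && vma->vm_start <= addr;

	/* Unlocks directly, or defers the unlock to irq_work. */
	bpf_mmap_unlock_mm(work, mm);
	return mapped;
}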
#endif /* __MMAP_UNLOCK_WORK_H__ */
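
/*
 * Illustrative sketch, not part of the original header: the other half of
 * the mechanism. The per-CPU work declared above has to be initialized with
 * a handler that drops the lock once irqs are enabled again; in the kernel
 * tree this lives in kernel/bpf/task_iter.c. The init function name below
 * is hypothetical, and linux/smp.h plus linux/mmap_lock.h are assumed.
 */
static void do_mmap_read_unlock(struct irq_work *entry)
{
	struct mmap_unlock_irq_work *work;

	/* This path is never armed on PREEMPT_RT, see the helper above. */
	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
		return;

	work = container_of(entry, struct mmap_unlock_irq_work, irq_work);
	/*
	 * The unlock runs in a different context than the acquire, and
	 * lockdep was already told via rwsem_release(), so use the
	 * non-owner unlock variant.
	 */
	mmap_read_unlock_non_owner(work->mm);
}

static int __init example_mmap_unlock_work_init(void)
{
	struct mmap_unlock_irq_work *work;
	int cpu;

	/* Hook the handler into each CPU's work item once at boot. */
	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&mmap_unlock_work, cpu);
		init_irq_work(&work->irq_work, do_mmap_read_unlock);
	}
	return 0;
}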