  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _ASM_X86_PKEYS_H
  3. #define _ASM_X86_PKEYS_H
  4. /*
  5. * If more than 16 keys are ever supported, a thorough audit
  6. * will be necessary to ensure that the types that store key
  7. * numbers and masks have sufficient capacity.
  8. */
  9. #define arch_max_pkey() (cpu_feature_enabled(X86_FEATURE_OSPKE) ? 16 : 1)
  10. extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
  11. unsigned long init_val);
  12. static inline bool arch_pkeys_enabled(void)
  13. {
  14. return cpu_feature_enabled(X86_FEATURE_OSPKE);
  15. }
  16. /*
  17. * Try to dedicate one of the protection keys to be used as an
  18. * execute-only protection key.
  19. */
  20. extern int __execute_only_pkey(struct mm_struct *mm);
  21. static inline int execute_only_pkey(struct mm_struct *mm)
  22. {
  23. if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
  24. return ARCH_DEFAULT_PKEY;
  25. return __execute_only_pkey(mm);
  26. }
  27. extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
  28. int prot, int pkey);
  29. static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
  30. int prot, int pkey)
  31. {
  32. if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
  33. return 0;
  34. return __arch_override_mprotect_pkey(vma, prot, pkey);
  35. }
  36. #define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | VM_PKEY_BIT3)
  37. #define mm_pkey_allocation_map(mm) (mm->context.pkey_allocation_map)
  38. #define mm_set_pkey_allocated(mm, pkey) do { \
  39. mm_pkey_allocation_map(mm) |= (1U << pkey); \
  40. } while (0)
  41. #define mm_set_pkey_free(mm, pkey) do { \
  42. mm_pkey_allocation_map(mm) &= ~(1U << pkey); \
  43. } while (0)
  44. static inline
  45. bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
  46. {
  47. /*
  48. * "Allocated" pkeys are those that have been returned
  49. * from pkey_alloc() or pkey 0 which is allocated
  50. * implicitly when the mm is created.
  51. */
  52. if (pkey < 0)
  53. return false;
  54. if (pkey >= arch_max_pkey())
  55. return false;
  56. /*
  57. * The exec-only pkey is set in the allocation map, but
  58. * is not available to any of the user interfaces like
  59. * mprotect_pkey().
  60. */
  61. if (pkey == mm->context.execute_only_pkey)
  62. return false;
  63. return mm_pkey_allocation_map(mm) & (1U << pkey);
  64. }
  65. /*
  66. * Returns a positive, 4-bit key on success, or -1 on failure.
  67. */
  68. static inline
  69. int mm_pkey_alloc(struct mm_struct *mm)
  70. {
  71. /*
  72. * Note: this is the one and only place we make sure
  73. * that the pkey is valid as far as the hardware is
  74. * concerned. The rest of the kernel trusts that
  75. * only good, valid pkeys come out of here.
  76. */
  77. u16 all_pkeys_mask = ((1U << arch_max_pkey()) - 1);
  78. int ret;
  79. /*
  80. * Are we out of pkeys? We must handle this specially
  81. * because ffz() behavior is undefined if there are no
  82. * zeros.
  83. */
  84. if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
  85. return -1;
  86. ret = ffz(mm_pkey_allocation_map(mm));
  87. mm_set_pkey_allocated(mm, ret);
  88. return ret;
  89. }
  90. static inline
  91. int mm_pkey_free(struct mm_struct *mm, int pkey)
  92. {
  93. if (!mm_pkey_is_allocated(mm, pkey))
  94. return -EINVAL;
  95. mm_set_pkey_free(mm, pkey);
  96. return 0;
  97. }
  98. static inline int vma_pkey(struct vm_area_struct *vma)
  99. {
  100. unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
  101. VM_PKEY_BIT2 | VM_PKEY_BIT3;
  102. return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
  103. }
  104. #endif /*_ASM_X86_PKEYS_H */