secretmem.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2021
 *
 * Author: Mike Rapoport <[email protected]>
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/mount.h>
#include <linux/memfd.h>
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/pseudo_fs.h>
#include <linux/secretmem.h>
#include <linux/set_memory.h>
#include <linux/sched/signal.h>

#include <uapi/linux/magic.h>

#include <asm/tlbflush.h>

#include "internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "secretmem: " fmt
/*
 * Define mode and flag masks to allow validation of the system call
 * parameters.
 */
#define SECRETMEM_MODE_MASK	(0x0)
#define SECRETMEM_FLAGS_MASK	SECRETMEM_MODE_MASK
static bool secretmem_enable __ro_after_init;
module_param_named(enable, secretmem_enable, bool, 0400);
MODULE_PARM_DESC(secretmem_enable,
		 "Enable secretmem and memfd_secret(2) system call");

static atomic_t secretmem_users;

bool secretmem_active(void)
{
	return !!atomic_read(&secretmem_users);
}
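
/*
 * Fault handler: on first access to a page, allocate a zeroed page,
 * remove it from the kernel direct map and insert it into the page
 * cache of the backing mapping, so that the kernel keeps no incidental
 * linear-map alias of the secret data.
 */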
static vm_fault_t secretmem_fault(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	pgoff_t offset = vmf->pgoff;
	gfp_t gfp = vmf->gfp_mask;
	unsigned long addr;
	struct page *page;
	vm_fault_t ret;
	int err;

	if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
		return vmf_error(-EINVAL);

	filemap_invalidate_lock_shared(mapping);

retry:
	page = find_lock_page(mapping, offset);
	if (!page) {
		page = alloc_page(gfp | __GFP_ZERO);
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out;
		}
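
		/* Drop the new page from the direct map before it can hold secrets */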
		err = set_direct_map_invalid_noflush(page);
		if (err) {
			put_page(page);
			ret = vmf_error(err);
			goto out;
		}

		__SetPageUptodate(page);
		err = add_to_page_cache_lru(page, mapping, offset, gfp);
		if (unlikely(err)) {
			put_page(page);
			/*
			 * If a split of large page was required, it
			 * already happened when we marked the page invalid
			 * which guarantees that this call won't fail
			 */
			set_direct_map_default_noflush(page);
			if (err == -EEXIST)
				goto retry;

			ret = vmf_error(err);
			goto out;
		}

		addr = (unsigned long)page_address(page);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}

	vmf->page = page;
	ret = VM_FAULT_LOCKED;

out:
	filemap_invalidate_unlock_shared(mapping);
	return ret;
}

static const struct vm_operations_struct secretmem_vm_ops = {
	.fault = secretmem_fault,
};
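
/* Drop the reference that memfd_secret() took on the user counter */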
static int secretmem_release(struct inode *inode, struct file *file)
{
	atomic_dec(&secretmem_users);
	return 0;
}
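
/*
 * Mappings must be shared and are always mlocked: check the memlock
 * rlimit before accepting the mapping, then mark it VM_LOCKED so the
 * pages never reach swap.
 */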
static int secretmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
		return -EAGAIN;

	vm_flags_set(vma, VM_LOCKED | VM_DONTDUMP);
	vma->vm_ops = &secretmem_vm_ops;

	return 0;
}

bool vma_is_secretmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &secretmem_vm_ops;
}

static const struct file_operations secretmem_fops = {
	.release	= secretmem_release,
	.mmap		= secretmem_mmap,
};
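
/* Secret pages are unmapped from the direct map, so refuse migration */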
static int secretmem_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return -EBUSY;
}
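
/*
 * On free, restore the direct map entry and scrub the contents so the
 * secret data never leaks back into the page allocator.
 */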
static void secretmem_free_folio(struct folio *folio)
{
	set_direct_map_default_noflush(&folio->page);
	folio_zero_segment(folio, 0, folio_size(folio));
}

const struct address_space_operations secretmem_aops = {
	.dirty_folio	= noop_dirty_folio,
	.free_folio	= secretmem_free_folio,
	.migrate_folio	= secretmem_migrate_folio,
};
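
/*
 * A secretmem file may be sized exactly once: resizing is rejected as
 * soon as i_size is non-zero.
 */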
static int secretmem_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	struct address_space *mapping = inode->i_mapping;
	unsigned int ia_valid = iattr->ia_valid;
	int ret;

	filemap_invalidate_lock(mapping);

	if ((ia_valid & ATTR_SIZE) && inode->i_size)
		ret = -EINVAL;
	else
		ret = simple_setattr(mnt_userns, dentry, iattr);

	filemap_invalidate_unlock(mapping);

	return ret;
}

static const struct inode_operations secretmem_iops = {
	.setattr = secretmem_setattr,
};

static struct vfsmount *secretmem_mnt;
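
/*
 * Build an anonymous inode on the secretmem mount, wire up the secretmem
 * inode and address-space operations, and wrap it in a pseudo file.
 */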
static struct file *secretmem_file_create(unsigned long flags)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	const char *anon_name = "[secretmem]";
	const struct qstr qname = QSTR_INIT(anon_name, strlen(anon_name));
	int err;

	inode = alloc_anon_inode(secretmem_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = security_inode_init_security_anon(inode, &qname, NULL);
	if (err) {
		file = ERR_PTR(err);
		goto err_free_inode;
	}

	file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem",
				 O_RDWR, &secretmem_fops);
	if (IS_ERR(file))
		goto err_free_inode;

	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
	mapping_set_unevictable(inode->i_mapping);

	inode->i_op = &secretmem_iops;
	inode->i_mapping->a_ops = &secretmem_aops;

	/* pretend we are a normal file with zero size */
	inode->i_mode |= S_IFREG;
	inode->i_size = 0;

	return file;

err_free_inode:
	iput(inode);
	return file;
}
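
/*
 * The returned fd behaves like a memfd; a minimal userspace sketch
 * (illustrative only, error handling omitted) looks roughly like:
 *
 *	int fd = memfd_secret(0);
 *	ftruncate(fd, len);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */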
SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
{
	struct file *file;
	int fd, err;

	/* make sure local flags do not conflict with global fcntl.h */
	BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);

	if (!secretmem_enable)
		return -ENOSYS;

	if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
		return -EINVAL;
	if (atomic_read(&secretmem_users) < 0)
		return -ENFILE;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = secretmem_file_create(flags);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto err_put_fd;
	}

	file->f_flags |= O_LARGEFILE;

	atomic_inc(&secretmem_users);
	fd_install(fd, file);
	return fd;

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int secretmem_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, SECRETMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type secretmem_fs = {
	.name = "secretmem",
	.init_fs_context = secretmem_init_fs_context,
	.kill_sb = kill_anon_super,
};
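
/* Mount the backing pseudo filesystem at boot when secretmem is enabled */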
static int __init secretmem_init(void)
{
	if (!secretmem_enable)
		return 0;

	secretmem_mnt = kern_mount(&secretmem_fs);
	if (IS_ERR(secretmem_mnt))
		return PTR_ERR(secretmem_mnt);

	/* prevent secretmem mappings from ever getting PROT_EXEC */
	secretmem_mnt->mnt_flags |= MNT_NOEXEC;

	return 0;
}
fs_initcall(secretmem_init);