/* ioctl.c — SGX enclave ioctl handlers (listing-viewer residue removed) */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 2016-20 Intel Corporation. */
  3. #include <asm/mman.h>
  4. #include <asm/sgx.h>
  5. #include <linux/mman.h>
  6. #include <linux/delay.h>
  7. #include <linux/file.h>
  8. #include <linux/hashtable.h>
  9. #include <linux/highmem.h>
  10. #include <linux/ratelimit.h>
  11. #include <linux/sched/signal.h>
  12. #include <linux/shmem_fs.h>
  13. #include <linux/slab.h>
  14. #include <linux/suspend.h>
  15. #include "driver.h"
  16. #include "encl.h"
  17. #include "encls.h"
  18. struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl, bool reclaim)
  19. {
  20. struct sgx_va_page *va_page = NULL;
  21. void *err;
  22. BUILD_BUG_ON(SGX_VA_SLOT_COUNT !=
  23. (SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1);
  24. if (!(encl->page_cnt % SGX_VA_SLOT_COUNT)) {
  25. va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
  26. if (!va_page)
  27. return ERR_PTR(-ENOMEM);
  28. va_page->epc_page = sgx_alloc_va_page(reclaim);
  29. if (IS_ERR(va_page->epc_page)) {
  30. err = ERR_CAST(va_page->epc_page);
  31. kfree(va_page);
  32. return err;
  33. }
  34. WARN_ON_ONCE(encl->page_cnt % SGX_VA_SLOT_COUNT);
  35. }
  36. encl->page_cnt++;
  37. return va_page;
  38. }
  39. void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
  40. {
  41. encl->page_cnt--;
  42. if (va_page) {
  43. sgx_encl_free_epc_page(va_page->epc_page);
  44. list_del(&va_page->list);
  45. kfree(va_page);
  46. }
  47. }
  48. static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
  49. {
  50. struct sgx_epc_page *secs_epc;
  51. struct sgx_va_page *va_page;
  52. struct sgx_pageinfo pginfo;
  53. struct sgx_secinfo secinfo;
  54. unsigned long encl_size;
  55. struct file *backing;
  56. long ret;
  57. va_page = sgx_encl_grow(encl, true);
  58. if (IS_ERR(va_page))
  59. return PTR_ERR(va_page);
  60. else if (va_page)
  61. list_add(&va_page->list, &encl->va_pages);
  62. /* else the tail page of the VA page list had free slots. */
  63. /* The extra page goes to SECS. */
  64. encl_size = secs->size + PAGE_SIZE;
  65. backing = shmem_file_setup("SGX backing", encl_size + (encl_size >> 5),
  66. VM_NORESERVE);
  67. if (IS_ERR(backing)) {
  68. ret = PTR_ERR(backing);
  69. goto err_out_shrink;
  70. }
  71. encl->backing = backing;
  72. secs_epc = sgx_alloc_epc_page(&encl->secs, true);
  73. if (IS_ERR(secs_epc)) {
  74. ret = PTR_ERR(secs_epc);
  75. goto err_out_backing;
  76. }
  77. encl->secs.epc_page = secs_epc;
  78. pginfo.addr = 0;
  79. pginfo.contents = (unsigned long)secs;
  80. pginfo.metadata = (unsigned long)&secinfo;
  81. pginfo.secs = 0;
  82. memset(&secinfo, 0, sizeof(secinfo));
  83. ret = __ecreate((void *)&pginfo, sgx_get_epc_virt_addr(secs_epc));
  84. if (ret) {
  85. ret = -EIO;
  86. goto err_out;
  87. }
  88. if (secs->attributes & SGX_ATTR_DEBUG)
  89. set_bit(SGX_ENCL_DEBUG, &encl->flags);
  90. encl->secs.encl = encl;
  91. encl->secs.type = SGX_PAGE_TYPE_SECS;
  92. encl->base = secs->base;
  93. encl->size = secs->size;
  94. encl->attributes = secs->attributes;
  95. encl->attributes_mask = SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT | SGX_ATTR_KSS;
  96. /* Set only after completion, as encl->lock has not been taken. */
  97. set_bit(SGX_ENCL_CREATED, &encl->flags);
  98. return 0;
  99. err_out:
  100. sgx_encl_free_epc_page(encl->secs.epc_page);
  101. encl->secs.epc_page = NULL;
  102. err_out_backing:
  103. fput(encl->backing);
  104. encl->backing = NULL;
  105. err_out_shrink:
  106. sgx_encl_shrink(encl, va_page);
  107. return ret;
  108. }
  109. /**
  110. * sgx_ioc_enclave_create() - handler for %SGX_IOC_ENCLAVE_CREATE
  111. * @encl: An enclave pointer.
  112. * @arg: The ioctl argument.
  113. *
  114. * Allocate kernel data structures for the enclave and invoke ECREATE.
  115. *
  116. * Return:
  117. * - 0: Success.
  118. * - -EIO: ECREATE failed.
  119. * - -errno: POSIX error.
  120. */
  121. static long sgx_ioc_enclave_create(struct sgx_encl *encl, void __user *arg)
  122. {
  123. struct sgx_enclave_create create_arg;
  124. void *secs;
  125. int ret;
  126. if (test_bit(SGX_ENCL_CREATED, &encl->flags))
  127. return -EINVAL;
  128. if (copy_from_user(&create_arg, arg, sizeof(create_arg)))
  129. return -EFAULT;
  130. secs = kmalloc(PAGE_SIZE, GFP_KERNEL);
  131. if (!secs)
  132. return -ENOMEM;
  133. if (copy_from_user(secs, (void __user *)create_arg.src, PAGE_SIZE))
  134. ret = -EFAULT;
  135. else
  136. ret = sgx_encl_create(encl, secs);
  137. kfree(secs);
  138. return ret;
  139. }
  140. static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
  141. {
  142. u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
  143. u64 pt = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
  144. if (pt != SGX_SECINFO_REG && pt != SGX_SECINFO_TCS)
  145. return -EINVAL;
  146. if ((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R))
  147. return -EINVAL;
  148. /*
  149. * CPU will silently overwrite the permissions as zero, which means
  150. * that we need to validate it ourselves.
  151. */
  152. if (pt == SGX_SECINFO_TCS && perm)
  153. return -EINVAL;
  154. if (secinfo->flags & SGX_SECINFO_RESERVED_MASK)
  155. return -EINVAL;
  156. if (memchr_inv(secinfo->reserved, 0, sizeof(secinfo->reserved)))
  157. return -EINVAL;
  158. return 0;
  159. }
  160. static int __sgx_encl_add_page(struct sgx_encl *encl,
  161. struct sgx_encl_page *encl_page,
  162. struct sgx_epc_page *epc_page,
  163. struct sgx_secinfo *secinfo, unsigned long src)
  164. {
  165. struct sgx_pageinfo pginfo;
  166. struct vm_area_struct *vma;
  167. struct page *src_page;
  168. int ret;
  169. /* Deny noexec. */
  170. vma = find_vma(current->mm, src);
  171. if (!vma)
  172. return -EFAULT;
  173. if (!(vma->vm_flags & VM_MAYEXEC))
  174. return -EACCES;
  175. ret = get_user_pages(src, 1, 0, &src_page, NULL);
  176. if (ret < 1)
  177. return -EFAULT;
  178. pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
  179. pginfo.addr = encl_page->desc & PAGE_MASK;
  180. pginfo.metadata = (unsigned long)secinfo;
  181. pginfo.contents = (unsigned long)kmap_atomic(src_page);
  182. ret = __eadd(&pginfo, sgx_get_epc_virt_addr(epc_page));
  183. kunmap_atomic((void *)pginfo.contents);
  184. put_page(src_page);
  185. return ret ? -EIO : 0;
  186. }
  187. /*
  188. * If the caller requires measurement of the page as a proof for the content,
  189. * use EEXTEND to add a measurement for 256 bytes of the page. Repeat this
  190. * operation until the entire page is measured."
  191. */
  192. static int __sgx_encl_extend(struct sgx_encl *encl,
  193. struct sgx_epc_page *epc_page)
  194. {
  195. unsigned long offset;
  196. int ret;
  197. for (offset = 0; offset < PAGE_SIZE; offset += SGX_EEXTEND_BLOCK_SIZE) {
  198. ret = __eextend(sgx_get_epc_virt_addr(encl->secs.epc_page),
  199. sgx_get_epc_virt_addr(epc_page) + offset);
  200. if (ret) {
  201. if (encls_failed(ret))
  202. ENCLS_WARN(ret, "EEXTEND");
  203. return -EIO;
  204. }
  205. }
  206. return 0;
  207. }
  208. static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
  209. unsigned long offset, struct sgx_secinfo *secinfo,
  210. unsigned long flags)
  211. {
  212. struct sgx_encl_page *encl_page;
  213. struct sgx_epc_page *epc_page;
  214. struct sgx_va_page *va_page;
  215. int ret;
  216. encl_page = sgx_encl_page_alloc(encl, offset, secinfo->flags);
  217. if (IS_ERR(encl_page))
  218. return PTR_ERR(encl_page);
  219. epc_page = sgx_alloc_epc_page(encl_page, true);
  220. if (IS_ERR(epc_page)) {
  221. kfree(encl_page);
  222. return PTR_ERR(epc_page);
  223. }
  224. va_page = sgx_encl_grow(encl, true);
  225. if (IS_ERR(va_page)) {
  226. ret = PTR_ERR(va_page);
  227. goto err_out_free;
  228. }
  229. mmap_read_lock(current->mm);
  230. mutex_lock(&encl->lock);
  231. /*
  232. * Adding to encl->va_pages must be done under encl->lock. Ditto for
  233. * deleting (via sgx_encl_shrink()) in the error path.
  234. */
  235. if (va_page)
  236. list_add(&va_page->list, &encl->va_pages);
  237. /*
  238. * Insert prior to EADD in case of OOM. EADD modifies MRENCLAVE, i.e.
  239. * can't be gracefully unwound, while failure on EADD/EXTEND is limited
  240. * to userspace errors (or kernel/hardware bugs).
  241. */
  242. ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
  243. encl_page, GFP_KERNEL);
  244. if (ret)
  245. goto err_out_unlock;
  246. ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
  247. src);
  248. if (ret)
  249. goto err_out;
  250. /*
  251. * Complete the "add" before doing the "extend" so that the "add"
  252. * isn't in a half-baked state in the extremely unlikely scenario
  253. * the enclave will be destroyed in response to EEXTEND failure.
  254. */
  255. encl_page->encl = encl;
  256. encl_page->epc_page = epc_page;
  257. encl_page->type = (secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK) >> 8;
  258. encl->secs_child_cnt++;
  259. if (flags & SGX_PAGE_MEASURE) {
  260. ret = __sgx_encl_extend(encl, epc_page);
  261. if (ret)
  262. goto err_out;
  263. }
  264. sgx_mark_page_reclaimable(encl_page->epc_page);
  265. mutex_unlock(&encl->lock);
  266. mmap_read_unlock(current->mm);
  267. return ret;
  268. err_out:
  269. xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));
  270. err_out_unlock:
  271. sgx_encl_shrink(encl, va_page);
  272. mutex_unlock(&encl->lock);
  273. mmap_read_unlock(current->mm);
  274. err_out_free:
  275. sgx_encl_free_epc_page(epc_page);
  276. kfree(encl_page);
  277. return ret;
  278. }
  279. /*
  280. * Ensure user provided offset and length values are valid for
  281. * an enclave.
  282. */
  283. static int sgx_validate_offset_length(struct sgx_encl *encl,
  284. unsigned long offset,
  285. unsigned long length)
  286. {
  287. if (!IS_ALIGNED(offset, PAGE_SIZE))
  288. return -EINVAL;
  289. if (!length || !IS_ALIGNED(length, PAGE_SIZE))
  290. return -EINVAL;
  291. if (offset + length < offset)
  292. return -EINVAL;
  293. if (offset + length - PAGE_SIZE >= encl->size)
  294. return -EINVAL;
  295. return 0;
  296. }
  297. /**
  298. * sgx_ioc_enclave_add_pages() - The handler for %SGX_IOC_ENCLAVE_ADD_PAGES
  299. * @encl: an enclave pointer
  300. * @arg: a user pointer to a struct sgx_enclave_add_pages instance
  301. *
  302. * Add one or more pages to an uninitialized enclave, and optionally extend the
  303. * measurement with the contents of the page. The SECINFO and measurement mask
  304. * are applied to all pages.
  305. *
  306. * A SECINFO for a TCS is required to always contain zero permissions because
  307. * CPU silently zeros them. Allowing anything else would cause a mismatch in
  308. * the measurement.
  309. *
  310. * mmap()'s protection bits are capped by the page permissions. For each page
  311. * address, the maximum protection bits are computed with the following
  312. * heuristics:
  313. *
  314. * 1. A regular page: PROT_R, PROT_W and PROT_X match the SECINFO permissions.
  315. * 2. A TCS page: PROT_R | PROT_W.
  316. *
  317. * mmap() is not allowed to surpass the minimum of the maximum protection bits
  318. * within the given address range.
  319. *
  320. * The function deinitializes kernel data structures for enclave and returns
  321. * -EIO in any of the following conditions:
  322. *
  323. * - Enclave Page Cache (EPC), the physical memory holding enclaves, has
  324. * been invalidated. This will cause EADD and EEXTEND to fail.
  325. * - If the source address is corrupted somehow when executing EADD.
  326. *
  327. * Return:
  328. * - 0: Success.
  329. * - -EACCES: The source page is located in a noexec partition.
  330. * - -ENOMEM: Out of EPC pages.
  331. * - -EINTR: The call was interrupted before data was processed.
  332. * - -EIO: Either EADD or EEXTEND failed because invalid source address
  333. * or power cycle.
  334. * - -errno: POSIX error.
  335. */
  336. static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
  337. {
  338. struct sgx_enclave_add_pages add_arg;
  339. struct sgx_secinfo secinfo;
  340. unsigned long c;
  341. int ret;
  342. if (!test_bit(SGX_ENCL_CREATED, &encl->flags) ||
  343. test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
  344. return -EINVAL;
  345. if (copy_from_user(&add_arg, arg, sizeof(add_arg)))
  346. return -EFAULT;
  347. if (!IS_ALIGNED(add_arg.src, PAGE_SIZE))
  348. return -EINVAL;
  349. if (sgx_validate_offset_length(encl, add_arg.offset, add_arg.length))
  350. return -EINVAL;
  351. if (copy_from_user(&secinfo, (void __user *)add_arg.secinfo,
  352. sizeof(secinfo)))
  353. return -EFAULT;
  354. if (sgx_validate_secinfo(&secinfo))
  355. return -EINVAL;
  356. for (c = 0 ; c < add_arg.length; c += PAGE_SIZE) {
  357. if (signal_pending(current)) {
  358. if (!c)
  359. ret = -ERESTARTSYS;
  360. break;
  361. }
  362. if (need_resched())
  363. cond_resched();
  364. ret = sgx_encl_add_page(encl, add_arg.src + c, add_arg.offset + c,
  365. &secinfo, add_arg.flags);
  366. if (ret)
  367. break;
  368. }
  369. add_arg.count = c;
  370. if (copy_to_user(arg, &add_arg, sizeof(add_arg)))
  371. return -EFAULT;
  372. return ret;
  373. }
  374. static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
  375. void *hash)
  376. {
  377. SHASH_DESC_ON_STACK(shash, tfm);
  378. shash->tfm = tfm;
  379. return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
  380. }
  381. static int sgx_get_key_hash(const void *modulus, void *hash)
  382. {
  383. struct crypto_shash *tfm;
  384. int ret;
  385. tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
  386. if (IS_ERR(tfm))
  387. return PTR_ERR(tfm);
  388. ret = __sgx_get_key_hash(tfm, modulus, hash);
  389. crypto_free_shash(tfm);
  390. return ret;
  391. }
  392. static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
  393. void *token)
  394. {
  395. u64 mrsigner[4];
  396. int i, j;
  397. void *addr;
  398. int ret;
  399. /*
  400. * Deny initializing enclaves with attributes (namely provisioning)
  401. * that have not been explicitly allowed.
  402. */
  403. if (encl->attributes & ~encl->attributes_mask)
  404. return -EACCES;
  405. /*
  406. * Attributes should not be enforced *only* against what's available on
  407. * platform (done in sgx_encl_create) but checked and enforced against
  408. * the mask for enforcement in sigstruct. For example an enclave could
  409. * opt to sign with AVX bit in xfrm, but still be loadable on a platform
  410. * without it if the sigstruct->body.attributes_mask does not turn that
  411. * bit on.
  412. */
  413. if (sigstruct->body.attributes & sigstruct->body.attributes_mask &
  414. sgx_attributes_reserved_mask)
  415. return -EINVAL;
  416. if (sigstruct->body.miscselect & sigstruct->body.misc_mask &
  417. sgx_misc_reserved_mask)
  418. return -EINVAL;
  419. if (sigstruct->body.xfrm & sigstruct->body.xfrm_mask &
  420. sgx_xfrm_reserved_mask)
  421. return -EINVAL;
  422. ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
  423. if (ret)
  424. return ret;
  425. mutex_lock(&encl->lock);
  426. /*
  427. * ENCLS[EINIT] is interruptible because it has such a high latency,
  428. * e.g. 50k+ cycles on success. If an IRQ/NMI/SMI becomes pending,
  429. * EINIT may fail with SGX_UNMASKED_EVENT so that the event can be
  430. * serviced.
  431. */
  432. for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
  433. for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
  434. addr = sgx_get_epc_virt_addr(encl->secs.epc_page);
  435. preempt_disable();
  436. sgx_update_lepubkeyhash(mrsigner);
  437. ret = __einit(sigstruct, token, addr);
  438. preempt_enable();
  439. if (ret == SGX_UNMASKED_EVENT)
  440. continue;
  441. else
  442. break;
  443. }
  444. if (ret != SGX_UNMASKED_EVENT)
  445. break;
  446. msleep_interruptible(SGX_EINIT_SLEEP_TIME);
  447. if (signal_pending(current)) {
  448. ret = -ERESTARTSYS;
  449. goto err_out;
  450. }
  451. }
  452. if (encls_faulted(ret)) {
  453. if (encls_failed(ret))
  454. ENCLS_WARN(ret, "EINIT");
  455. ret = -EIO;
  456. } else if (ret) {
  457. pr_debug("EINIT returned %d\n", ret);
  458. ret = -EPERM;
  459. } else {
  460. set_bit(SGX_ENCL_INITIALIZED, &encl->flags);
  461. }
  462. err_out:
  463. mutex_unlock(&encl->lock);
  464. return ret;
  465. }
  466. /**
  467. * sgx_ioc_enclave_init() - handler for %SGX_IOC_ENCLAVE_INIT
  468. * @encl: an enclave pointer
  469. * @arg: userspace pointer to a struct sgx_enclave_init instance
  470. *
  471. * Flush any outstanding enqueued EADD operations and perform EINIT. The
  472. * Launch Enclave Public Key Hash MSRs are rewritten as necessary to match
  473. * the enclave's MRSIGNER, which is caculated from the provided sigstruct.
  474. *
  475. * Return:
  476. * - 0: Success.
  477. * - -EPERM: Invalid SIGSTRUCT.
  478. * - -EIO: EINIT failed because of a power cycle.
  479. * - -errno: POSIX error.
  480. */
  481. static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
  482. {
  483. struct sgx_sigstruct *sigstruct;
  484. struct sgx_enclave_init init_arg;
  485. void *token;
  486. int ret;
  487. if (!test_bit(SGX_ENCL_CREATED, &encl->flags) ||
  488. test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
  489. return -EINVAL;
  490. if (copy_from_user(&init_arg, arg, sizeof(init_arg)))
  491. return -EFAULT;
  492. /*
  493. * 'sigstruct' must be on a page boundary and 'token' on a 512 byte
  494. * boundary. kmalloc() will give this alignment when allocating
  495. * PAGE_SIZE bytes.
  496. */
  497. sigstruct = kmalloc(PAGE_SIZE, GFP_KERNEL);
  498. if (!sigstruct)
  499. return -ENOMEM;
  500. token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2);
  501. memset(token, 0, SGX_LAUNCH_TOKEN_SIZE);
  502. if (copy_from_user(sigstruct, (void __user *)init_arg.sigstruct,
  503. sizeof(*sigstruct))) {
  504. ret = -EFAULT;
  505. goto out;
  506. }
  507. /*
  508. * A legacy field used with Intel signed enclaves. These used to mean
  509. * regular and architectural enclaves. The CPU only accepts these values
  510. * but they do not have any other meaning.
  511. *
  512. * Thus, reject any other values.
  513. */
  514. if (sigstruct->header.vendor != 0x0000 &&
  515. sigstruct->header.vendor != 0x8086) {
  516. ret = -EINVAL;
  517. goto out;
  518. }
  519. ret = sgx_encl_init(encl, sigstruct, token);
  520. out:
  521. kfree(sigstruct);
  522. return ret;
  523. }
  524. /**
  525. * sgx_ioc_enclave_provision() - handler for %SGX_IOC_ENCLAVE_PROVISION
  526. * @encl: an enclave pointer
  527. * @arg: userspace pointer to a struct sgx_enclave_provision instance
  528. *
  529. * Allow ATTRIBUTE.PROVISION_KEY for an enclave by providing a file handle to
  530. * /dev/sgx_provision.
  531. *
  532. * Return:
  533. * - 0: Success.
  534. * - -errno: Otherwise.
  535. */
  536. static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg)
  537. {
  538. struct sgx_enclave_provision params;
  539. if (copy_from_user(&params, arg, sizeof(params)))
  540. return -EFAULT;
  541. return sgx_set_attribute(&encl->attributes_mask, params.fd);
  542. }
  543. /*
  544. * Ensure enclave is ready for SGX2 functions. Readiness is checked
  545. * by ensuring the hardware supports SGX2 and the enclave is initialized
  546. * and thus able to handle requests to modify pages within it.
  547. */
  548. static int sgx_ioc_sgx2_ready(struct sgx_encl *encl)
  549. {
  550. if (!(cpu_feature_enabled(X86_FEATURE_SGX2)))
  551. return -ENODEV;
  552. if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
  553. return -EINVAL;
  554. return 0;
  555. }
  556. /*
  557. * Some SGX functions require that no cached linear-to-physical address
  558. * mappings are present before they can succeed. Collaborate with
  559. * hardware via ENCLS[ETRACK] to ensure that all cached
  560. * linear-to-physical address mappings belonging to all threads of
  561. * the enclave are cleared. See sgx_encl_cpumask() for details.
  562. *
  563. * Must be called with enclave's mutex held from the time the
  564. * SGX function requiring that no cached linear-to-physical mappings
  565. * are present is executed until this ETRACK flow is complete.
  566. */
  567. static int sgx_enclave_etrack(struct sgx_encl *encl)
  568. {
  569. void *epc_virt;
  570. int ret;
  571. epc_virt = sgx_get_epc_virt_addr(encl->secs.epc_page);
  572. ret = __etrack(epc_virt);
  573. if (ret) {
  574. /*
  575. * ETRACK only fails when there is an OS issue. For
  576. * example, two consecutive ETRACK was sent without
  577. * completed IPI between.
  578. */
  579. pr_err_once("ETRACK returned %d (0x%x)", ret, ret);
  580. /*
  581. * Send IPIs to kick CPUs out of the enclave and
  582. * try ETRACK again.
  583. */
  584. on_each_cpu_mask(sgx_encl_cpumask(encl), sgx_ipi_cb, NULL, 1);
  585. ret = __etrack(epc_virt);
  586. if (ret) {
  587. pr_err_once("ETRACK repeat returned %d (0x%x)",
  588. ret, ret);
  589. return -EFAULT;
  590. }
  591. }
  592. on_each_cpu_mask(sgx_encl_cpumask(encl), sgx_ipi_cb, NULL, 1);
  593. return 0;
  594. }
  595. /**
  596. * sgx_enclave_restrict_permissions() - Restrict EPCM permissions
  597. * @encl: Enclave to which the pages belong.
  598. * @modp: Checked parameters from user on which pages need modifying and
  599. * their new permissions.
  600. *
  601. * Return:
  602. * - 0: Success.
  603. * - -errno: Otherwise.
  604. */
  605. static long
  606. sgx_enclave_restrict_permissions(struct sgx_encl *encl,
  607. struct sgx_enclave_restrict_permissions *modp)
  608. {
  609. struct sgx_encl_page *entry;
  610. struct sgx_secinfo secinfo;
  611. unsigned long addr;
  612. unsigned long c;
  613. void *epc_virt;
  614. int ret;
  615. memset(&secinfo, 0, sizeof(secinfo));
  616. secinfo.flags = modp->permissions & SGX_SECINFO_PERMISSION_MASK;
  617. for (c = 0 ; c < modp->length; c += PAGE_SIZE) {
  618. addr = encl->base + modp->offset + c;
  619. sgx_reclaim_direct();
  620. mutex_lock(&encl->lock);
  621. entry = sgx_encl_load_page(encl, addr);
  622. if (IS_ERR(entry)) {
  623. ret = PTR_ERR(entry) == -EBUSY ? -EAGAIN : -EFAULT;
  624. goto out_unlock;
  625. }
  626. /*
  627. * Changing EPCM permissions is only supported on regular
  628. * SGX pages. Attempting this change on other pages will
  629. * result in #PF.
  630. */
  631. if (entry->type != SGX_PAGE_TYPE_REG) {
  632. ret = -EINVAL;
  633. goto out_unlock;
  634. }
  635. /*
  636. * Apart from ensuring that read-access remains, do not verify
  637. * the permission bits requested. Kernel has no control over
  638. * how EPCM permissions can be relaxed from within the enclave.
  639. * ENCLS[EMODPR] can only remove existing EPCM permissions,
  640. * attempting to set new permissions will be ignored by the
  641. * hardware.
  642. */
  643. /* Change EPCM permissions. */
  644. epc_virt = sgx_get_epc_virt_addr(entry->epc_page);
  645. ret = __emodpr(&secinfo, epc_virt);
  646. if (encls_faulted(ret)) {
  647. /*
  648. * All possible faults should be avoidable:
  649. * parameters have been checked, will only change
  650. * permissions of a regular page, and no concurrent
  651. * SGX1/SGX2 ENCLS instructions since these
  652. * are protected with mutex.
  653. */
  654. pr_err_once("EMODPR encountered exception %d\n",
  655. ENCLS_TRAPNR(ret));
  656. ret = -EFAULT;
  657. goto out_unlock;
  658. }
  659. if (encls_failed(ret)) {
  660. modp->result = ret;
  661. ret = -EFAULT;
  662. goto out_unlock;
  663. }
  664. ret = sgx_enclave_etrack(encl);
  665. if (ret) {
  666. ret = -EFAULT;
  667. goto out_unlock;
  668. }
  669. mutex_unlock(&encl->lock);
  670. }
  671. ret = 0;
  672. goto out;
  673. out_unlock:
  674. mutex_unlock(&encl->lock);
  675. out:
  676. modp->count = c;
  677. return ret;
  678. }
  679. /**
  680. * sgx_ioc_enclave_restrict_permissions() - handler for
  681. * %SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS
  682. * @encl: an enclave pointer
  683. * @arg: userspace pointer to a &struct sgx_enclave_restrict_permissions
  684. * instance
  685. *
  686. * SGX2 distinguishes between relaxing and restricting the enclave page
  687. * permissions maintained by the hardware (EPCM permissions) of pages
  688. * belonging to an initialized enclave (after SGX_IOC_ENCLAVE_INIT).
  689. *
  690. * EPCM permissions cannot be restricted from within the enclave, the enclave
  691. * requires the kernel to run the privileged level 0 instructions ENCLS[EMODPR]
  692. * and ENCLS[ETRACK]. An attempt to relax EPCM permissions with this call
  693. * will be ignored by the hardware.
  694. *
  695. * Return:
  696. * - 0: Success
  697. * - -errno: Otherwise
  698. */
  699. static long sgx_ioc_enclave_restrict_permissions(struct sgx_encl *encl,
  700. void __user *arg)
  701. {
  702. struct sgx_enclave_restrict_permissions params;
  703. long ret;
  704. ret = sgx_ioc_sgx2_ready(encl);
  705. if (ret)
  706. return ret;
  707. if (copy_from_user(&params, arg, sizeof(params)))
  708. return -EFAULT;
  709. if (sgx_validate_offset_length(encl, params.offset, params.length))
  710. return -EINVAL;
  711. if (params.permissions & ~SGX_SECINFO_PERMISSION_MASK)
  712. return -EINVAL;
  713. /*
  714. * Fail early if invalid permissions requested to prevent ENCLS[EMODPR]
  715. * from faulting later when the CPU does the same check.
  716. */
  717. if ((params.permissions & SGX_SECINFO_W) &&
  718. !(params.permissions & SGX_SECINFO_R))
  719. return -EINVAL;
  720. if (params.result || params.count)
  721. return -EINVAL;
  722. ret = sgx_enclave_restrict_permissions(encl, &params);
  723. if (copy_to_user(arg, &params, sizeof(params)))
  724. return -EFAULT;
  725. return ret;
  726. }
  727. /**
  728. * sgx_enclave_modify_types() - Modify type of SGX enclave pages
  729. * @encl: Enclave to which the pages belong.
  730. * @modt: Checked parameters from user about which pages need modifying
  731. * and their new page type.
  732. *
  733. * Return:
  734. * - 0: Success
  735. * - -errno: Otherwise
  736. */
static long sgx_enclave_modify_types(struct sgx_encl *encl,
				     struct sgx_enclave_modify_types *modt)
{
	unsigned long max_prot_restore;
	enum sgx_page_type page_type;
	struct sgx_encl_page *entry;
	struct sgx_secinfo secinfo;
	unsigned long prot;
	unsigned long addr;
	unsigned long c;
	void *epc_virt;
	int ret;

	page_type = modt->page_type & SGX_PAGE_TYPE_MASK;

	/*
	 * The only new page types allowed by hardware are PT_TCS and PT_TRIM.
	 */
	if (page_type != SGX_PAGE_TYPE_TCS && page_type != SGX_PAGE_TYPE_TRIM)
		return -EINVAL;

	memset(&secinfo, 0, sizeof(secinfo));

	/* The target page type is placed at bit 8 of SECINFO.FLAGS. */
	secinfo.flags = page_type << 8;

	/* Walk the range one page at a time; @c doubles as the progress count. */
	for (c = 0 ; c < modt->length; c += PAGE_SIZE) {
		addr = encl->base + modt->offset + c;

		/* Give reclaim a chance while no locks are held (see sgx_reclaim_direct()). */
		sgx_reclaim_direct();

		mutex_lock(&encl->lock);

		entry = sgx_encl_load_page(encl, addr);
		if (IS_ERR(entry)) {
			/*
			 * -EBUSY means the page could not be loaded right
			 * now; tell userspace to retry. Anything else is
			 * reported as -EFAULT.
			 */
			ret = PTR_ERR(entry) == -EBUSY ? -EAGAIN : -EFAULT;
			goto out_unlock;
		}

		/*
		 * Borrow the logic from the Intel SDM. Regular pages
		 * (SGX_PAGE_TYPE_REG) can change type to SGX_PAGE_TYPE_TCS
		 * or SGX_PAGE_TYPE_TRIM but TCS pages can only be trimmed.
		 * CET pages not supported yet.
		 */
		if (!(entry->type == SGX_PAGE_TYPE_REG ||
		      (entry->type == SGX_PAGE_TYPE_TCS &&
		       page_type == SGX_PAGE_TYPE_TRIM))) {
			ret = -EINVAL;
			goto out_unlock;
		}

		/* Saved so out_entry_changed can undo the update made below. */
		max_prot_restore = entry->vm_max_prot_bits;

		/*
		 * Once a regular page becomes a TCS page it cannot be
		 * changed back. So the maximum allowed protection reflects
		 * the TCS page that is always RW from kernel perspective but
		 * will be inaccessible from within enclave. Before doing
		 * so, do make sure that the new page type continues to
		 * respect the originally vetted page permissions.
		 */
		if (entry->type == SGX_PAGE_TYPE_REG &&
		    page_type == SGX_PAGE_TYPE_TCS) {
			/* REG -> TCS requires the page to have been vetted RW. */
			if (~entry->vm_max_prot_bits & (VM_READ | VM_WRITE)) {
				ret = -EPERM;
				goto out_unlock;
			}
			prot = PROT_READ | PROT_WRITE;
			entry->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);

			/*
			 * Prevent page from being reclaimed while mutex
			 * is released.
			 */
			if (sgx_unmark_page_reclaimable(entry->epc_page)) {
				ret = -EAGAIN;
				goto out_entry_changed;
			}

			/*
			 * Do not keep encl->lock because of dependency on
			 * mmap_lock acquired in sgx_zap_enclave_ptes().
			 */
			mutex_unlock(&encl->lock);

			sgx_zap_enclave_ptes(encl, addr);

			mutex_lock(&encl->lock);

			/* PTEs are gone; allow the page to be reclaimed again. */
			sgx_mark_page_reclaimable(entry->epc_page);
		}

		/* Change EPC type */
		epc_virt = sgx_get_epc_virt_addr(entry->epc_page);
		ret = __emodt(&secinfo, epc_virt);
		if (encls_faulted(ret)) {
			/*
			 * All possible faults should be avoidable:
			 * parameters have been checked, will only change
			 * valid page types, and no concurrent
			 * SGX1/SGX2 ENCLS instructions since these are
			 * protected with mutex.
			 */
			pr_err_once("EMODT encountered exception %d\n",
				    ENCLS_TRAPNR(ret));
			ret = -EFAULT;
			goto out_entry_changed;
		}
		if (encls_failed(ret)) {
			/* Expose the raw EMODT error code to userspace. */
			modt->result = ret;
			ret = -EFAULT;
			goto out_entry_changed;
		}

		ret = sgx_enclave_etrack(encl);
		if (ret) {
			ret = -EFAULT;
			goto out_unlock;
		}

		/* Record the new type only after hardware accepted EMODT. */
		entry->type = page_type;

		mutex_unlock(&encl->lock);
	}

	ret = 0;
	goto out;

out_entry_changed:
	/* Roll back the speculative vm_max_prot_bits change for this page. */
	entry->vm_max_prot_bits = max_prot_restore;
out_unlock:
	mutex_unlock(&encl->lock);
out:
	/* Report partial progress (bytes handled) even on failure. */
	modt->count = c;

	return ret;
}
  851. /**
  852. * sgx_ioc_enclave_modify_types() - handler for %SGX_IOC_ENCLAVE_MODIFY_TYPES
  853. * @encl: an enclave pointer
  854. * @arg: userspace pointer to a &struct sgx_enclave_modify_types instance
  855. *
  856. * Ability to change the enclave page type supports the following use cases:
  857. *
  858. * * It is possible to add TCS pages to an enclave by changing the type of
  859. * regular pages (%SGX_PAGE_TYPE_REG) to TCS (%SGX_PAGE_TYPE_TCS) pages.
  860. * With this support the number of threads supported by an initialized
  861. * enclave can be increased dynamically.
  862. *
  863. * * Regular or TCS pages can dynamically be removed from an initialized
  864. * enclave by changing the page type to %SGX_PAGE_TYPE_TRIM. Changing the
  865. * page type to %SGX_PAGE_TYPE_TRIM marks the page for removal with actual
  866. * removal done by handler of %SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl() called
  867. * after ENCLU[EACCEPT] is run on %SGX_PAGE_TYPE_TRIM page from within the
  868. * enclave.
  869. *
  870. * Return:
  871. * - 0: Success
  872. * - -errno: Otherwise
  873. */
  874. static long sgx_ioc_enclave_modify_types(struct sgx_encl *encl,
  875. void __user *arg)
  876. {
  877. struct sgx_enclave_modify_types params;
  878. long ret;
  879. ret = sgx_ioc_sgx2_ready(encl);
  880. if (ret)
  881. return ret;
  882. if (copy_from_user(&params, arg, sizeof(params)))
  883. return -EFAULT;
  884. if (sgx_validate_offset_length(encl, params.offset, params.length))
  885. return -EINVAL;
  886. if (params.page_type & ~SGX_PAGE_TYPE_MASK)
  887. return -EINVAL;
  888. if (params.result || params.count)
  889. return -EINVAL;
  890. ret = sgx_enclave_modify_types(encl, &params);
  891. if (copy_to_user(arg, &params, sizeof(params)))
  892. return -EFAULT;
  893. return ret;
  894. }
/**
 * sgx_encl_remove_pages() - Remove trimmed pages from SGX enclave
 * @encl:	Enclave to which the pages belong
 * @params:	Checked parameters from user on which pages need to be removed
 *
 * Each page in the range must already be of type %SGX_PAGE_TYPE_TRIM and
 * must have been accepted (ENCLU[EACCEPT]) by the enclave.
 *
 * Return:
 * - 0:		Success.
 * - -errno:	Otherwise.
 */
static long sgx_encl_remove_pages(struct sgx_encl *encl,
				  struct sgx_enclave_remove_pages *params)
{
	struct sgx_encl_page *entry;
	struct sgx_secinfo secinfo;
	unsigned long addr;
	unsigned long c;
	void *epc_virt;
	int ret;

	memset(&secinfo, 0, sizeof(secinfo));
	/* RWX permissions used for the EMODPR acceptance probe below. */
	secinfo.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X;

	/* Walk the range one page at a time; @c doubles as the progress count. */
	for (c = 0 ; c < params->length; c += PAGE_SIZE) {
		addr = encl->base + params->offset + c;

		/* Give reclaim a chance while no locks are held (see sgx_reclaim_direct()). */
		sgx_reclaim_direct();

		mutex_lock(&encl->lock);

		entry = sgx_encl_load_page(encl, addr);
		if (IS_ERR(entry)) {
			/* -EBUSY maps to -EAGAIN so userspace retries. */
			ret = PTR_ERR(entry) == -EBUSY ? -EAGAIN : -EFAULT;
			goto out_unlock;
		}

		/* Only pages previously marked for removal may be removed. */
		if (entry->type != SGX_PAGE_TYPE_TRIM) {
			ret = -EPERM;
			goto out_unlock;
		}

		/*
		 * ENCLS[EMODPR] is a no-op instruction used to inform if
		 * ENCLU[EACCEPT] was run from within the enclave. If
		 * ENCLS[EMODPR] is run with RWX on a trimmed page that is
		 * not yet accepted then it will return
		 * %SGX_PAGE_NOT_MODIFIABLE, after the trimmed page is
		 * accepted the instruction will encounter a page fault.
		 */
		epc_virt = sgx_get_epc_virt_addr(entry->epc_page);
		ret = __emodpr(&secinfo, epc_virt);
		if (!encls_faulted(ret) || ENCLS_TRAPNR(ret) != X86_TRAP_PF) {
			/* No #PF: the enclave has not yet accepted the page. */
			ret = -EPERM;
			goto out_unlock;
		}

		/* Keep the page out of reclaim while the mutex is dropped. */
		if (sgx_unmark_page_reclaimable(entry->epc_page)) {
			ret = -EBUSY;
			goto out_unlock;
		}

		/*
		 * Do not keep encl->lock because of dependency on
		 * mmap_lock acquired in sgx_zap_enclave_ptes().
		 */
		mutex_unlock(&encl->lock);

		sgx_zap_enclave_ptes(encl, addr);

		mutex_lock(&encl->lock);

		sgx_encl_free_epc_page(entry->epc_page);

		/* Drop all bookkeeping for the removed page. */
		encl->secs_child_cnt--;
		entry->epc_page = NULL;
		xa_erase(&encl->page_array, PFN_DOWN(entry->desc));
		sgx_encl_shrink(encl, NULL);
		kfree(entry);

		mutex_unlock(&encl->lock);
	}

	ret = 0;
	goto out;
out_unlock:
	mutex_unlock(&encl->lock);
out:
	/* Report partial progress (bytes handled) even on failure. */
	params->count = c;

	return ret;
}
  969. /**
  970. * sgx_ioc_enclave_remove_pages() - handler for %SGX_IOC_ENCLAVE_REMOVE_PAGES
  971. * @encl: an enclave pointer
  972. * @arg: userspace pointer to &struct sgx_enclave_remove_pages instance
  973. *
  974. * Final step of the flow removing pages from an initialized enclave. The
  975. * complete flow is:
  976. *
  977. * 1) User changes the type of the pages to be removed to %SGX_PAGE_TYPE_TRIM
  978. * using the %SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl().
  979. * 2) User approves the page removal by running ENCLU[EACCEPT] from within
  980. * the enclave.
  981. * 3) User initiates actual page removal using the
  982. * %SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl() that is handled here.
  983. *
  984. * First remove any page table entries pointing to the page and then proceed
  985. * with the actual removal of the enclave page and data in support of it.
  986. *
  987. * VA pages are not affected by this removal. It is thus possible that the
  988. * enclave may end up with more VA pages than needed to support all its
  989. * pages.
  990. *
  991. * Return:
  992. * - 0: Success
  993. * - -errno: Otherwise
  994. */
  995. static long sgx_ioc_enclave_remove_pages(struct sgx_encl *encl,
  996. void __user *arg)
  997. {
  998. struct sgx_enclave_remove_pages params;
  999. long ret;
  1000. ret = sgx_ioc_sgx2_ready(encl);
  1001. if (ret)
  1002. return ret;
  1003. if (copy_from_user(&params, arg, sizeof(params)))
  1004. return -EFAULT;
  1005. if (sgx_validate_offset_length(encl, params.offset, params.length))
  1006. return -EINVAL;
  1007. if (params.count)
  1008. return -EINVAL;
  1009. ret = sgx_encl_remove_pages(encl, &params);
  1010. if (copy_to_user(arg, &params, sizeof(params)))
  1011. return -EFAULT;
  1012. return ret;
  1013. }
  1014. long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
  1015. {
  1016. struct sgx_encl *encl = filep->private_data;
  1017. int ret;
  1018. if (test_and_set_bit(SGX_ENCL_IOCTL, &encl->flags))
  1019. return -EBUSY;
  1020. switch (cmd) {
  1021. case SGX_IOC_ENCLAVE_CREATE:
  1022. ret = sgx_ioc_enclave_create(encl, (void __user *)arg);
  1023. break;
  1024. case SGX_IOC_ENCLAVE_ADD_PAGES:
  1025. ret = sgx_ioc_enclave_add_pages(encl, (void __user *)arg);
  1026. break;
  1027. case SGX_IOC_ENCLAVE_INIT:
  1028. ret = sgx_ioc_enclave_init(encl, (void __user *)arg);
  1029. break;
  1030. case SGX_IOC_ENCLAVE_PROVISION:
  1031. ret = sgx_ioc_enclave_provision(encl, (void __user *)arg);
  1032. break;
  1033. case SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS:
  1034. ret = sgx_ioc_enclave_restrict_permissions(encl,
  1035. (void __user *)arg);
  1036. break;
  1037. case SGX_IOC_ENCLAVE_MODIFY_TYPES:
  1038. ret = sgx_ioc_enclave_modify_types(encl, (void __user *)arg);
  1039. break;
  1040. case SGX_IOC_ENCLAVE_REMOVE_PAGES:
  1041. ret = sgx_ioc_enclave_remove_pages(encl, (void __user *)arg);
  1042. break;
  1043. default:
  1044. ret = -ENOIOCTLCMD;
  1045. break;
  1046. }
  1047. clear_bit(SGX_ENCL_IOCTL, &encl->flags);
  1048. return ret;
  1049. }