main.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <cpuid.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/auxv.h>
#include "defines.h"
#include "../kselftest_harness.h"
#include "main.h"

static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;
vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;

/*
 * Security Information (SECINFO) data structure needed by a few SGX
 * instructions (e.g. ENCLU[EACCEPT] and ENCLU[EMODPE]) holds metadata
 * about an enclave page. &enum sgx_secinfo_page_state specifies the
 * SECINFO flags used for page state.
 */
enum sgx_secinfo_page_state {
        SGX_SECINFO_PENDING = (1 << 3),
        SGX_SECINFO_MODIFIED = (1 << 4),
        SGX_SECINFO_PR = (1 << 5),
};

struct vdso_symtab {
        Elf64_Sym *elf_symtab;
        const char *elf_symstrtab;
        Elf64_Word *elf_hashtab;
};

static Elf64_Dyn *vdso_get_dyntab(void *addr)
{
        Elf64_Ehdr *ehdr = addr;
        Elf64_Phdr *phdrtab = addr + ehdr->e_phoff;
        int i;

        for (i = 0; i < ehdr->e_phnum; i++)
                if (phdrtab[i].p_type == PT_DYNAMIC)
                        return addr + phdrtab[i].p_offset;

        return NULL;
}

static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag)
{
        int i;

        for (i = 0; dyntab[i].d_tag != DT_NULL; i++)
                if (dyntab[i].d_tag == tag)
                        return addr + dyntab[i].d_un.d_ptr;

        return NULL;
}

static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
{
        Elf64_Dyn *dyntab = vdso_get_dyntab(addr);

        symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB);
        if (!symtab->elf_symtab)
                return false;

        symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB);
        if (!symtab->elf_symstrtab)
                return false;

        symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH);
        if (!symtab->elf_hashtab)
                return false;

        return true;
}

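/*
 * CPUID.(EAX=SGX_CPUID, ECX=0):EAX bit 1 enumerates SGX2 support,
 * i.e. the EAUG/EMODPR/EMODT/EACCEPT/EMODPE extensions exercised by
 * the dynamic-memory tests below.
 */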
static inline int sgx2_supported(void)
{
        unsigned int eax, ebx, ecx, edx;

        __cpuid_count(SGX_CPUID, 0x0, eax, ebx, ecx, edx);

        return eax & 0x2;
}

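/*
 * Standard System V ELF hash function, as specified for DT_HASH
 * tables; e.g. elf_sym_hash("__vdso_sgx_enter_enclave") selects the
 * bucket searched in vdso_symtab_get() below.
 */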
static unsigned long elf_sym_hash(const char *name)
{
        unsigned long h = 0, high;

        while (*name) {
                h = (h << 4) + *name++;
                high = h & 0xf0000000;

                if (high)
                        h ^= high >> 24;

                h &= ~high;
        }

        return h;
}

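/*
 * DT_HASH layout: word 0 holds the bucket count, word 1 the chain
 * count, followed by the bucket array and then the chain array; hence
 * the hardcoded offsets 2 and 2 + bucketnum below.
 */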
static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
{
        Elf64_Word bucketnum = symtab->elf_hashtab[0];
        Elf64_Word *buckettab = &symtab->elf_hashtab[2];
        Elf64_Word *chaintab = &symtab->elf_hashtab[2 + bucketnum];
        Elf64_Sym *sym;
        Elf64_Word i;

        for (i = buckettab[elf_sym_hash(name) % bucketnum]; i != STN_UNDEF;
             i = chaintab[i]) {
                sym = &symtab->elf_symtab[i];
                if (!strcmp(name, &symtab->elf_symstrtab[sym->st_name]))
                        return sym;
        }

        return NULL;
}

/*
 * Return the offset in the enclave where the TCS segment can be found.
 * The first RW segment loaded is the TCS.
 */
static off_t encl_get_tcs_offset(struct encl *encl)
{
        int i;

        for (i = 0; i < encl->nr_segments; i++) {
                struct encl_segment *seg = &encl->segment_tbl[i];

                if (i == 0 && seg->prot == (PROT_READ | PROT_WRITE))
                        return seg->offset;
        }

        return -1;
}

/*
 * Return the offset in the enclave where the data segment can be found.
 * The first RW segment loaded is the TCS, skip that to get info on the
 * data segment.
 */
static off_t encl_get_data_offset(struct encl *encl)
{
        int i;

        for (i = 1; i < encl->nr_segments; i++) {
                struct encl_segment *seg = &encl->segment_tbl[i];

                if (seg->prot == (PROT_READ | PROT_WRITE))
                        return seg->offset;
        }

        return -1;
}

FIXTURE(enclave) {
        struct encl encl;
        struct sgx_enclave_run run;
};

static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
                            struct __test_metadata *_metadata)
{
        Elf64_Sym *sgx_enter_enclave_sym = NULL;
        struct vdso_symtab symtab;
        struct encl_segment *seg;
        char maps_line[256];
        FILE *maps_file;
        unsigned int i;
        void *addr;

        if (!encl_load("test_encl.elf", encl, heap_size)) {
                encl_delete(encl);
                TH_LOG("Failed to load the test enclave.");
                return false;
        }

        if (!encl_measure(encl))
                goto err;

        if (!encl_build(encl))
                goto err;

        /*
         * Only an enclave consumer must do this.
         */
        for (i = 0; i < encl->nr_segments; i++) {
                struct encl_segment *seg = &encl->segment_tbl[i];

                addr = mmap((void *)encl->encl_base + seg->offset, seg->size,
                            seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0);
                EXPECT_NE(addr, MAP_FAILED);
                if (addr == MAP_FAILED)
                        goto err;
        }

        /* Get vDSO base address */
        addr = (void *)getauxval(AT_SYSINFO_EHDR);
        if (!addr)
                goto err;

        if (!vdso_get_symtab(addr, &symtab))
                goto err;

        sgx_enter_enclave_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
        if (!sgx_enter_enclave_sym)
                goto err;

        vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value;

        return true;

err:
        for (i = 0; i < encl->nr_segments; i++) {
                seg = &encl->segment_tbl[i];

                TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot);
        }

        maps_file = fopen("/proc/self/maps", "r");
        if (maps_file != NULL) {
                while (fgets(maps_line, sizeof(maps_line), maps_file) != NULL) {
                        maps_line[strlen(maps_line) - 1] = '\0';

                        if (strstr(maps_line, "/dev/sgx_enclave"))
                                TH_LOG("%s", maps_line);
                }

                fclose(maps_file);
        }

        TH_LOG("Failed to initialize the test enclave.");

        encl_delete(encl);

        return false;
}

FIXTURE_SETUP(enclave)
{
}

FIXTURE_TEARDOWN(enclave)
{
        encl_delete(&self->encl);
}

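/*
 * Enter the enclave at @op via EENTER. With @clobbered set, the vDSO
 * entry is called directly; it follows its own calling convention and
 * may clobber registers. Otherwise the sgx_enter_enclave() wrapper is
 * used, which preserves the standard x86-64 calling convention.
 */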
#define ENCL_CALL(op, run, clobbered) \
        ({ \
                int ret; \
                if ((clobbered)) \
                        ret = vdso_sgx_enter_enclave((unsigned long)(op), 0, 0, \
                                                     EENTER, 0, 0, (run)); \
                else \
                        ret = sgx_enter_enclave((void *)(op), NULL, 0, EENTER, NULL, NULL, \
                                                (run)); \
                ret; \
        })

#define EXPECT_EEXIT(run) \
        do { \
                EXPECT_EQ((run)->function, EEXIT); \
                if ((run)->function != EEXIT) \
                        TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \
                               (run)->exception_error_code, (run)->exception_addr); \
        } while (0)

TEST_F(enclave, unclobbered_vdso)
{
        struct encl_op_get_from_buf get_op;
        struct encl_op_put_to_buf put_op;

        ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

        memset(&self->run, 0, sizeof(self->run));
        self->run.tcs = self->encl.encl_base;

        put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
        put_op.value = MAGIC;

        EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.user_data, 0);

        get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
        get_op.value = 0;

        EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

        EXPECT_EQ(get_op.value, MAGIC);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.user_data, 0);
}

/*
 * A section metric is concatenated in a way that @low bits 12-31 define the
 * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
 * metric.
 */
static unsigned long sgx_calc_section_metric(unsigned int low,
                                             unsigned int high)
{
        return (low & GENMASK_ULL(31, 12)) +
               ((high & GENMASK_ULL(19, 0)) << 32);
}

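/*
 * For example, (hypothetical) CPUID values low = 0x80000000 and
 * high = 0x1 yield 0x80000000 + (0x1ULL << 32) = 0x180000000 bytes,
 * i.e. a 6 GiB EPC section.
 */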
/*
 * Sum total available physical SGX memory across all EPC sections
 *
 * Return: total available physical SGX memory available on system
 */
static unsigned long get_total_epc_mem(void)
{
        unsigned int eax, ebx, ecx, edx;
        unsigned long total_size = 0;
        unsigned int type;
        int section = 0;

        while (true) {
                __cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC, eax, ebx, ecx, edx);

                type = eax & SGX_CPUID_EPC_MASK;
                if (type == SGX_CPUID_EPC_INVALID)
                        break;

                if (type != SGX_CPUID_EPC_SECTION)
                        break;

                total_size += sgx_calc_section_metric(ecx, edx);

                section++;
        }

        return total_size;
}

TEST_F(enclave, unclobbered_vdso_oversubscribed)
{
        struct encl_op_get_from_buf get_op;
        struct encl_op_put_to_buf put_op;
        unsigned long total_mem;

        total_mem = get_total_epc_mem();
        ASSERT_NE(total_mem, 0);
        ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

        memset(&self->run, 0, sizeof(self->run));
        self->run.tcs = self->encl.encl_base;

        put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
        put_op.value = MAGIC;

        EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.user_data, 0);

        get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
        get_op.value = 0;

        EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

        EXPECT_EQ(get_op.value, MAGIC);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.user_data, 0);
}

TEST_F_TIMEOUT(enclave, unclobbered_vdso_oversubscribed_remove, 900)
{
        struct sgx_enclave_remove_pages remove_ioc;
        struct sgx_enclave_modify_types modt_ioc;
        struct encl_op_get_from_buf get_op;
        struct encl_op_eaccept eaccept_op;
        struct encl_op_put_to_buf put_op;
        struct encl_segment *heap;
        unsigned long total_mem;
        int ret, errno_save;
        unsigned long addr;
        unsigned long i;

        /*
         * Create enclave with additional heap that is as big as all
         * available physical SGX memory.
         */
        total_mem = get_total_epc_mem();
        ASSERT_NE(total_mem, 0);
        TH_LOG("Creating an enclave with %lu bytes heap may take a while ...",
               total_mem);
        ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

        /*
         * Hardware (SGX2) and kernel support is needed for this test. Start
         * by checking that the test has a chance of succeeding.
         */
        memset(&modt_ioc, 0, sizeof(modt_ioc));
        ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

        if (ret == -1) {
                if (errno == ENOTTY)
                        SKIP(return,
                             "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
                else if (errno == ENODEV)
                        SKIP(return, "System does not support SGX2");
        }

        /*
         * Invalid parameters were provided during sanity check,
         * expect command to fail.
         */
        EXPECT_EQ(ret, -1);

        /* SGX2 is supported by kernel and hardware, test can proceed. */
        memset(&self->run, 0, sizeof(self->run));
        self->run.tcs = self->encl.encl_base;

        heap = &self->encl.segment_tbl[self->encl.nr_segments - 1];

        put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
        put_op.value = MAGIC;

        EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.user_data, 0);

        get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
        get_op.value = 0;

        EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

        EXPECT_EQ(get_op.value, MAGIC);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.user_data, 0);

        /* Trim entire heap. */
        memset(&modt_ioc, 0, sizeof(modt_ioc));

        modt_ioc.offset = heap->offset;
        modt_ioc.length = heap->size;
        modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

        TH_LOG("Changing type of %zd bytes to trimmed may take a while ...",
               heap->size);
        ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
        errno_save = ret == -1 ? errno : 0;

        EXPECT_EQ(ret, 0);
        EXPECT_EQ(errno_save, 0);
        EXPECT_EQ(modt_ioc.result, 0);
        EXPECT_EQ(modt_ioc.count, heap->size);

        /* EACCEPT all removed pages. */
        addr = self->encl.encl_base + heap->offset;

        eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
        eaccept_op.header.type = ENCL_OP_EACCEPT;

        TH_LOG("Entering enclave to run EACCEPT for each page of %zd bytes may take a while ...",
               heap->size);
        for (i = 0; i < heap->size; i += 4096) {
                eaccept_op.epc_addr = addr + i;
                eaccept_op.ret = 0;

                EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

                EXPECT_EQ(self->run.exception_vector, 0);
                EXPECT_EQ(self->run.exception_error_code, 0);
                EXPECT_EQ(self->run.exception_addr, 0);
                ASSERT_EQ(eaccept_op.ret, 0);
                ASSERT_EQ(self->run.function, EEXIT);
        }

        /* Complete page removal. */
        memset(&remove_ioc, 0, sizeof(remove_ioc));

        remove_ioc.offset = heap->offset;
        remove_ioc.length = heap->size;

        TH_LOG("Removing %zd bytes from enclave may take a while ...",
               heap->size);
        ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
        errno_save = ret == -1 ? errno : 0;

        EXPECT_EQ(ret, 0);
        EXPECT_EQ(errno_save, 0);
        EXPECT_EQ(remove_ioc.count, heap->size);
}

TEST_F(enclave, clobbered_vdso)
{
        struct encl_op_get_from_buf get_op;
        struct encl_op_put_to_buf put_op;

        ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

        memset(&self->run, 0, sizeof(self->run));
        self->run.tcs = self->encl.encl_base;

        put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
        put_op.value = MAGIC;

        EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.user_data, 0);

        get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
        get_op.value = 0;

        EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

        EXPECT_EQ(get_op.value, MAGIC);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.user_data, 0);
}

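/*
 * Exit handler registered via run->user_handler below: the vDSO
 * invokes it on enclave exit. Clearing user_data is what the test
 * checks to confirm the handler ran; returning 0 makes
 * __vdso_sgx_enter_enclave() return to its caller.
 */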
static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
                        struct sgx_enclave_run *run)
{
        run->user_data = 0;

        return 0;
}

TEST_F(enclave, clobbered_vdso_and_user_function)
{
        struct encl_op_get_from_buf get_op;
        struct encl_op_put_to_buf put_op;

        ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

        memset(&self->run, 0, sizeof(self->run));
        self->run.tcs = self->encl.encl_base;

        self->run.user_handler = (__u64)test_handler;
        self->run.user_data = 0xdeadbeef;

        put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
        put_op.value = MAGIC;

        EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.user_data, 0);

        get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
        get_op.value = 0;

        EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

        EXPECT_EQ(get_op.value, MAGIC);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.user_data, 0);
}

/*
 * Sanity check that it is possible to enter either of the two hardcoded TCSs.
 */
TEST_F(enclave, tcs_entry)
{
        struct encl_op_header op;

        ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

        memset(&self->run, 0, sizeof(self->run));
        self->run.tcs = self->encl.encl_base;

        op.type = ENCL_OP_NOP;

        EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);

        /* Move to the next TCS. */
        self->run.tcs = self->encl.encl_base + PAGE_SIZE;

        EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);
}

/*
 * Second page of .data segment is used to test changing PTE permissions.
 * This spans the local encl_buffer within the test enclave.
 *
 * 1) Start with a sanity check: a value is written to the target page within
 *    the enclave and read back to ensure target page can be written to.
 * 2) Change PTE permissions (RW -> RO) of target page within enclave.
 * 3) Repeat (1) - this time expecting a regular #PF communicated via the
 *    vDSO.
 * 4) Change PTE permissions of target page within enclave back to be RW.
 * 5) Repeat (1) by resuming enclave, now expected to be possible to write to
 *    and read from target page within enclave.
 */
TEST_F(enclave, pte_permissions)
{
        struct encl_op_get_from_addr get_addr_op;
        struct encl_op_put_to_addr put_addr_op;
        unsigned long data_start;
        int ret;

        ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

        memset(&self->run, 0, sizeof(self->run));
        self->run.tcs = self->encl.encl_base;

        data_start = self->encl.encl_base +
                     encl_get_data_offset(&self->encl) +
                     PAGE_SIZE;

        /*
         * Sanity check to ensure it is possible to write to page that will
         * have its permissions manipulated.
         */

        /* Write MAGIC to page */
        put_addr_op.value = MAGIC;
        put_addr_op.addr = data_start;
        put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

        EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);

        /*
         * Read memory that was just written to, confirming that it is the
         * value previously written (MAGIC).
         */
        get_addr_op.value = 0;
        get_addr_op.addr = data_start;
        get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

        EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

        EXPECT_EQ(get_addr_op.value, MAGIC);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);

        /* Change PTE permissions of target page within the enclave */
        ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ);
        if (ret)
                perror("mprotect");

        /*
         * PTE permissions of target page changed to read-only, EPCM
         * permissions unchanged (EPCM permissions are RW), attempt to
         * write to the page, expecting a regular #PF.
         */
        put_addr_op.value = MAGIC2;

        EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

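        /*
         * Expected #PF error code 0x7 = present | write | user. The SGX
         * bit (bit 15) is clear because this fault comes from the page
         * tables, not from the EPCM.
         */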
        EXPECT_EQ(self->run.exception_vector, 14);
        EXPECT_EQ(self->run.exception_error_code, 0x7);
        EXPECT_EQ(self->run.exception_addr, data_start);

        self->run.exception_vector = 0;
        self->run.exception_error_code = 0;
        self->run.exception_addr = 0;

        /*
         * Change PTE permissions back to enable enclave to write to the
         * target page and resume enclave - do not expect any exceptions this
         * time.
         */
        ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE);
        if (ret)
                perror("mprotect");

        EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0,
                                         0, ERESUME, 0, 0, &self->run),
                  0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);

        get_addr_op.value = 0;

        EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

        EXPECT_EQ(get_addr_op.value, MAGIC2);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);
}

/*
 * Modifying permissions of TCS page should not be possible.
 */
TEST_F(enclave, tcs_permissions)
{
        struct sgx_enclave_restrict_permissions ioc;
        int ret, errno_save;

        ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

        memset(&self->run, 0, sizeof(self->run));
        self->run.tcs = self->encl.encl_base;

        memset(&ioc, 0, sizeof(ioc));

        /*
         * Ensure kernel supports needed ioctl() and system supports needed
         * commands.
         */
        ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
        errno_save = ret == -1 ? errno : 0;

        /*
         * Invalid parameters were provided during sanity check,
         * expect command to fail.
         */
        ASSERT_EQ(ret, -1);

        /* ret == -1 */
        if (errno_save == ENOTTY)
                SKIP(return,
                     "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
        else if (errno_save == ENODEV)
                SKIP(return, "System does not support SGX2");

        /*
         * Attempt to make TCS page read-only. This is not allowed and
         * should be prevented by the kernel.
         */
        ioc.offset = encl_get_tcs_offset(&self->encl);
        ioc.length = PAGE_SIZE;
        ioc.permissions = SGX_SECINFO_R;

        ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
        errno_save = ret == -1 ? errno : 0;

        EXPECT_EQ(ret, -1);
        EXPECT_EQ(errno_save, EINVAL);
        EXPECT_EQ(ioc.result, 0);
        EXPECT_EQ(ioc.count, 0);
}

/*
 * Enclave page permission test.
 *
 * Modify and restore enclave page's EPCM (enclave) permissions from
 * outside enclave (ENCLS[EMODPR] via kernel) as well as from within
 * enclave (via ENCLU[EMODPE]). Check for page fault if
 * VMA allows access but EPCM permissions do not.
 */
TEST_F(enclave, epcm_permissions)
{
        struct sgx_enclave_restrict_permissions restrict_ioc;
        struct encl_op_get_from_addr get_addr_op;
        struct encl_op_put_to_addr put_addr_op;
        struct encl_op_eaccept eaccept_op;
        struct encl_op_emodpe emodpe_op;
        unsigned long data_start;
        int ret, errno_save;

        ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

        memset(&self->run, 0, sizeof(self->run));
        self->run.tcs = self->encl.encl_base;

        /*
         * Ensure kernel supports needed ioctl() and system supports needed
         * commands.
         */
        memset(&restrict_ioc, 0, sizeof(restrict_ioc));

        ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
                    &restrict_ioc);
        errno_save = ret == -1 ? errno : 0;

        /*
         * Invalid parameters were provided during sanity check,
         * expect command to fail.
         */
        ASSERT_EQ(ret, -1);

        /* ret == -1 */
        if (errno_save == ENOTTY)
                SKIP(return,
                     "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
        else if (errno_save == ENODEV)
                SKIP(return, "System does not support SGX2");

        /*
         * Page that will have its permissions changed is the second data
         * page in the .data segment. This forms part of the local encl_buffer
         * within the enclave.
         *
         * At start of test @data_start should have EPCM as well as PTE and
         * VMA permissions of RW.
         */
        data_start = self->encl.encl_base +
                     encl_get_data_offset(&self->encl) + PAGE_SIZE;

        /*
         * Sanity check that page at @data_start is writable before making
         * any changes to page permissions.
         *
         * Start by writing MAGIC to test page.
         */
        put_addr_op.value = MAGIC;
        put_addr_op.addr = data_start;
        put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

        EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);

        /*
         * Read memory that was just written to, confirming that
         * page is writable.
         */
        get_addr_op.value = 0;
        get_addr_op.addr = data_start;
        get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

        EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

        EXPECT_EQ(get_addr_op.value, MAGIC);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);

        /*
         * Change EPCM permissions to read-only. Kernel still considers
         * the page writable.
         */
        memset(&restrict_ioc, 0, sizeof(restrict_ioc));

        restrict_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
        restrict_ioc.length = PAGE_SIZE;
        restrict_ioc.permissions = SGX_SECINFO_R;

        ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
                    &restrict_ioc);
        errno_save = ret == -1 ? errno : 0;

        EXPECT_EQ(ret, 0);
        EXPECT_EQ(errno_save, 0);
        EXPECT_EQ(restrict_ioc.result, 0);
        EXPECT_EQ(restrict_ioc.count, 4096);

        /*
         * EPCM permissions changed from kernel, need to EACCEPT from enclave.
         */
        eaccept_op.epc_addr = data_start;
        eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_REG | SGX_SECINFO_PR;
        eaccept_op.ret = 0;
        eaccept_op.header.type = ENCL_OP_EACCEPT;

        EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);
        EXPECT_EQ(eaccept_op.ret, 0);

        /*
         * EPCM permissions of page are now read-only, expect #PF
         * on EPCM when attempting to write to page from within enclave.
         */
        put_addr_op.value = MAGIC2;

        EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

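        /*
         * Expected #PF error code 0x8007: present | write | user with the
         * SGX bit (bit 15) set, i.e. the fault was induced by the EPCM.
         * run.function is left at ERESUME, showing the exit was an AEX
         * rather than an EEXIT.
         */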
        EXPECT_EQ(self->run.function, ERESUME);
        EXPECT_EQ(self->run.exception_vector, 14);
        EXPECT_EQ(self->run.exception_error_code, 0x8007);
        EXPECT_EQ(self->run.exception_addr, data_start);

        self->run.exception_vector = 0;
        self->run.exception_error_code = 0;
        self->run.exception_addr = 0;

        /*
         * Received AEX but cannot return to enclave at same entrypoint,
         * need different TCS from where EPCM permission can be made writable
         * again.
         */
        self->run.tcs = self->encl.encl_base + PAGE_SIZE;

        /*
         * Enter enclave at new TCS to change EPCM permissions to be
         * writable again and thus fix the page fault that triggered the
         * AEX.
         */
        emodpe_op.epc_addr = data_start;
        emodpe_op.flags = SGX_SECINFO_R | SGX_SECINFO_W;
        emodpe_op.header.type = ENCL_OP_EMODPE;

        EXPECT_EQ(ENCL_CALL(&emodpe_op, &self->run, true), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);

        /*
         * The wrong EPCM permissions that caused the original fault have
         * now been fixed via EMODPE and the PTE continues to allow writing
         * to the page. Return to the main TCS and resume execution at the
         * faulting instruction to re-attempt the memory access.
         */
        self->run.tcs = self->encl.encl_base;

        EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
                                         ERESUME, 0, 0,
                                         &self->run),
                  0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);

        get_addr_op.value = 0;

        EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

        EXPECT_EQ(get_addr_op.value, MAGIC2);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.user_data, 0);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);
}

/*
 * Test the addition of pages to an initialized enclave by writing to
 * a page that belongs to the enclave's address space but was not added
 * during enclave creation.
 */
TEST_F(enclave, augment)
{
        struct encl_op_get_from_addr get_addr_op;
        struct encl_op_put_to_addr put_addr_op;
        struct encl_op_eaccept eaccept_op;
        size_t total_size = 0;
        void *addr;
        int i;

        if (!sgx2_supported())
                SKIP(return, "SGX2 not supported");

        ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

        memset(&self->run, 0, sizeof(self->run));
        self->run.tcs = self->encl.encl_base;

        for (i = 0; i < self->encl.nr_segments; i++) {
                struct encl_segment *seg = &self->encl.segment_tbl[i];

                total_size += seg->size;
        }

        /*
         * Actual enclave size is expected to be larger than the loaded
         * test enclave since enclave size must be a power of 2 in bytes
         * and test_encl does not consume it all.
         */
        EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

        /*
         * Create memory mapping for the page that will be added. New
         * memory mapping is for one page right after all existing
         * mappings.
         * Kernel will allow new mapping using any permissions if it
         * falls into the enclave's address range but is not backed
         * by existing enclave pages.
         */
        addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
        EXPECT_NE(addr, MAP_FAILED);

        self->run.exception_vector = 0;
        self->run.exception_error_code = 0;
        self->run.exception_addr = 0;

        /*
         * Attempt to write to the new page from within enclave.
         * Expected to fail since page is not (yet) part of the enclave.
         * The first #PF will trigger the addition of the page to the
         * enclave, but since the new page needs an EACCEPT from within the
         * enclave before it can be used it would not be possible
         * to successfully return to the failing instruction. This is the
         * cause of the second #PF captured here having the SGX bit set;
         * it is from hardware preventing the page from being used.
         */
        put_addr_op.value = MAGIC;
        put_addr_op.addr = (unsigned long)addr;
        put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

        EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

        EXPECT_EQ(self->run.function, ERESUME);
        EXPECT_EQ(self->run.exception_vector, 14);
        EXPECT_EQ(self->run.exception_addr, (unsigned long)addr);

        if (self->run.exception_error_code == 0x6) {
                munmap(addr, PAGE_SIZE);
                SKIP(return, "Kernel does not support adding pages to initialized enclave");
        }

        EXPECT_EQ(self->run.exception_error_code, 0x8007);

        self->run.exception_vector = 0;
        self->run.exception_error_code = 0;
        self->run.exception_addr = 0;

        /* Handle AEX by running EACCEPT from new entry point. */
        self->run.tcs = self->encl.encl_base + PAGE_SIZE;

        eaccept_op.epc_addr = self->encl.encl_base + total_size;
        eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
        eaccept_op.ret = 0;
        eaccept_op.header.type = ENCL_OP_EACCEPT;

        EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);
        EXPECT_EQ(eaccept_op.ret, 0);

        /* Can now return to main TCS to resume execution. */
        self->run.tcs = self->encl.encl_base;

        EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
                                         ERESUME, 0, 0,
                                         &self->run),
                  0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);

        /*
         * Read memory from newly added page that was just written to,
         * confirming that data previously written (MAGIC) is present.
         */
        get_addr_op.value = 0;
        get_addr_op.addr = (unsigned long)addr;
        get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

        EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

        EXPECT_EQ(get_addr_op.value, MAGIC);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);

        munmap(addr, PAGE_SIZE);
}

/*
 * Test for the addition of pages to an initialized enclave via a
 * pre-emptive run of EACCEPT on page to be added.
 */
TEST_F(enclave, augment_via_eaccept)
{
        struct encl_op_get_from_addr get_addr_op;
        struct encl_op_put_to_addr put_addr_op;
        struct encl_op_eaccept eaccept_op;
        size_t total_size = 0;
        void *addr;
        int i;

        if (!sgx2_supported())
                SKIP(return, "SGX2 not supported");

        ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

        memset(&self->run, 0, sizeof(self->run));
        self->run.tcs = self->encl.encl_base;

        for (i = 0; i < self->encl.nr_segments; i++) {
                struct encl_segment *seg = &self->encl.segment_tbl[i];

                total_size += seg->size;
        }

        /*
         * Actual enclave size is expected to be larger than the loaded
         * test enclave since enclave size must be a power of 2 in bytes while
         * test_encl does not consume it all.
         */
        EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

        /*
         * mmap() a page at end of existing enclave to be used for dynamic
         * EPC page.
         *
         * Kernel will allow new mapping using any permissions if it
         * falls into the enclave's address range but is not backed
         * by existing enclave pages.
         */
        addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
                    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED,
                    self->encl.fd, 0);
        EXPECT_NE(addr, MAP_FAILED);

        self->run.exception_vector = 0;
        self->run.exception_error_code = 0;
        self->run.exception_addr = 0;

        /*
         * Run EACCEPT on the new page to trigger the
         * EACCEPT->(#PF)->EAUG->EACCEPT(again, without a #PF) flow. All of
         * it should be transparent to userspace.
         */
        eaccept_op.epc_addr = self->encl.encl_base + total_size;
        eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
        eaccept_op.ret = 0;
        eaccept_op.header.type = ENCL_OP_EACCEPT;

        EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

        if (self->run.exception_vector == 14 &&
            self->run.exception_error_code == 4 &&
            self->run.exception_addr == self->encl.encl_base + total_size) {
                munmap(addr, PAGE_SIZE);
                SKIP(return, "Kernel does not support adding pages to initialized enclave");
        }

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);
        EXPECT_EQ(eaccept_op.ret, 0);

        /*
         * New page should be accessible from within enclave - attempt to
         * write to it.
         */
        put_addr_op.value = MAGIC;
        put_addr_op.addr = (unsigned long)addr;
        put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

        EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);

        /*
         * Read memory from newly added page that was just written to,
         * confirming that data previously written (MAGIC) is present.
         */
        get_addr_op.value = 0;
        get_addr_op.addr = (unsigned long)addr;
        get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

        EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

        EXPECT_EQ(get_addr_op.value, MAGIC);
        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);

        munmap(addr, PAGE_SIZE);
}

/*
 * SGX2 page type modification test in two phases:
 * Phase 1:
 * Create a new TCS, consisting of three new pages (stack page with regular
 * page type, SSA page with regular page type, and TCS page with TCS page
 * type) in an initialized enclave and run a simple workload within it.
 * Phase 2:
 * Remove the three pages added in phase 1, add a new regular page at the
 * same address that previously hosted the TCS page and verify that it can
 * be modified.
 */
TEST_F(enclave, tcs_create)
{
        struct encl_op_init_tcs_page init_tcs_page_op;
        struct sgx_enclave_remove_pages remove_ioc;
        struct encl_op_get_from_addr get_addr_op;
        struct sgx_enclave_modify_types modt_ioc;
        struct encl_op_put_to_addr put_addr_op;
        struct encl_op_get_from_buf get_buf_op;
        struct encl_op_put_to_buf put_buf_op;
        void *addr, *tcs, *stack_end, *ssa;
        struct encl_op_eaccept eaccept_op;
        size_t total_size = 0;
        uint64_t val_64;
        int errno_save;
        int ret, i;

        ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
                                    _metadata));

        memset(&self->run, 0, sizeof(self->run));
        self->run.tcs = self->encl.encl_base;

        /*
         * Hardware (SGX2) and kernel support is needed for this test. Start
         * by checking that the test has a chance of succeeding.
         */
        memset(&modt_ioc, 0, sizeof(modt_ioc));
        ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

        if (ret == -1) {
                if (errno == ENOTTY)
                        SKIP(return,
                             "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
                else if (errno == ENODEV)
                        SKIP(return, "System does not support SGX2");
        }

        /*
         * Invalid parameters were provided during sanity check,
         * expect command to fail.
         */
        EXPECT_EQ(ret, -1);

        /*
         * Add three regular pages via EAUG: one will be the TCS stack, one
         * will be the TCS SSA, and one will be the new TCS. The stack and
         * SSA will remain as regular pages, the TCS page will need its
         * type changed after being populated with the needed data.
         */
        for (i = 0; i < self->encl.nr_segments; i++) {
                struct encl_segment *seg = &self->encl.segment_tbl[i];

                total_size += seg->size;
        }

        /*
         * Actual enclave size is expected to be larger than the loaded
         * test enclave since enclave size must be a power of 2 in bytes while
         * test_encl does not consume it all.
         */
        EXPECT_LT(total_size + 3 * PAGE_SIZE, self->encl.encl_size);

        /*
         * mmap() three pages at end of existing enclave to be used for the
         * three new pages.
         */
        addr = mmap((void *)self->encl.encl_base + total_size, 3 * PAGE_SIZE,
                    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
                    self->encl.fd, 0);
        EXPECT_NE(addr, MAP_FAILED);

        self->run.exception_vector = 0;
        self->run.exception_error_code = 0;
        self->run.exception_addr = 0;

        stack_end = (void *)self->encl.encl_base + total_size;
        tcs = (void *)self->encl.encl_base + total_size + PAGE_SIZE;
        ssa = (void *)self->encl.encl_base + total_size + 2 * PAGE_SIZE;

        /*
         * Run EACCEPT on each new page to trigger the
         * EACCEPT->(#PF)->EAUG->EACCEPT(again without a #PF) flow.
         */
        eaccept_op.epc_addr = (unsigned long)stack_end;
        eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
        eaccept_op.ret = 0;
        eaccept_op.header.type = ENCL_OP_EACCEPT;

        EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

        if (self->run.exception_vector == 14 &&
            self->run.exception_error_code == 4 &&
            self->run.exception_addr == (unsigned long)stack_end) {
                munmap(addr, 3 * PAGE_SIZE);
                SKIP(return, "Kernel does not support adding pages to initialized enclave");
        }

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);
        EXPECT_EQ(eaccept_op.ret, 0);

        eaccept_op.epc_addr = (unsigned long)ssa;

        EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);
        EXPECT_EQ(eaccept_op.ret, 0);

        eaccept_op.epc_addr = (unsigned long)tcs;

        EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

        EXPECT_EEXIT(&self->run);
        EXPECT_EQ(self->run.exception_vector, 0);
        EXPECT_EQ(self->run.exception_error_code, 0);
        EXPECT_EQ(self->run.exception_addr, 0);
        EXPECT_EQ(eaccept_op.ret, 0);

        /*
         * Three new pages added to enclave. Now populate the TCS page with
         * needed data. This must be done from within the enclave, so invoke
         * the enclave operation that performs the actual data population.
         */

        /*
         * New TCS will use the "encl_dyn_entry" entrypoint that expects
         * the stack to begin in the page before the TCS page.
         */
        val_64 = encl_get_entry(&self->encl, "encl_dyn_entry");
        EXPECT_NE(val_64, 0);

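        /*
         * Note: TCS.OSSA holds an offset relative to the enclave base, so
         * the SSA is passed as an enclave-relative offset below, while
         * tcs_page is a linear address.
         */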
  1077. init_tcs_page_op.tcs_page = (unsigned long)tcs;
  1078. init_tcs_page_op.ssa = (unsigned long)total_size + 2 * PAGE_SIZE;
  1079. init_tcs_page_op.entry = val_64;
  1080. init_tcs_page_op.header.type = ENCL_OP_INIT_TCS_PAGE;
  1081. EXPECT_EQ(ENCL_CALL(&init_tcs_page_op, &self->run, true), 0);
  1082. EXPECT_EEXIT(&self->run);
  1083. EXPECT_EQ(self->run.exception_vector, 0);
  1084. EXPECT_EQ(self->run.exception_error_code, 0);
  1085. EXPECT_EQ(self->run.exception_addr, 0);
  1086. /* Change TCS page type to TCS. */
  1087. memset(&modt_ioc, 0, sizeof(modt_ioc));
  1088. modt_ioc.offset = total_size + PAGE_SIZE;
  1089. modt_ioc.length = PAGE_SIZE;
  1090. modt_ioc.page_type = SGX_PAGE_TYPE_TCS;
  1091. ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
  1092. errno_save = ret == -1 ? errno : 0;
  1093. EXPECT_EQ(ret, 0);
  1094. EXPECT_EQ(errno_save, 0);
  1095. EXPECT_EQ(modt_ioc.result, 0);
  1096. EXPECT_EQ(modt_ioc.count, 4096);
  1097. /* EACCEPT new TCS page from enclave. */
  1098. eaccept_op.epc_addr = (unsigned long)tcs;
  1099. eaccept_op.flags = SGX_SECINFO_TCS | SGX_SECINFO_MODIFIED;
  1100. eaccept_op.ret = 0;
  1101. eaccept_op.header.type = ENCL_OP_EACCEPT;
  1102. EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
  1103. EXPECT_EEXIT(&self->run);
  1104. EXPECT_EQ(self->run.exception_vector, 0);
  1105. EXPECT_EQ(self->run.exception_error_code, 0);
  1106. EXPECT_EQ(self->run.exception_addr, 0);
  1107. EXPECT_EQ(eaccept_op.ret, 0);
  1108. /* Run workload from new TCS. */
  1109. self->run.tcs = (unsigned long)tcs;
  1110. /*
  1111. * Simple workload to write to data buffer and read value back.
  1112. */
  1113. put_buf_op.header.type = ENCL_OP_PUT_TO_BUFFER;
  1114. put_buf_op.value = MAGIC;
  1115. EXPECT_EQ(ENCL_CALL(&put_buf_op, &self->run, true), 0);
  1116. EXPECT_EEXIT(&self->run);
  1117. EXPECT_EQ(self->run.exception_vector, 0);
  1118. EXPECT_EQ(self->run.exception_error_code, 0);
  1119. EXPECT_EQ(self->run.exception_addr, 0);
  1120. get_buf_op.header.type = ENCL_OP_GET_FROM_BUFFER;
  1121. get_buf_op.value = 0;
  1122. EXPECT_EQ(ENCL_CALL(&get_buf_op, &self->run, true), 0);
  1123. EXPECT_EQ(get_buf_op.value, MAGIC);
  1124. EXPECT_EEXIT(&self->run);
  1125. EXPECT_EQ(self->run.exception_vector, 0);
  1126. EXPECT_EQ(self->run.exception_error_code, 0);
  1127. EXPECT_EQ(self->run.exception_addr, 0);
  1128. /*
  1129. * Phase 2 of test:
  1130. * Remove pages associated with new TCS, create a regular page
  1131. * where TCS page used to be and verify it can be used as a regular
  1132. * page.
  1133. */
  1134. /* Start page removal by requesting change of page type to PT_TRIM. */
  1135. memset(&modt_ioc, 0, sizeof(modt_ioc));
  1136. modt_ioc.offset = total_size;
  1137. modt_ioc.length = 3 * PAGE_SIZE;
  1138. modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
  1139. ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
  1140. errno_save = ret == -1 ? errno : 0;
  1141. EXPECT_EQ(ret, 0);
  1142. EXPECT_EQ(errno_save, 0);
  1143. EXPECT_EQ(modt_ioc.result, 0);
  1144. EXPECT_EQ(modt_ioc.count, 3 * PAGE_SIZE);
  1145. /*
  1146. * Enter enclave via TCS #1 and approve page removal by sending
  1147. * EACCEPT for each of three removed pages.
  1148. */
  1149. self->run.tcs = self->encl.encl_base;
  1150. eaccept_op.epc_addr = (unsigned long)stack_end;
  1151. eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
  1152. eaccept_op.ret = 0;
  1153. eaccept_op.header.type = ENCL_OP_EACCEPT;
  1154. EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
  1155. EXPECT_EEXIT(&self->run);
  1156. EXPECT_EQ(self->run.exception_vector, 0);
  1157. EXPECT_EQ(self->run.exception_error_code, 0);
  1158. EXPECT_EQ(self->run.exception_addr, 0);
  1159. EXPECT_EQ(eaccept_op.ret, 0);
  1160. eaccept_op.epc_addr = (unsigned long)tcs;
  1161. eaccept_op.ret = 0;
  1162. EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
  1163. EXPECT_EEXIT(&self->run);
  1164. EXPECT_EQ(self->run.exception_vector, 0);
  1165. EXPECT_EQ(self->run.exception_error_code, 0);
  1166. EXPECT_EQ(self->run.exception_addr, 0);
  1167. EXPECT_EQ(eaccept_op.ret, 0);
  1168. eaccept_op.epc_addr = (unsigned long)ssa;
  1169. eaccept_op.ret = 0;
  1170. EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
  1171. EXPECT_EEXIT(&self->run);
  1172. EXPECT_EQ(self->run.exception_vector, 0);
  1173. EXPECT_EQ(self->run.exception_error_code, 0);
  1174. EXPECT_EQ(self->run.exception_addr, 0);
  1175. EXPECT_EQ(eaccept_op.ret, 0);
  1176. /* Send final ioctl() to complete page removal. */
  1177. memset(&remove_ioc, 0, sizeof(remove_ioc));
  1178. remove_ioc.offset = total_size;
  1179. remove_ioc.length = 3 * PAGE_SIZE;
  1180. ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
  1181. errno_save = ret == -1 ? errno : 0;
  1182. EXPECT_EQ(ret, 0);
  1183. EXPECT_EQ(errno_save, 0);
  1184. EXPECT_EQ(remove_ioc.count, 3 * PAGE_SIZE);
  1185. /*
  1186. * Enter enclave via TCS #1 and access location where TCS #3 was to
  1187. * trigger dynamic add of regular page at that location.
  1188. */
  1189. eaccept_op.epc_addr = (unsigned long)tcs;
  1190. eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
  1191. eaccept_op.ret = 0;
  1192. eaccept_op.header.type = ENCL_OP_EACCEPT;
  1193. EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
  1194. EXPECT_EEXIT(&self->run);
  1195. EXPECT_EQ(self->run.exception_vector, 0);
  1196. EXPECT_EQ(self->run.exception_error_code, 0);
  1197. EXPECT_EQ(self->run.exception_addr, 0);
  1198. EXPECT_EQ(eaccept_op.ret, 0);
	/*
	 * New page should be accessible from within the enclave - write to it.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)tcs;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory from newly added page that was just written to,
	 * confirming that data previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)tcs;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, 3 * PAGE_SIZE);
}
/*
 * Ensure sane behavior if a user requests page removal, does not run
 * EACCEPT from within the enclave, but still attempts to finalize the
 * removal with the SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl(). The latter
 * should fail because the removal was never EACCEPTed from within the
 * enclave.
 */
TEST_F(enclave, remove_added_page_no_eaccept)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct encl_op_get_from_addr get_addr_op;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_put_to_addr put_addr_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;
	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with a check that the test has a chance of succeeding.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during the sanity check,
	 * so the command is expected to fail.
	 */
	EXPECT_EQ(ret, -1);
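	/*
	 * The all-zero request has a zero length and is rejected before any
	 * page is touched (the kernel is expected to return EINVAL here);
	 * only ENOTTY and ENODEV indicate missing support.
	 */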
	/*
	 * Page that will be removed is the second data page in the .data
	 * segment. This forms part of the local encl_buffer within the
	 * enclave.
	 */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;
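	/*
	 * Note the two address forms in use: the ioctl()s below take offsets
	 * relative to the enclave base, while the in-enclave operations use
	 * the absolute address in @data_start.
	 */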
	/*
	 * Sanity check that page at @data_start is writable before
	 * removing it.
	 *
	 * Start by writing MAGIC to test page.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory that was just written to, confirming that data
	 * previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Start page removal by requesting change of page type to PT_TRIM. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;
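	/*
	 * A zero .result is the ENCLS[EMODT] success code, and a .count of
	 * 4096 confirms that exactly one page had its type changed.
	 */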
	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 4096);
	/* Skip EACCEPT. */

	/* Send final ioctl() to complete page removal. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));
	remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	remove_ioc.length = PAGE_SIZE;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	/* Operation not permitted since EACCEPT was omitted. */
	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno_save, EPERM);
	EXPECT_EQ(remove_ioc.count, 0);
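	/*
	 * The kernel can only EREMOVE a trimmed page after the enclave has
	 * EACCEPTed the type change; without that acknowledgement the
	 * removal is expected to be refused (surfaced as EPERM) with
	 * .count left at zero.
	 */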
}
/*
 * Request enclave page removal, but instead of correctly following up
 * with EACCEPT, attempt to read the page from within the enclave.
 */
TEST_F(enclave, remove_added_page_invalid_access)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct sgx_enclave_modify_types ioc;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;
	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with a check that the test has a chance of succeeding.
	 */
	memset(&ioc, 0, sizeof(ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during the sanity check,
	 * so the command is expected to fail.
	 */
	EXPECT_EQ(ret, -1);
	/*
	 * Page that will be removed is the second data page in the .data
	 * segment. This forms part of the local encl_buffer within the
	 * enclave.
	 */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Sanity check that page at @data_start is writable before
	 * removing it.
	 *
	 * Start by writing MAGIC to test page.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory that was just written to, confirming that data
	 * previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Start page removal by requesting change of page type to PT_TRIM. */
	memset(&ioc, 0, sizeof(ioc));
	ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	ioc.length = PAGE_SIZE;
	ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, 4096);
	/*
	 * Read from the page whose type was just changed to PT_TRIM.
	 */
	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	/*
	 * From the kernel's perspective the page is present, but according
	 * to SGX the page should not be accessible, so a #PF with the SGX
	 * bit set is expected.
	 */
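	/*
	 * Vector 14 is #PF and error code 0x8005 decodes to
	 * PF_PROT | PF_USER | PF_SGX: a user-mode protection violation with
	 * bit 15 set, meaning the fault was flagged by the SGX hardware
	 * rather than the page tables. A pending function of ERESUME shows
	 * the enclave was exited asynchronously instead of via a clean
	 * EEXIT.
	 */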
	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8005);
	EXPECT_EQ(self->run.exception_addr, data_start);
}
/*
 * Request enclave page removal and correctly follow up with EACCEPT,
 * but do not issue the removal ioctl(); instead, attempt to read the
 * removed page from within the enclave.
 */
TEST_F(enclave, remove_added_page_invalid_access_after_eaccept)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct sgx_enclave_modify_types ioc;
	struct encl_op_eaccept eaccept_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;
	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with a check that the test has a chance of succeeding.
	 */
	memset(&ioc, 0, sizeof(ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during the sanity check,
	 * so the command is expected to fail.
	 */
	EXPECT_EQ(ret, -1);
	/*
	 * Page that will be removed is the second data page in the .data
	 * segment. This forms part of the local encl_buffer within the
	 * enclave.
	 */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Sanity check that page at @data_start is writable before
	 * removing it.
	 *
	 * Start by writing MAGIC to test page.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory that was just written to, confirming that data
	 * previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Start page removal by requesting change of page type to PT_TRIM. */
	memset(&ioc, 0, sizeof(ioc));
	ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	ioc.length = PAGE_SIZE;
	ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, 4096);
	eaccept_op.epc_addr = (unsigned long)data_start;
	eaccept_op.ret = 0;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);
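	/*
	 * At this point the trim has been EACCEPTed but the backing page
	 * has not yet been removed with the ioctl(). Even so, SGX already
	 * treats the PT_TRIM page as inaccessible to the enclave.
	 */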
	/* Skip the ioctl() that would complete page removal. */

	/*
	 * Read from the page that was trimmed and EACCEPTed but never
	 * actually removed.
	 */
	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	/*
	 * From the kernel's perspective the page is present, but according
	 * to SGX the page should not be accessible, so a #PF with the SGX
	 * bit set is expected.
	 */
	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8005);
	EXPECT_EQ(self->run.exception_addr, data_start);
}
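/*
 * Ensure that a page that was never accessed from within the enclave can
 * still be removed: change its type to PT_TRIM, EACCEPT the change, and
 * complete the removal with SGX_IOC_ENCLAVE_REMOVE_PAGES, expecting
 * every step to succeed.
 */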
TEST_F(enclave, remove_untouched_page)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_eaccept eaccept_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with a check that the test has a chance of succeeding.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during the sanity check,
	 * so the command is expected to fail.
	 */
	EXPECT_EQ(ret, -1);
	/* SGX2 is supported by the kernel and hardware, test can proceed. */
	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	memset(&modt_ioc, 0, sizeof(modt_ioc));
	modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 4096);

	/*
	 * Enter enclave via TCS #1 and approve page removal by sending
	 * EACCEPT for the removed page.
	 */
	eaccept_op.epc_addr = data_start;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);
	memset(&remove_ioc, 0, sizeof(remove_ioc));
	remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	remove_ioc.length = PAGE_SIZE;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, 4096);
}

TEST_HARNESS_MAIN