rkp_test.c
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/rkp.h>
#include <linux/delay.h>
#include <linux/slab.h>

/*
 * BIT[1:0]  TYPE   PXN BIT
 *   01      BLOCK    53   for levels 0, 1, 2  (L012_BLOCK_PXN)
 *   11      TABLE    59   for levels 0, 1, 2  (L012_TABLE_PXN)
 *   11      PAGE     53   for level 3         (L3_PAGE_PXN)
 */
#define L012_BLOCK_PXN      (_AT(pmdval_t, 1) << 53)
#define L012_TABLE_PXN      (_AT(pmdval_t, 1) << 59)
#define L3_PAGE_PXN         (_AT(pmdval_t, 1) << 53)

#define MEM_END             0xfffffffffffff000 /* 4K aligned */
#define DESC_MASK           0xFFFFFFFFF000

#define RKP_PA_READ         0
#define RKP_PA_WRITE        1

/* output buffer */
#define RKP_BUF_SIZE        8192
#define RKP_LINE_MAX        80

/* FIMC */
#define CDH_SIZE            SZ_128K /* CDH: Camera Debug Helper */
#define IS_RCHECKER_SIZE_RO (SZ_4M + SZ_1M)
#define IS_RCHECKER_SIZE_RW (SZ_256K)
#define RCHECKER_SIZE       (IS_RCHECKER_SIZE_RO + IS_RCHECKER_SIZE_RW)

#ifdef CONFIG_KASAN
#define LIB_OFFSET          (VMALLOC_START + 0xF6000000 - 0x8000000)
#else
#define LIB_OFFSET          (VMALLOC_START + 0x1000000000UL + 0xF6000000 - 0x8000000)
#endif
#define __LIB_START         (LIB_OFFSET + 0x04000000 - CDH_SIZE)
#define LIB_START           (__LIB_START)

#define VRA_LIB_ADDR        (LIB_START + CDH_SIZE)
#define VRA_LIB_SIZE        (SZ_512K + SZ_256K)
#define DDK_LIB_ADDR        (LIB_START + VRA_LIB_SIZE + CDH_SIZE)
#define DDK_LIB_SIZE        ((SZ_2M + SZ_1M + SZ_256K) + SZ_1M + RCHECKER_SIZE)
#define RTA_LIB_ADDR        (LIB_START + VRA_LIB_SIZE + DDK_LIB_SIZE + CDH_SIZE)
#define RTA_LIB_SIZE        (SZ_2M + SZ_2M)

#define VRA_CODE_SIZE       SZ_512K
#define VRA_DATA_SIZE       SZ_256K
#define DDK_CODE_SIZE       (SZ_2M + SZ_1M + SZ_256K + IS_RCHECKER_SIZE_RO)
#define DDK_DATA_SIZE       SZ_1M
#define RTA_CODE_SIZE       SZ_2M
#define RTA_DATA_SIZE       SZ_2M
#define LIB_END             (RTA_LIB_ADDR + RTA_CODE_SIZE + RTA_DATA_SIZE)
static char rkp_test_buf[RKP_BUF_SIZE];
static unsigned long rkp_test_len;
static unsigned long prot_user_l2 = 1;
static DEFINE_RAW_SPINLOCK(par_lock);
static u64 *ha1;
static u64 *ha2;

struct test_data {
        u64 iter;
        u64 pxn;
        u64 no_pxn;
        u64 read;
        u64 write;
        u64 cred_bkptr_match;
        u64 cred_bkptr_mismatch;
};
static void buf_print(const char *fmt, ...)
{
        va_list aptr;

        if (rkp_test_len > RKP_BUF_SIZE - RKP_LINE_MAX) {
                pr_err("RKP_TEST: output buffer full\n");
                return;
        }
        va_start(aptr, fmt);
        /* vscnprintf() bounds the write and returns the bytes actually stored */
        rkp_test_len += vscnprintf(rkp_test_buf + rkp_test_len,
                                   RKP_BUF_SIZE - rkp_test_len, fmt, aptr);
        va_end(aptr);
}
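
/*
 * Ask the uH/RKP hypervisor for the stage-2 permission of @va: the
 * hypercall writes an address-translation result into the shared page
 * (ha1), and bit 0 of that result flags a write fault, i.e. the page is
 * read-only at stage 2. The exact result encoding is an assumption
 * inferred from this file's usage, not confirmed by a public spec.
 * Returns true if @va is RO, false if it is writable.
 */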
static bool hyp_check_page_ro(u64 va)
{
        unsigned long flags;
        u64 par = 0;

        raw_spin_lock_irqsave(&par_lock, flags);
        uh_call(UH_APP_RKP, RKP_TEST_GET_PAR, (unsigned long)va, RKP_PA_WRITE, 0, 0);
        par = *ha1;
        raw_spin_unlock_irqrestore(&par_lock, flags);
        return (par & 0x1) ? true : false;
}
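
/*
 * Recursively walk a page table and count, per level, how many
 * next-level tables are RO vs. writable under stage 2. Descriptor bits
 * [1:0] == 0b11 identify a table entry at levels 0-2; the recursion
 * stops before level 3, whose entries map pages rather than tables.
 */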
static void hyp_check_l23pgt_rw(u64 *pg_l, unsigned int level, struct test_data *test)
{
        unsigned int i;

        /* only levels 1 and 2 are walked here */
        if (level >= 3)
                return;
        for (i = 0; i < 512; i++) {
                if ((pg_l[i] & 3) == 3) {
                        test[level].iter++;
                        if (hyp_check_page_ro((u64)phys_to_virt(pg_l[i] & DESC_MASK)))
                                test[level].read++;
                        else
                                test[level].write++;
                        hyp_check_l23pgt_rw((u64 *)phys_to_virt(pg_l[i] & DESC_MASK),
                                            level + 1, test);
                }
        }
}
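
/*
 * Resolve the PMD entry covering @addr in @mm, or NULL if any level of
 * the walk is empty. The pud_offset() cast reflects this kernel's
 * folded p4d level.
 */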
static pmd_t *get_addr_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset((p4d_t *)pgd, addr);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;
        return pmd;
}
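
/*
 * For every live user address space, verify that the L1 page table
 * (and, when RKP protects user L2 tables, the L2 tables as well) is
 * mapped read-only at stage 2. prot_user_l2 is set by
 * test_case_user_pxn(), which runs first.
 */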
static int test_case_user_pgtable_ro(void)
{
        struct task_struct *task;
        struct test_data test[3] = { {0}, {0}, {0} };
        struct mm_struct *mm = NULL;
        int i;

        for_each_process(task) {
                mm = task->active_mm;
                if (!mm || !mm->context.id.counter || !mm->pgd)
                        continue;
                if (hyp_check_page_ro((u64)mm->pgd))
                        test[0].read++;
                else
                        test[0].write++;
                test[0].iter++;
                hyp_check_l23pgt_rw((u64 *)mm->pgd, 1, test);
        }
        for (i = 0; i < 3; i++) {
                buf_print("\t\tL%d TOTAL PAGES %6llu | READ ONLY %6llu | WRITABLE %6llu\n",
                          i + 1, test[i].iter, test[i].read, test[i].write);
        }
        /* if user L2 tables are not RKP-protected, only L1 must be RO */
        if (!prot_user_l2 && test[0].write == 0)
                return 0;
        /* otherwise both L1 and L2 tables must be RO */
        if (test[0].write == 0 && test[1].write == 0)
                return 0; /* pass */
        else
                return 1; /* fail */
}
static int test_case_kernel_pgtable_ro(void)
{
        struct test_data test[3] = { {0}, {0}, {0} };
        int i = 0;

        /* check swapper_pg_dir itself, then walk its lower levels */
        test[0].iter++;
        if (hyp_check_page_ro((u64)swapper_pg_dir))
                test[0].read++;
        else
                test[0].write++;
        hyp_check_l23pgt_rw((u64 *)swapper_pg_dir, 1, test);
        for (i = 0; i < 3; i++)
                buf_print("\t\tL%d TOTAL PAGE TABLES %6llu | READ ONLY %6llu | WRITABLE %6llu\n",
                          i + 1, test[i].iter, test[i].read, test[i].write);
        if (test[0].write == 0 && test[1].write == 0)
                return 0;
        else
                return 1;
}
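
/*
 * The kernel text section starts and ends on page rather than section
 * boundaries, so its head and tail are mapped through L3 tables. Those
 * L3 tables must themselves be read-only at stage 2.
 */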
static int test_case_kernel_l3pgt_ro(void)
{
        int rw = 0, ro = 0, i = 0;
        u64 addrs[] = {
                (u64)_text,
                (u64)_etext
        };
        int len = sizeof(addrs) / sizeof(u64);
        pmd_t *pmd;
        u64 pgt_addr;

        for (i = 0; i < len; i++) {
                pmd = get_addr_pmd(&init_mm, addrs[i]);
                if (!pmd) /* kernel text is expected to be mapped */
                        continue;
                pgt_addr = (u64)phys_to_virt((u64)pmd_val(*pmd) & DESC_MASK);
                if (hyp_check_page_ro(pgt_addr))
                        ro++;
                else
                        rw++;
        }
        buf_print("\t\tKERNEL TEXT HEAD TAIL L3PGT | RO %6u | RW %6u\n", ro, rw);
        return (rw == 0) ? 0 : 1;
}
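
/*
 * Classify the mapping of @addr: bump *xn if the leaf descriptor
 * (block, table, or L3 page) carries the PXN bit, *x otherwise.
 * Executable pages above __end_rodata are additionally re-checked with
 * the hypervisor, which can mark certain executable pages (apparently
 * the dynamically loaded FIMC library code) as valid; those are counted
 * in *v_x.
 */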
/* returns true if @addr is mapped, false otherwise */
static bool page_pxn_set(unsigned long addr, u64 *xn, u64 *x, u64 *v_x)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return false;
        pud = pud_offset((p4d_t *)pgd, addr);
        if (pud_none(*pud))
                return false;
        if (pud_sect(*pud)) {
                if ((pud_val(*pud) & L012_BLOCK_PXN) > 0)
                        *xn += 1;
                else
                        *x += 1;
                return true;
        }
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;
        if (pmd_sect(*pmd)) {
                if ((pmd_val(*pmd) & L012_BLOCK_PXN) > 0)
                        *xn += 1;
                else
                        *x += 1;
                return true;
        }
        if ((pmd_val(*pmd) & L012_TABLE_PXN) > 0) {
                *xn += 1;
                return true;
        }
        /* the PMD is a table (e.g. kernel text head/tail), so check L3 */
        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return false;
        if ((pte_val(*pte) & L3_PAGE_PXN) > 0) {
                *xn += 1;
        } else {
                if (addr >= (u64)__end_rodata) {
                        u64 res = 0;

                        uh_call(UH_APP_RKP, RKP_TEST_TEXT_VALID, addr, (u64)&res, 0, 0);
                        if (res)
                                *v_x += 1;
                }
                *x += 1;
        }
        return true;
}
static void count_pxn(unsigned long pxn, int level, struct test_data *test)
{
        test[level].iter++;
        if (pxn)
                test[level].pxn++;
        else
                test[level].no_pxn++;
}
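
/*
 * walk_pgd() -> walk_pud() -> walk_pmd() -> walk_pte() enumerate a user
 * address space top-down, feeding every table, block, and page
 * descriptor into count_pxn() so test_case_user_pxn() can report PXN
 * coverage per level. PUD sections are skipped without being counted;
 * user mappings are apparently not expected to use 1G blocks here.
 */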
static void walk_pte(pmd_t *pmd, int level, struct test_data *test)
{
        pte_t *pte = pte_offset_kernel(pmd, 0UL);
        unsigned int i;
        unsigned long prot;

        for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
                if (pte_none(*pte))
                        continue;
                prot = pte_val(*pte) & L3_PAGE_PXN;
                count_pxn(prot, level, test);
        }
}

static void walk_pmd(pud_t *pud, int level, struct test_data *test)
{
        pmd_t *pmd = pmd_offset(pud, 0UL);
        unsigned int i;
        unsigned long prot;

        for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
                if (pmd_none(*pmd)) {
                        continue;
                } else if (pmd_sect(*pmd)) {
                        prot = pmd_val(*pmd) & L012_BLOCK_PXN;
                        count_pxn(prot, level, test);
                } else {
                        /*
                         * For user space, every L2 entry, block or table,
                         * should have PXN. Only the kernel text head/tail
                         * L2 tables and the kernel text middle L2 blocks
                         * may lack it.
                         */
                        BUG_ON(pmd_bad(*pmd));
                        prot = pmd_val(*pmd) & L012_TABLE_PXN;
                        count_pxn(prot, level, test);
                        walk_pte(pmd, level + 1, test);
                }
        }
}

static void walk_pud(pgd_t *pgd, int level, struct test_data *test)
{
        pud_t *pud = pud_offset((p4d_t *)pgd, 0UL);
        unsigned int i;

        for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
                if (pud_none(*pud) || pud_sect(*pud))
                        continue;
                BUG_ON(pud_bad(*pud));
                walk_pmd(pud, level, test);
        }
}

#define rkp_pgd_table (_AT(pgdval_t, 1) << 1)
#define rkp_pgd_bad(pgd) (!(pgd_val(pgd) & rkp_pgd_table))

static void walk_pgd(struct mm_struct *mm, int level, struct test_data *test)
{
        pgd_t *pgd = pgd_offset(mm, 0UL);
        unsigned int i;
        unsigned long prot;

        for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
                if (rkp_pgd_bad(*pgd))
                        continue;
                /* table entry */
                prot = pgd_val(*pgd) & L012_TABLE_PXN;
                count_pxn(prot, level, test);
                walk_pud(pgd, level + 1, test);
        }
}
#define MB            (1024 * 1024)
#define ROBUFS_MAX    (20 * MB / PAGE_SIZE)
#define ALLOC_DELAY   10 /* ms */
#define ALLOC_TRY_MAX 20
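
/*
 * Exercise the hypervisor-protected (RO) page pool: allocate up to 20MB
 * of pages through rkp_ro_alloc(), retrying with a delay when the pool
 * is momentarily exhausted, then free everything. The test only checks
 * that alloc/free survive; it reports pass whenever it completes.
 */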
static int test_case_guest_mem_alloc_and_free(void)
{
        u64 *robufs;
        int i, size = 0, alloc_try_cnt;

        robufs = kmalloc_array(ROBUFS_MAX, sizeof(u64), GFP_KERNEL);
        if (!robufs)
                return 1;
        buf_print("guest_mem allocation start\n");
        for (i = 0; i < ROBUFS_MAX; i++) {
                alloc_try_cnt = 0;
                robufs[i] = (u64)rkp_ro_alloc();
                while (!robufs[i] && alloc_try_cnt < ALLOC_TRY_MAX) {
                        alloc_try_cnt++;
                        pr_info("ALLOC_TRY_CNT %d, waiting %d ms\n", alloc_try_cnt, ALLOC_DELAY);
                        buf_print("ALLOC_TRY_CNT %d, waiting %d ms\n", alloc_try_cnt, ALLOC_DELAY);
                        msleep(ALLOC_DELAY);
                        robufs[i] = (u64)rkp_ro_alloc();
                }
                if (alloc_try_cnt >= ALLOC_TRY_MAX)
                        break;
        }
        size = i;
        pr_info("guest_mem allocation done. allocated size: 0x%lx\n", size * PAGE_SIZE);
        buf_print("guest_mem allocation done. allocated size: 0x%lx\n", size * PAGE_SIZE);
        msleep(100);
        pr_info("guest_mem free start\n");
        buf_print("guest_mem free start\n");
        for (i = 0; i < size; i++)
                rkp_ro_free((void *)robufs[i]);
        pr_info("guest_mem free done.\n");
        buf_print("guest_mem free done.\n");
        kfree(robufs);
        return 0;
}
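
/*
 * Verify that no user-space mapping is kernel-executable: either every
 * L1 entry already carries PXN (hierarchically covering the lower
 * levels, so the RO check can skip L2), or every L2 entry does. The
 * outcome is recorded in prot_user_l2 for test_case_user_pgtable_ro().
 */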
static int test_case_user_pxn(void)
{
        struct task_struct *task = NULL;
        struct mm_struct *mm = NULL;
        struct test_data test[3] = { {0}, {0}, {0} };
        int i = 0;

        for_each_process(task) {
                mm = task->active_mm;
                if (!mm || !mm->context.id.counter || !mm->pgd)
                        continue;
                /* check whether the PXN bit is set at each level */
                walk_pgd(mm, 0, test);
        }
        for (i = 0; i < 3; i++) {
                buf_print("\t\tL%d TOTAL ENTRIES %6llu | PXN %6llu | NO_PXN %6llu\n",
                          i + 1, test[i].iter, test[i].pxn, test[i].no_pxn);
        }
        /* pass if all L1 entries are PXN, or failing that, all L2 entries */
        if (test[0].no_pxn == 0) {
                prot_user_l2 = 0;
                return 0;
        } else if (test[1].no_pxn == 0) {
                prot_user_l2 = 1;
                return 0;
        } else {
                return 1;
        }
}
struct mem_range {
        u64 start_va;
        u64 size; /* in bytes */
        char *info;
        bool no_rw;
        bool no_x;
};

struct test_case {
        int (*fn)(void);
        char *describe;
};
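
/*
 * Sweep the kernel VA space page by page and enforce W^X: text ranges
 * must be RO+X, rodata RO+XN, and everything else XN. A range whose
 * executable pages are all hypervisor-validated (x == v_x) is treated
 * as acceptable and skipped rather than flagged.
 */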
static int test_case_kernel_range_rwx(void)
{
        int ret = 0;
        u64 ro = 0, rw = 0;
        u64 xn = 0, x = 0;
        u64 v_x = 0;
        int i;
        u64 j;
        bool mapped = false;
        u64 va_temp;
        struct mem_range test_ranges[] = {
                {(u64)VMALLOC_START, ((u64)_text) - ((u64)VMALLOC_START), "VMALLOC - STEXT", false, true},
                {((u64)_text), ((u64)_etext) - ((u64)_text), "STEXT - ETEXT", true, false},
                {((u64)_etext), ((u64)__end_rodata) - ((u64)_etext), "ETEXT - ERODATA", true, true},
#ifdef CONFIG_USE_DIRECT_IS_CONTROL /* FIMC */
                {((u64)__end_rodata), VRA_LIB_ADDR - ((u64)__end_rodata), "ERODATA - S_FIMC", false, true},
                {VRA_LIB_ADDR, VRA_CODE_SIZE, "VRA CODE", true, false},
                {VRA_LIB_ADDR + VRA_CODE_SIZE, VRA_DATA_SIZE, "VRA DATA", false, true},
                {DDK_LIB_ADDR, DDK_CODE_SIZE, "DDK CODE", true, false},
                {DDK_LIB_ADDR + DDK_CODE_SIZE, DDK_DATA_SIZE, "DDK DATA", false, true},
                {RTA_LIB_ADDR, RTA_CODE_SIZE, "RTA CODE", true, false},
                {RTA_LIB_ADDR + RTA_CODE_SIZE, RTA_DATA_SIZE, "RTA DATA", false, true},
                {LIB_END, MEM_END - LIB_END, "E_FIMC - MEM END", false, true},
#else
                {((u64)__end_rodata), MEM_END - ((u64)__end_rodata), "ERODATA - MEM_END", false, true},
#endif
        };
        int len = sizeof(test_ranges) / sizeof(struct mem_range);

        buf_print("\t\t| MEMORY RANGES | %16s - %16s | %8s %8s %8s %8s\n",
                  "START", "END", "RO", "RW", "PXN", "PX");
        for (i = 0; i < len; i++) {
                for (j = 0; j < test_ranges[i].size / PAGE_SIZE; j++) {
                        va_temp = test_ranges[i].start_va + j * PAGE_SIZE;
                        mapped = page_pxn_set(va_temp, &xn, &x, &v_x);
                        if (!mapped)
                                continue;
                        /* stage-2 permission, only for mapped pages */
                        if (hyp_check_page_ro(va_temp))
                                ro += 1;
                        else
                                rw += 1;
                }
                buf_print("\t\t|%s| %016llx - %016llx | %8llu %8llu %8llu %8llu\n",
                          test_ranges[i].info, test_ranges[i].start_va,
                          test_ranges[i].start_va + test_ranges[i].size,
                          ro, rw, xn, x);
                if (test_ranges[i].no_rw && rw != 0) {
                        buf_print("RKP_TEST FAILED, NO RW PAGE ALLOWED, rw=%llu\n", rw);
                        ret++;
                }
                if (test_ranges[i].no_x && x != 0) {
                        if (x == v_x) /* all X pages hypervisor-validated */
                                goto next_range;
                        buf_print("RKP_TEST FAILED, NO X PAGE ALLOWED, x=%llu\n", x);
                        ret++;
                }
                if (rw != 0 && x != 0) {
                        if (x == v_x)
                                goto next_range;
                        buf_print("RKP_TEST FAILED, NO RWX PAGE ALLOWED, rw=%llu, x=%llu\n", rw, x);
                        ret++;
                }
next_range:
                ro = 0; rw = 0;
                xn = 0; x = 0;
                v_x = 0;
        }
        return ret;
}
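
/*
 * /proc/rkp_test read handler: the first read runs every test case and
 * returns the accumulated report; subsequent reads return EOF.
 */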
ssize_t rkp_read(struct file *filep, char __user *buffer, size_t count, loff_t *ppos)
{
        int ret = 0, temp_ret = 0, i = 0;
        struct test_case tc_funcs[] = {
                {test_case_user_pxn, "TEST USER_PXN"},
                {test_case_user_pgtable_ro, "TEST USER_PGTABLE_RO"},
                {test_case_kernel_pgtable_ro, "TEST KERNEL_PGTABLE_RO"},
                {test_case_kernel_l3pgt_ro, "TEST KERNEL TEXT HEAD TAIL L3PGT RO"},
                {test_case_kernel_range_rwx, "TEST KERNEL_RANGE_RWX"},
                {test_case_guest_mem_alloc_and_free, "TEST GUEST_MEM_ALLOC_AND_FREE"},
        };
        int tc_num = sizeof(tc_funcs) / sizeof(struct test_case);
        static bool done;

        if (done)
                return 0;
        done = true;
        if (!ha1 || !ha2) {
                buf_print("ERROR RKP_TEST ha1/ha2 is NULL\n");
                goto error;
        }
        for (i = 0; i < tc_num; i++) {
                buf_print("RKP_TEST_CASE %d ===========> RUNNING %s\n", i, tc_funcs[i].describe);
                temp_ret = tc_funcs[i].fn();
                if (temp_ret) {
                        buf_print("RKP_TEST_CASE %d ===========> %s FAILED WITH %d ERRORS\n",
                                  i, tc_funcs[i].describe, temp_ret);
                } else {
                        buf_print("RKP_TEST_CASE %d ===========> %s PASSED\n", i, tc_funcs[i].describe);
                }
                ret += temp_ret;
        }
        if (ret)
                buf_print("RKP_TEST SUMMARY: FAILED WITH %d ERRORS\n", ret);
        else
                buf_print("RKP_TEST SUMMARY: PASSED\n");
error:
        return simple_read_from_buffer(buffer, count, ppos, rkp_test_buf, rkp_test_len);
}
static const struct proc_ops rkp_proc_fops = {
        .proc_read = rkp_read,
};
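
/*
 * Module init: create /proc/rkp_test and hand the hypervisor one zeroed
 * page to share results through. RKP_TEST_INIT is assumed to register
 * @va with uH; ha1/ha2 then alias the first two u64 slots of that page.
 */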
static int __init rkp_test_init(void)
{
        u64 va;

        if (proc_create("rkp_test", 0444, NULL, &rkp_proc_fops) == NULL) {
                pr_err("RKP_TEST: Error creating proc entry\n");
                return -ENOMEM;
        }
        va = __get_free_page(GFP_KERNEL | __GFP_ZERO);
        if (!va) {
                /* don't leak the proc entry on failure */
                remove_proc_entry("rkp_test", NULL);
                return -ENOMEM;
        }
        uh_call(UH_APP_RKP, RKP_TEST_INIT, va, 0, 0, 0);
        ha1 = (u64 *)va;
        ha2 = (u64 *)(va + 8);
        return 0;
}

static void __exit rkp_test_exit(void)
{
        uh_call(UH_APP_RKP, RKP_TEST_EXIT, (u64)ha1, 0, 0, 0);
        free_page((unsigned long)ha1);
        remove_proc_entry("rkp_test", NULL);
}

module_init(rkp_test_init);
module_exit(rkp_test_exit);

MODULE_LICENSE("GPL");