qcom_secure_hibernation.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/scatterlist.h>
#include <crypto/aead.h>
#include <soc/qcom/qcom_hibernation.h>
#include "../../kernel/power/power.h"
#include <linux/qseecom_kernel.h>
#include <trace/hooks/bl_hib.h>
#include <linux/reboot.h>

#define AUTH_SIZE		16
#define QSEECOM_ALIGN_SIZE	0x40
#define QSEECOM_ALIGN_MASK	(QSEECOM_ALIGN_SIZE - 1)
#define QSEECOM_ALIGN(x) \
	(((x) + QSEECOM_ALIGN_MASK) & (~QSEECOM_ALIGN_MASK))
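
/*
 * Request/response formats exchanged with the trusted application over
 * the QSEECOM shared buffer; the layouts presumably have to match the
 * TA side exactly.
 */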
struct s4app_time {
	uint16_t year;
	uint8_t month;
	uint8_t day;
	uint8_t hour;
	uint8_t minute;
};

struct wrap_req {
	struct s4app_time save_time;
};

struct wrap_rsp {
	uint8_t wrapped_key_buffer[WRAPPED_KEY_SIZE];
	uint32_t wrapped_key_size;
	uint8_t key_buffer[PAYLOAD_KEY_SIZE];
	uint32_t key_size;
};

struct unwrap_req {
	uint8_t wrapped_key_buffer[WRAPPED_KEY_SIZE];
	uint32_t wrapped_key_size;
	struct s4app_time curr_time;
};

struct unwrap_rsp {
	uint8_t key_buffer[PAYLOAD_KEY_SIZE];
	uint32_t key_size;
};

enum cmd_id {
	WRAP_KEY_CMD = 0,
	UNWRAP_KEY_CMD = 1,
};

struct cmd_req {
	enum cmd_id cmd;
	union {
		struct wrap_req wrapkey_req;
		struct unwrap_req unwrapkey_req;
	};
};

struct cmd_rsp {
	enum cmd_id cmd;
	union {
		struct wrap_rsp wrapkey_rsp;
		struct unwrap_rsp unwrapkey_rsp;
	};
	uint32_t status;
};

static struct qcom_crypto_params *params;
static struct crypto_aead *tfm;
static struct aead_request *req;
static u8 iv_size;
static u8 key[AES256_KEY_SIZE];
static struct qseecom_handle *app_handle;
static int first_encrypt;
static void *temp_out_buf;
static int pos;
static uint8_t *authslot_start;
static unsigned short root_swap_dev;
static struct work_struct save_params_work;
static struct completion write_done;
static unsigned char iv[IV_SIZE];
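
/*
 * Build a two-entry scatterlist: entry 0 carries the associated data
 * (AAD) and entry 1 the payload, the layout crypto_aead_encrypt()
 * expects once the AD length is set via aead_request_set_ad().
 */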
static void init_sg(struct scatterlist *sg, void *data, unsigned int size)
{
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], params->aad, sizeof(params->aad));
	sg_set_buf(&sg[1], data, size);
}

static void save_auth(uint8_t *out_buf)
{
	memcpy(authslot_start + (pos * AUTH_SIZE), out_buf + PAGE_SIZE,
	       AUTH_SIZE);
	pos++;
}
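
/*
 * Vendor hook: tell the core hibernation code to skip its own swap-map
 * write, presumably because this driver persists the auth tags and
 * crypto parameters itself in save_auth_and_params_to_disk().
 */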
static void skip_swap_map_write(void *data, bool *skip)
{
	*skip = true;
}
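
/*
 * Treat the IV as a big-endian counter: add one to the last byte and
 * propagate the carry toward the front, so every page is encrypted
 * with a unique nonce derived from the random starting IV.
 */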
static void increment_iv(unsigned char *iv, u8 size)
{
	int i;
	u16 num, carry = 1;

	i = size - 1;
	do {
		num = (u8)iv[i];
		num += carry;
		iv[i] = num & 0xFF;
		carry = (num > 0xFF) ? 1 : 0;
		i--;
	} while (i >= 0 && carry != 0);
}
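
/*
 * Encrypt one snapshot page in place with AES-256-GCM.  temp_out_buf
 * receives PAGE_SIZE bytes of ciphertext followed by the AUTH_SIZE-byte
 * tag; the ciphertext is copied back over the caller's page and the tag
 * is stashed separately via save_auth().
 */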
static void encrypt_page(void *data, void *buf)
{
	struct scatterlist sg_in[2], sg_out[2];
	struct crypto_wait wait;
	int ret;

	/* Allocate a request object */
	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto err_aead;
	crypto_init_wait(&wait);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	ret = crypto_aead_setauthsize(tfm, AUTH_SIZE);
	if (ret) {
		pr_err("Error setting authsize: %d\n", ret);
		goto out;
	}
	iv_size = crypto_aead_ivsize(tfm);
	if (iv_size && first_encrypt) {
		get_random_bytes(params->iv, iv_size);
		memcpy((void *)iv, params->iv, IV_SIZE);
	}
	ret = crypto_aead_setkey(tfm, key, AES256_KEY_SIZE);
	if (ret) {
		pr_err("Error setting key: %d\n", ret);
		goto out;
	}
	crypto_aead_clear_flags(tfm, ~0);
	memset(temp_out_buf, 0, 2 * PAGE_SIZE);
	init_sg(sg_in, buf, PAGE_SIZE);
	init_sg(sg_out, temp_out_buf, PAGE_SIZE + AUTH_SIZE);
	aead_request_set_ad(req, sizeof(params->aad));
	increment_iv(iv, IV_SIZE);
	aead_request_set_crypt(req, sg_in, sg_out, PAGE_SIZE, iv);
	/* Capture the encrypt return value so -EINPROGRESS is waited on */
	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);
	if (ret) {
		pr_err("Error encrypting data: %d\n", ret);
		goto out;
	}
	memcpy(buf, temp_out_buf, PAGE_SIZE);
	save_auth(temp_out_buf);
	if (first_encrypt)
		first_encrypt = 0;
out:
	aead_request_free(req);
	return;
err_aead:
	free_pages((unsigned long)temp_out_buf, 1);
	temp_out_buf = NULL;	/* avoid a double free in deinit_aes_encrypt() */
}

static int read_authpage_count(void)
{
	unsigned long total_auth_size;
	unsigned int num_auth_pages;

	total_auth_size = params->authslot_count * AUTH_SIZE;
	num_auth_pages = total_auth_size / PAGE_SIZE;
	if (total_auth_size % PAGE_SIZE)
		num_auth_pages += 1;
	return num_auth_pages;
}

static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = BLK_STS_OK;
	blk_start_plug(&hb->plug);
}

static void hib_finish_batch(struct hib_bio_batch *hb)
{
	blk_finish_plug(&hb->plug);
}

static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		pr_alert("I/O error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
	}
	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	if (bio->bi_status && !hb->error)
		hb->error = bio->bi_status;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);
	bio_put(bio);
}
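
/*
 * Submit one page of I/O at the given page offset on the hibernation
 * block device.  With a batch, the bio completes asynchronously through
 * hib_end_io(); without one, the call blocks until the bio finishes.
 */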
static int hib_submit_io(blk_opf_t opf, int op_flags, pgoff_t page_off,
			 void *addr, struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	/* bio_alloc() already sets the device and opf; fold in the flags */
	bio = bio_alloc(hiber_bdev, 1, opf | op_flags, GFP_NOIO | __GFP_HIGH);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		pr_err("Adding page to bio failed at %llu\n",
		       (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}
	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(bio);
	} else {
		error = submit_bio_wait(bio);
		bio_put(bio);
	}
	return error;
}

static int hib_wait_io(struct hib_bio_batch *hb)
{
	/*
	 * We are relying on the behavior of blk_plug that a thread with
	 * a plug will flush the plug list before sleeping.
	 */
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return blk_status_to_errno(hb->error);
}
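
/*
 * Write one page to a swap slot.  For batched (asynchronous) writes the
 * data is copied to a bounce page first, since the caller's buffer may
 * be reused before the bio completes; if no bounce page is available,
 * the write falls back to synchronous submission.
 */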
static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;
	if (hb) {
		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
					      __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_io(hb); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(GFP_NOIO |
						      __GFP_NOWARN |
						      __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				hb = NULL; /* Go synchronous */
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
}
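
/*
 * Deferred work: after the snapshot image is written, append the
 * accumulated authentication tags and the crypto parameters to freshly
 * allocated swap slots, then signal write_done so poweroff can proceed.
 */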
static void save_auth_and_params_to_disk(struct work_struct *work)
{
	int cur_slot;
	void *authpage;
	int params_slot;
	int authslot_count = 0;
	int authpage_count = read_authpage_count();
	struct hib_bio_batch hb;
	int error;

	hib_init_batch(&hb);
	/*
	 * Allocate a page to save the encryption params
	 */
	params_slot = alloc_swapdev_block(root_swap_dev);
	authpage = authslot_start;
	while (authslot_count < authpage_count) {
		cur_slot = alloc_swapdev_block(root_swap_dev);
		write_page(authpage, cur_slot, &hb);
		authpage = (unsigned char *)authpage + PAGE_SIZE;
		authslot_count++;
	}
	params->authslot_count = authslot_count;
	write_page(params, params_slot, &hb);
	error = hib_wait_io(&hb);
	if (error)
		pr_err("%s: I/O error writing auth/params: %d\n",
		       __func__, error);
	hib_finish_batch(&hb);
	complete_all(&write_done);
}

static void save_params_to_disk(void *data, unsigned short root_swap)
{
	root_swap_dev = root_swap;
	queue_work(system_wq, &save_params_work);
}
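
/*
 * Reboot notifier: on SYS_POWER_OFF, block until the deferred
 * auth/params write has completed, otherwise the saved image could be
 * missing its authentication data.
 */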
static int poweroff_notifier(struct notifier_block *nb,
			     unsigned long event, void *unused)
{
	switch (event) {
	case SYS_POWER_OFF:
		if (authslot_start)
			wait_for_completion(&write_done);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block poweroff_nb = {
	.notifier_call = poweroff_notifier,
};
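
/*
 * Ask the trusted application to generate a payload key.  The request
 * sits at the start of the QSEECOM shared buffer and the response at
 * the next QSEECOM_ALIGN_SIZE boundary after it.  On success the
 * wrapped key blob is kept in params (which is written to disk) while
 * the plaintext key stays only in the static key[] buffer.
 */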
static int get_key_from_ta(void)
{
	int ret;
	int req_len, rsp_len;
	struct cmd_req *req = (struct cmd_req *)app_handle->sbuf;
	struct cmd_rsp *rsp = NULL;

	req_len = sizeof(struct cmd_req);
	if (req_len & QSEECOM_ALIGN_MASK)
		req_len = QSEECOM_ALIGN(req_len);
	rsp = (struct cmd_rsp *)(app_handle->sbuf + req_len);
	rsp_len = sizeof(struct cmd_rsp);
	if (rsp_len & QSEECOM_ALIGN_MASK)
		rsp_len = QSEECOM_ALIGN(rsp_len);
	memset(req, 0, req_len);
	memset(rsp, 0, rsp_len);
	req->cmd = WRAP_KEY_CMD;
	req->wrapkey_req.save_time.hour = 4;
	rsp->wrapkey_rsp.wrapped_key_size = WRAPPED_KEY_SIZE;
	ret = qseecom_send_command(app_handle, req, req_len, rsp, rsp_len);
	if (!ret) {
		memcpy(params->key_blob, rsp->wrapkey_rsp.wrapped_key_buffer,
		       WRAPPED_KEY_SIZE);
		memcpy(key, rsp->wrapkey_rsp.key_buffer, AES256_KEY_SIZE);
	}
	return ret;
}

static int init_aead(void)
{
	if (!tfm) {
		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(tfm)) {
			long err = PTR_ERR(tfm);

			pr_err("Error crypto_alloc_aead: %ld\n", err);
			tfm = NULL;	/* don't leave an ERR_PTR behind */
			return err;
		}
	}
	return 0;
}

static int init_ta_and_set_key(void)
{
	const uint32_t shared_buffer_len = 4096;
	int ret, shutdown_ret;

	ret = qseecom_start_app(&app_handle, "secs2d", shared_buffer_len);
	if (ret) {
		pr_err("qseecom_start_app failed: %d\n", ret);
		return ret;
	}
	ret = get_key_from_ta();
	if (ret)
		pr_err("get_key_from_ta returned %d\n", ret);
	shutdown_ret = qseecom_shutdown_app(&app_handle);
	if (shutdown_ret)
		pr_err("qseecom_shutdown_app failed: %d\n", shutdown_ret);
	/* A failed key fetch takes precedence over a failed shutdown */
	return ret ? ret : shutdown_ret;
}

static int alloc_auth_memory(void)
{
	unsigned long total_auth_size;

	/* Number of Auth slots is equal to the number of image pages */
	params->authslot_count = snapshot_get_image_size();
	total_auth_size = params->authslot_count * AUTH_SIZE;
	authslot_start = vmalloc(total_auth_size);
	if (!authslot_start)
		return -ENOMEM;
	return 0;
}
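
/*
 * Tear down the crypto state after hibernation: release the transform
 * and the temporary output buffer, and zeroize the plaintext key and
 * the wrapped key blob before freeing the parameter block.
 */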
void deinit_aes_encrypt(void)
{
	if (temp_out_buf) {
		free_pages((unsigned long)temp_out_buf, 1);
		temp_out_buf = NULL;
	}
	if (tfm) {
		crypto_free_aead(tfm);
		tfm = NULL;
	}
	memset(key, 0, AES256_KEY_SIZE);
	if (params) {
		memset(params->key_blob, 0, WRAPPED_KEY_SIZE);
		kfree(params);
		params = NULL;
	}
}
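
/*
 * PM notifier: on PM_HIBERNATION_PREPARE, allocate the parameter block,
 * the AEAD transform, and the two-page output buffer, and fetch the
 * wrapped key from the trusted application; on PM_POST_HIBERNATION,
 * undo all of it.
 */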
static int hibernate_pm_notifier(struct notifier_block *nb,
				 unsigned long event, void *unused)
{
	int ret;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
		params = kmalloc(sizeof(struct qcom_crypto_params),
				 GFP_KERNEL);
		if (!params)
			return NOTIFY_BAD;
		ret = init_aead();
		if (ret) {
			pr_err("%s: Failed init_aead(): %d\n", __func__, ret);
			goto err_aead;
		}
		ret = init_ta_and_set_key();
		if (ret) {
			pr_err("%s: Failed to init TA: %d\n", __func__, ret);
			goto err_setkey;
		}
		temp_out_buf = (void *)__get_free_pages(GFP_KERNEL, 1);
		if (!temp_out_buf) {
			pr_err("%s: Failed to allocate output buffer\n",
			       __func__);
			goto err_setkey;
		}
		init_completion(&write_done);
		break;
	case PM_POST_HIBERNATION:
		deinit_aes_encrypt();
		break;
	default:
		/* Other PM events (e.g. suspend) are not our concern */
		break;
	}
	return NOTIFY_DONE;

err_setkey:
	memset(params->key_blob, 0, WRAPPED_KEY_SIZE);
	memset(key, 0, AES256_KEY_SIZE);
	crypto_free_aead(tfm);
	tfm = NULL;
err_aead:
	kfree(params);
	params = NULL;
	return NOTIFY_BAD;
}

static struct notifier_block pm_nb = {
	.notifier_call = hibernate_pm_notifier,
};

static void init_aes_encrypt(void *data, void *unused)
{
	int ret;

	/*
	 * Encryption results in two things:
	 * 1. Encrypted data
	 * 2. Auth
	 * Save the Auth data of all pages locally and return only the
	 * encrypted page to the caller. Allocate memory to save the auth.
	 */
	ret = alloc_auth_memory();
	if (ret) {
		pr_err("%s: Failed alloc_auth_memory %d\n", __func__, ret);
		goto err_auth;
	}
	first_encrypt = 1;
	pos = 0;
	memcpy(params->aad, "SECURE_S2D!!", sizeof(params->aad));
	params->authsize = AUTH_SIZE;
	return;

err_auth:
	memset(params->key_blob, 0, WRAPPED_KEY_SIZE);
	memset(key, 0, AES256_KEY_SIZE);
	crypto_free_aead(tfm);
	tfm = NULL;
	kfree(params);
	params = NULL;
}

static int __init qcom_secure_hibernation_init(void)
{
	int ret;

	/* Initialize the work item before any hook can queue it */
	INIT_WORK(&save_params_work, save_auth_and_params_to_disk);
	register_trace_android_vh_encrypt_page(encrypt_page, NULL);
	register_trace_android_vh_init_aes_encrypt(init_aes_encrypt, NULL);
	register_trace_android_vh_skip_swap_map_write(skip_swap_map_write,
						      NULL);
	register_trace_android_vh_post_image_save(save_params_to_disk, NULL);
	ret = register_pm_notifier(&pm_nb);
	if (ret) {
		pr_err("%s: Failed to register PM notifier: %d\n",
		       __func__, ret);
		return ret;
	}
	ret = register_reboot_notifier(&poweroff_nb);
	if (ret) {
		pr_err("%s: Failed to register reboot notifier: %d\n",
		       __func__, ret);
		unregister_pm_notifier(&pm_nb);
		return ret;
	}
	return 0;
}
module_init(qcom_secure_hibernation_init);

MODULE_DESCRIPTION("Framework to encrypt a page using a trusted application");
MODULE_LICENSE("GPL");