/* ota_crypto.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QTI Over the Air (OTA) Crypto driver
  4. *
  5. * Copyright (c) 2010-2014,2017-2020 The Linux Foundation. All rights reserved.
  6. * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  7. */
  8. #include <linux/types.h>
  9. #include <linux/module.h>
  10. #include <linux/device.h>
  11. #include <linux/platform_device.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/kernel.h>
  14. #include <linux/dmapool.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/init.h>
  18. #include <linux/module.h>
  19. #include <linux/fs.h>
  20. #include <linux/cdev.h>
  21. #include <linux/uaccess.h>
  22. #include <linux/debugfs.h>
  23. #include <linux/cache.h>
  24. #include <linux/version.h>
  25. #include "linux/qcota.h"
  26. #include "qce.h"
  27. #include "qce_ota.h"
/* Types of OTA crypto operations a request can carry. */
enum qce_ota_oper_enum {
	QCE_OTA_F8_OPER = 0,		/* single-packet F8 cipher */
	QCE_OTA_MPKT_F8_OPER = 1,	/* multi-packet F8, fixed packet size */
	QCE_OTA_F9_OPER = 2,		/* F9 integrity (MAC) */
	QCE_OTA_VAR_MPKT_F8_OPER = 3,	/* multi-packet F8, per-packet sizes */
	QCE_OTA_OPER_LAST
};
struct ota_dev_control;

/*
 * One in-flight OTA request. Allocated on the ioctl caller's stack;
 * completion is signalled from the engine's done tasklet.
 */
struct ota_async_req {
	struct list_head rlist;		/* entry on podev->ready_commands */
	struct completion complete;	/* signalled when the request finishes */
	int err;			/* 0 on success, -errno on failure */
	enum qce_ota_oper_enum op;	/* selects which member of 'req' is valid */
	union {
		struct qce_f9_req f9_req;
		struct qce_f8_req f8_req;
		struct qce_f8_multi_pkt_req f8_mp_req;
		struct qce_f8_variable_multi_pkt_req f8_v_mp_req;
	} req;
	/* current sub-packet index for QCE_OTA_VAR_MPKT_F8_OPER */
	unsigned int steps;
	struct ota_qce_dev *pqce;	/* engine servicing this request */
};
/*
 * Register ourselves as a char device /dev/qcota0 to be able to access the ota
 * from userspace.
 */
#define QCOTA_DEV	"qcota0"

/* Driver-wide state shared by all OTA crypto engines. */
struct ota_dev_control {
	/* char device */
	struct cdev cdev;
	int minor;
	/* requests waiting for a free engine; protected by 'lock' */
	struct list_head ready_commands;
	unsigned int magic;		/* OTA_MAGIC; sanity check on handles */
	struct list_head qce_dev;	/* registered engines; protected by 'lock' */
	spinlock_t lock;
	struct mutex register_lock;	/* serializes probe/remove bookkeeping */
	bool registered;
	uint32_t total_units;		/* number of engines registered */
};
/* Per-engine (one per platform device) state. */
struct ota_qce_dev {
	struct list_head qlist;		/* entry on podev->qce_dev */
	/* qce handle */
	void *qce;
	/* platform device */
	struct platform_device *pdev;
	struct ota_async_req *active_command;	/* request in flight, or NULL */
	struct tasklet_struct done_tasklet;	/* runs req_done() */
	struct ota_dev_control *podev;
	uint32_t unit;			/* engine index, used by the stats dump */
	u64 total_req;			/* requests started on this engine */
	u64 err_req;			/* requests that failed on this engine */
};
/* Magic value ("OTAC") stored in ota_dev_control to validate handles. */
#define OTA_MAGIC 0x4f544143

static long qcota_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg);
static int qcota_open(struct inode *inode, struct file *file);
static int qcota_release(struct inode *inode, struct file *file);
static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);

static const struct file_operations qcota_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcota_ioctl,
	.open = qcota_open,
	.release = qcota_release,
};

/* The single driver-wide control block. */
static struct ota_dev_control qcota_dev = {
	.magic = OTA_MAGIC,
};

static dev_t qcota_device_no;
static struct class *driver_class;
static struct device *class_dev;

#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 1024
/* Aggregate request/outcome counters exposed through debugfs. */
struct qcota_stat {
	u64 f8_req;
	u64 f8_mp_req;
	u64 f8_v_mp_req;
	u64 f9_req;
	u64 f8_op_success;
	u64 f8_op_fail;
	u64 f8_mp_op_success;
	u64 f8_mp_op_fail;
	u64 f8_v_mp_op_success;
	u64 f8_v_mp_op_fail;
	u64 f9_op_success;
	u64 f9_op_fail;
};
static struct qcota_stat _qcota_stat;

static struct dentry *_debug_dent;	/* debugfs directory "qcota" */
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcota;		/* dummy private data for the stats file */
  119. static struct ota_dev_control *qcota_control(void)
  120. {
  121. return &qcota_dev;
  122. }
  123. static int qcota_open(struct inode *inode, struct file *file)
  124. {
  125. struct ota_dev_control *podev;
  126. podev = qcota_control();
  127. if (podev == NULL) {
  128. pr_err("%s: no such device %d\n", __func__,
  129. MINOR(inode->i_rdev));
  130. return -ENOENT;
  131. }
  132. file->private_data = podev;
  133. return 0;
  134. }
  135. static int qcota_release(struct inode *inode, struct file *file)
  136. {
  137. struct ota_dev_control *podev;
  138. podev = file->private_data;
  139. if (podev != NULL && podev->magic != OTA_MAGIC) {
  140. pr_err("%s: invalid handle %pK\n",
  141. __func__, podev);
  142. }
  143. file->private_data = NULL;
  144. return 0;
  145. }
/*
 * Advance a variable multi-packet F8 request to its next sub-packet.
 *
 * The sub-packets were packed back-to-back (L1-cache aligned) into one
 * kernel buffer by the ioctl handler and are ciphered in place, so both
 * data_in and data_out advance to the same position.
 *
 * Returns true if another sub-packet remains and the embedded qce_f8_req
 * was updated for it; false when the request is finished or already failed.
 */
static bool _next_v_mp_req(struct ota_async_req *areq)
{
	unsigned char *p;

	if (areq->err)
		return false;
	if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
		return false;

	/* step past the current packet, then re-align like the packer did */
	p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
	p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
	p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);

	areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
	areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
	areq->req.f8_v_mp_req.qce_f8_req.data_len =
		areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;

	/* each sub-packet is ciphered with the next COUNT-C value */
	areq->req.f8_v_mp_req.qce_f8_req.count_c++;
	return true;
}
/*
 * Tasklet handler run after the QCE completes a request on this engine.
 *
 * Either advances a variable multi-packet request to its next sub-packet,
 * or completes the finished request and starts the next queued command.
 * podev->lock is dropped around start_req() calls; the request still
 * belongs to this engine via active_command while the lock is released.
 */
static void req_done(unsigned long data)
{
	struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
	struct ota_dev_control *podev = pqce->podev;
	struct ota_async_req *areq;
	unsigned long flags;
	struct ota_async_req *new_req = NULL;
	int ret = 0;
	bool schedule = true;

	spin_lock_irqsave(&podev->lock, flags);
	areq = pqce->active_command;
	if (unlikely(areq == NULL))
		pr_err("ota_crypto: %s, no active request\n", __func__);
	else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
		if (_next_v_mp_req(areq)) {
			/* execute next subcommand */
			spin_unlock_irqrestore(&podev->lock, flags);
			ret = start_req(pqce, areq);
			if (unlikely(ret)) {
				/* abort this request; fall through to pick a new one */
				areq->err = ret;
				schedule = true;
				spin_lock_irqsave(&podev->lock, flags);
			} else {
				/* sub-request in flight; nothing to complete yet */
				areq = NULL;
				schedule = false;
			}
		} else {
			/* done with this variable mp req */
			schedule = true;
		}
	}

	/* pull ready commands until one starts or the queue drains */
	while (schedule) {
		if (!list_empty(&podev->ready_commands)) {
			new_req = container_of(podev->ready_commands.next,
					       struct ota_async_req, rlist);
			list_del(&new_req->rlist);
			pqce->active_command = new_req;
			spin_unlock_irqrestore(&podev->lock, flags);

			if (new_req) {
				new_req->err = 0;
				/* start a new request */
				ret = start_req(pqce, new_req);
			}
			if (unlikely(new_req && ret)) {
				/* failed to start; complete it and try the next */
				new_req->err = ret;
				complete(&new_req->complete);
				ret = 0;
				new_req = NULL;
				spin_lock_irqsave(&podev->lock, flags);
			} else {
				schedule = false;
			}
		} else {
			pqce->active_command = NULL;
			spin_unlock_irqrestore(&podev->lock, flags);
			schedule = false;
		}
	}

	if (areq)
		complete(&areq->complete);
}
/*
 * QCE completion callback for F9 (integrity) requests.
 * Runs in QCE completion context; records MAC and outcome, then defers
 * request completion to the engine's done tasklet.
 */
static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
		  int ret)
{
	struct ota_async_req *areq = (struct ota_async_req *) cookie;
	struct ota_qce_dev *pqce;

	pqce = areq->pqce;
	/* computed MAC-I; copied back to userspace by the ioctl handler */
	areq->req.f9_req.mac_i = *((uint32_t *)icv);

	if (ret) {
		pqce->err_req++;
		areq->err = -ENXIO;
	} else
		areq->err = 0;

	tasklet_schedule(&pqce->done_tasklet);
}
  238. static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
  239. int ret)
  240. {
  241. struct ota_async_req *areq = (struct ota_async_req *) cookie;
  242. struct ota_qce_dev *pqce;
  243. pqce = areq->pqce;
  244. if (ret) {
  245. pqce->err_req++;
  246. areq->err = -ENXIO;
  247. } else {
  248. areq->err = 0;
  249. }
  250. tasklet_schedule(&pqce->done_tasklet);
  251. }
  252. static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
  253. {
  254. struct qce_f9_req *pf9;
  255. struct qce_f8_multi_pkt_req *p_mp_f8;
  256. struct qce_f8_req *pf8;
  257. int ret = 0;
  258. /* command should be on the podev->active_command */
  259. areq->pqce = pqce;
  260. switch (areq->op) {
  261. case QCE_OTA_F8_OPER:
  262. pf8 = &areq->req.f8_req;
  263. ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
  264. break;
  265. case QCE_OTA_MPKT_F8_OPER:
  266. p_mp_f8 = &areq->req.f8_mp_req;
  267. ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb);
  268. break;
  269. case QCE_OTA_F9_OPER:
  270. pf9 = &areq->req.f9_req;
  271. ret = qce_f9_req(pqce->qce, pf9, areq, f9_cb);
  272. break;
  273. case QCE_OTA_VAR_MPKT_F8_OPER:
  274. pf8 = &areq->req.f8_v_mp_req.qce_f8_req;
  275. ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
  276. break;
  277. default:
  278. ret = -ENOTSUPP;
  279. break;
  280. }
  281. areq->err = ret;
  282. pqce->total_req++;
  283. if (ret)
  284. pqce->err_req++;
  285. return ret;
  286. }
  287. static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
  288. {
  289. /* do this function with spinlock set */
  290. struct ota_qce_dev *p;
  291. if (unlikely(list_empty(&podev->qce_dev))) {
  292. pr_err("%s: no valid qce to schedule\n", __func__);
  293. return NULL;
  294. }
  295. list_for_each_entry(p, &podev->qce_dev, qlist) {
  296. if (p->active_command == NULL)
  297. return p;
  298. }
  299. return NULL;
  300. }
/*
 * Submit 'areq' and block until it completes.
 *
 * If an engine is idle the request starts immediately; otherwise it is
 * queued on ready_commands and picked up later by req_done(). Updates the
 * per-operation success/fail statistics and returns the request's error.
 */
static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
{
	unsigned long flags;
	int ret = 0;
	struct qcota_stat *pstat;
	struct ota_qce_dev *pqce;

	areq->err = 0;
	spin_lock_irqsave(&podev->lock, flags);
	pqce = schedule_qce(podev);
	if (pqce) {
		pqce->active_command = areq;
		spin_unlock_irqrestore(&podev->lock, flags);
		ret = start_req(pqce, areq);
		if (ret != 0) {
			/* failed to start: release the engine again */
			spin_lock_irqsave(&podev->lock, flags);
			pqce->active_command = NULL;
			spin_unlock_irqrestore(&podev->lock, flags);
		}
	} else {
		/* all engines busy; req_done() will start this one later */
		list_add_tail(&areq->rlist, &podev->ready_commands);
		spin_unlock_irqrestore(&podev->lock, flags);
	}

	if (ret == 0)
		wait_for_completion(&areq->complete);

	pstat = &_qcota_stat;
	switch (areq->op) {
	case QCE_OTA_F8_OPER:
		if (areq->err)
			pstat->f8_op_fail++;
		else
			pstat->f8_op_success++;
		break;
	case QCE_OTA_MPKT_F8_OPER:
		if (areq->err)
			pstat->f8_mp_op_fail++;
		else
			pstat->f8_mp_op_success++;
		break;
	case QCE_OTA_F9_OPER:
		if (areq->err)
			pstat->f9_op_fail++;
		else
			pstat->f9_op_success++;
		break;
	case QCE_OTA_VAR_MPKT_F8_OPER:
	default:
		if (areq->err)
			pstat->f8_v_mp_op_fail++;
		else
			pstat->f8_v_mp_op_success++;
		break;
	}
	return areq->err;
}
  355. static long qcota_ioctl(struct file *file,
  356. unsigned int cmd, unsigned long arg)
  357. {
  358. int err = 0;
  359. struct ota_dev_control *podev;
  360. uint8_t *user_src;
  361. uint8_t *user_dst;
  362. uint8_t *k_buf = NULL;
  363. struct ota_async_req areq;
  364. uint32_t total, temp;
  365. struct qcota_stat *pstat;
  366. int i;
  367. uint8_t *p = NULL;
  368. podev = file->private_data;
  369. if (podev == NULL || podev->magic != OTA_MAGIC) {
  370. pr_err("%s: invalid handle %pK\n",
  371. __func__, podev);
  372. return -ENOENT;
  373. }
  374. /* Verify user arguments. */
  375. if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
  376. return -ENOTTY;
  377. init_completion(&areq.complete);
  378. pstat = &_qcota_stat;
  379. switch (cmd) {
  380. case QCOTA_F9_REQ:
  381. if (!access_ok(VERIFY_WRITE, (void __user *)arg,
  382. sizeof(struct qce_f9_req)))
  383. return -EFAULT;
  384. if (copy_from_user(&areq.req.f9_req, (void __user *)arg,
  385. sizeof(struct qce_f9_req)))
  386. return -EFAULT;
  387. user_src = areq.req.f9_req.message;
  388. if (!access_ok(VERIFY_READ, (void __user *)user_src,
  389. areq.req.f9_req.msize))
  390. return -EFAULT;
  391. if (areq.req.f9_req.msize == 0)
  392. return 0;
  393. k_buf = memdup_user((const void __user *)user_src,
  394. areq.req.f9_req.msize);
  395. if (IS_ERR(k_buf))
  396. return -EFAULT;
  397. areq.req.f9_req.message = k_buf;
  398. areq.op = QCE_OTA_F9_OPER;
  399. pstat->f9_req++;
  400. err = submit_req(&areq, podev);
  401. areq.req.f9_req.message = user_src;
  402. if (err == 0 && copy_to_user((void __user *)arg,
  403. &areq.req.f9_req, sizeof(struct qce_f9_req))) {
  404. err = -EFAULT;
  405. }
  406. kfree(k_buf);
  407. break;
  408. case QCOTA_F8_REQ:
  409. if (!access_ok(VERIFY_WRITE, (void __user *)arg,
  410. sizeof(struct qce_f8_req)))
  411. return -EFAULT;
  412. if (copy_from_user(&areq.req.f8_req, (void __user *)arg,
  413. sizeof(struct qce_f8_req)))
  414. return -EFAULT;
  415. total = areq.req.f8_req.data_len;
  416. user_src = areq.req.f8_req.data_in;
  417. if (user_src != NULL) {
  418. if (!access_ok(VERIFY_READ, (void __user *)
  419. user_src, total))
  420. return -EFAULT;
  421. }
  422. user_dst = areq.req.f8_req.data_out;
  423. if (!access_ok(VERIFY_WRITE, (void __user *)
  424. user_dst, total))
  425. return -EFAULT;
  426. if (!total)
  427. return 0;
  428. k_buf = kmalloc(total, GFP_KERNEL);
  429. if (k_buf == NULL)
  430. return -ENOMEM;
  431. /* k_buf returned from kmalloc should be cache line aligned */
  432. if (user_src && copy_from_user(k_buf,
  433. (void __user *)user_src, total)) {
  434. kfree(k_buf);
  435. return -EFAULT;
  436. }
  437. if (user_src)
  438. areq.req.f8_req.data_in = k_buf;
  439. else
  440. areq.req.f8_req.data_in = NULL;
  441. areq.req.f8_req.data_out = k_buf;
  442. areq.op = QCE_OTA_F8_OPER;
  443. pstat->f8_req++;
  444. err = submit_req(&areq, podev);
  445. if (err == 0 && copy_to_user(user_dst, k_buf, total))
  446. err = -EFAULT;
  447. kfree(k_buf);
  448. break;
  449. case QCOTA_F8_MPKT_REQ:
  450. if (!access_ok(VERIFY_WRITE, (void __user *)arg,
  451. sizeof(struct qce_f8_multi_pkt_req)))
  452. return -EFAULT;
  453. if (copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
  454. sizeof(struct qce_f8_multi_pkt_req)))
  455. return -EFAULT;
  456. temp = areq.req.f8_mp_req.qce_f8_req.data_len;
  457. if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
  458. areq.req.f8_mp_req.cipher_size)
  459. return -EINVAL;
  460. total = (uint32_t) areq.req.f8_mp_req.num_pkt *
  461. areq.req.f8_mp_req.qce_f8_req.data_len;
  462. user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
  463. if (!access_ok(VERIFY_READ, (void __user *)
  464. user_src, total))
  465. return -EFAULT;
  466. user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
  467. if (!access_ok(VERIFY_WRITE, (void __user *)
  468. user_dst, total))
  469. return -EFAULT;
  470. if (!total)
  471. return 0;
  472. /* k_buf should be cache line aligned */
  473. k_buf = memdup_user((const void __user *)user_src, total);
  474. if (IS_ERR(k_buf))
  475. return -EFAULT;
  476. areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
  477. areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
  478. areq.op = QCE_OTA_MPKT_F8_OPER;
  479. pstat->f8_mp_req++;
  480. err = submit_req(&areq, podev);
  481. if (err == 0 && copy_to_user(user_dst, k_buf, total))
  482. err = -EFAULT;
  483. kfree(k_buf);
  484. break;
  485. case QCOTA_F8_V_MPKT_REQ:
  486. if (!access_ok(VERIFY_WRITE, (void __user *)arg,
  487. sizeof(struct qce_f8_variable_multi_pkt_req)))
  488. return -EFAULT;
  489. if (copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
  490. sizeof(struct qce_f8_variable_multi_pkt_req)))
  491. return -EFAULT;
  492. if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
  493. return -EINVAL;
  494. for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
  495. if (!access_ok(VERIFY_WRITE, (void __user *)
  496. areq.req.f8_v_mp_req.cipher_iov[i].addr,
  497. areq.req.f8_v_mp_req.cipher_iov[i].size))
  498. return -EFAULT;
  499. total += areq.req.f8_v_mp_req.cipher_iov[i].size;
  500. total = ALIGN(total, L1_CACHE_BYTES);
  501. }
  502. if (!total)
  503. return 0;
  504. k_buf = kmalloc(total, GFP_KERNEL);
  505. if (k_buf == NULL)
  506. return -ENOMEM;
  507. for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
  508. user_src = areq.req.f8_v_mp_req.cipher_iov[i].addr;
  509. if (copy_from_user(p, (void __user *)user_src,
  510. areq.req.f8_v_mp_req.cipher_iov[i].size)) {
  511. kfree(k_buf);
  512. return -EFAULT;
  513. }
  514. p += areq.req.f8_v_mp_req.cipher_iov[i].size;
  515. p = (uint8_t *) ALIGN(((uintptr_t)p),
  516. L1_CACHE_BYTES);
  517. }
  518. areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
  519. areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
  520. areq.req.f8_v_mp_req.qce_f8_req.data_len =
  521. areq.req.f8_v_mp_req.cipher_iov[0].size;
  522. areq.steps = 0;
  523. areq.op = QCE_OTA_VAR_MPKT_F8_OPER;
  524. pstat->f8_v_mp_req++;
  525. err = submit_req(&areq, podev);
  526. if (err != 0) {
  527. kfree(k_buf);
  528. return err;
  529. }
  530. for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
  531. user_dst = areq.req.f8_v_mp_req.cipher_iov[i].addr;
  532. if (copy_to_user(user_dst, p,
  533. areq.req.f8_v_mp_req.cipher_iov[i].size)) {
  534. kfree(k_buf);
  535. return -EFAULT;
  536. }
  537. p += areq.req.f8_v_mp_req.cipher_iov[i].size;
  538. p = (uint8_t *) ALIGN(((uintptr_t)p),
  539. L1_CACHE_BYTES);
  540. }
  541. kfree(k_buf);
  542. break;
  543. default:
  544. return -ENOTTY;
  545. }
  546. return err;
  547. }
  548. static int qcota_probe(struct platform_device *pdev)
  549. {
  550. void *handle = NULL;
  551. int rc = 0;
  552. struct ota_dev_control *podev;
  553. struct ce_hw_support ce_support;
  554. struct ota_qce_dev *pqce;
  555. unsigned long flags;
  556. podev = &qcota_dev;
  557. pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
  558. if (!pqce)
  559. return -ENOMEM;
  560. rc = alloc_chrdev_region(&qcota_device_no, 0, 1, QCOTA_DEV);
  561. if (rc < 0) {
  562. pr_err("alloc_chrdev_region failed %d\n", rc);
  563. return rc;
  564. }
  565. #if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
  566. driver_class = class_create(QCOTA_DEV);
  567. #else
  568. driver_class = class_create(THIS_MODULE, QCOTA_DEV);
  569. #endif
  570. if (IS_ERR(driver_class)) {
  571. rc = -ENOMEM;
  572. pr_err("class_create failed %d\n", rc);
  573. goto exit_unreg_chrdev_region;
  574. }
  575. class_dev = device_create(driver_class, NULL, qcota_device_no, NULL,
  576. QCOTA_DEV);
  577. if (IS_ERR(class_dev)) {
  578. pr_err("class_device_create failed %d\n", rc);
  579. rc = -ENOMEM;
  580. goto exit_destroy_class;
  581. }
  582. cdev_init(&podev->cdev, &qcota_fops);
  583. podev->cdev.owner = THIS_MODULE;
  584. rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcota_device_no), 0), 1);
  585. if (rc < 0) {
  586. pr_err("cdev_add failed %d\n", rc);
  587. goto exit_destroy_device;
  588. }
  589. podev->minor = 0;
  590. pqce->podev = podev;
  591. pqce->active_command = NULL;
  592. tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);
  593. /* open qce */
  594. handle = qce_open(pdev, &rc);
  595. if (handle == NULL) {
  596. pr_err("%s: device %s, can not open qce\n",
  597. __func__, pdev->name);
  598. goto exit_del_cdev;
  599. }
  600. if (qce_hw_support(handle, &ce_support) < 0 ||
  601. !ce_support.ota) {
  602. pr_err("%s: device %s, qce does not support ota capability\n",
  603. __func__, pdev->name);
  604. rc = -ENODEV;
  605. goto err;
  606. }
  607. pqce->qce = handle;
  608. pqce->pdev = pdev;
  609. pqce->total_req = 0;
  610. pqce->err_req = 0;
  611. platform_set_drvdata(pdev, pqce);
  612. mutex_lock(&podev->register_lock);
  613. rc = 0;
  614. if (!podev->registered) {
  615. if (rc == 0) {
  616. pqce->unit = podev->total_units;
  617. podev->total_units++;
  618. podev->registered = true;
  619. }
  620. } else {
  621. pqce->unit = podev->total_units;
  622. podev->total_units++;
  623. }
  624. mutex_unlock(&podev->register_lock);
  625. if (rc) {
  626. pr_err("ion: failed to register misc device.\n");
  627. goto err;
  628. }
  629. spin_lock_irqsave(&podev->lock, flags);
  630. list_add_tail(&pqce->qlist, &podev->qce_dev);
  631. spin_unlock_irqrestore(&podev->lock, flags);
  632. return 0;
  633. err:
  634. if (handle)
  635. qce_close(handle);
  636. platform_set_drvdata(pdev, NULL);
  637. tasklet_kill(&pqce->done_tasklet);
  638. exit_del_cdev:
  639. cdev_del(&podev->cdev);
  640. exit_destroy_device:
  641. device_destroy(driver_class, qcota_device_no);
  642. exit_destroy_class:
  643. class_destroy(driver_class);
  644. exit_unreg_chrdev_region:
  645. unregister_chrdev_region(qcota_device_no, 1);
  646. kfree(pqce);
  647. return rc;
  648. }
/*
 * Remove one engine. The last engine to go also tears down the char
 * device, class and region created at probe time.
 */
static int qcota_remove(struct platform_device *pdev)
{
	struct ota_dev_control *podev;
	struct ota_qce_dev *pqce;
	unsigned long flags;

	pqce = platform_get_drvdata(pdev);
	if (!pqce)
		return 0;
	if (pqce->qce)
		qce_close(pqce->qce);

	podev = pqce->podev;
	if (!podev)
		goto ret;

	spin_lock_irqsave(&podev->lock, flags);
	list_del(&pqce->qlist);
	spin_unlock_irqrestore(&podev->lock, flags);

	mutex_lock(&podev->register_lock);
	if (--podev->total_units == 0) {
		/* last engine gone: undo probe's char-device setup */
		cdev_del(&podev->cdev);
		device_destroy(driver_class, qcota_device_no);
		class_destroy(driver_class);
		unregister_chrdev_region(qcota_device_no, 1);
		podev->registered = false;
	}
	mutex_unlock(&podev->register_lock);
ret:
	tasklet_kill(&pqce->done_tasklet);
	kfree(pqce);
	return 0;
}
/* Device-tree match table: binds to "qcom,qcota" nodes. */
static const struct of_device_id qcota_match[] = {
	{ .compatible = "qcom,qcota",
	},
	{}
};

static struct platform_driver qcota_plat_driver = {
	.probe = qcota_probe,
	.remove = qcota_remove,
	.driver = {
		.name = "qcota",
		.of_match_table = qcota_match,
	},
};
/*
 * Render the statistics counters (plus per-engine totals) into
 * _debug_read_buf. Returns the number of bytes written. Every scnprintf
 * is bounded by DEBUG_MAX_RW_BUF, so the buffer cannot overflow.
 */
static int _disp_stats(void)
{
	struct qcota_stat *pstat;
	int len = 0;
	struct ota_dev_control *podev = &qcota_dev;
	unsigned long flags;
	struct ota_qce_dev *p;

	pstat = &_qcota_stat;
	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQTI OTA crypto accelerator Statistics:\n");
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 request                      : %llu\n",
			pstat->f8_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 operation success            : %llu\n",
			pstat->f8_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 operation fail               : %llu\n",
			pstat->f8_op_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 MP request                   : %llu\n",
			pstat->f8_mp_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 MP operation success         : %llu\n",
			pstat->f8_mp_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 MP operation fail            : %llu\n",
			pstat->f8_mp_op_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 Variable MP request          : %llu\n",
			pstat->f8_v_mp_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 Variable MP operation success: %llu\n",
			pstat->f8_v_mp_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 Variable MP operation fail   : %llu\n",
			pstat->f8_v_mp_op_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F9 request                      : %llu\n",
			pstat->f9_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F9 operation success            : %llu\n",
			pstat->f9_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F9 operation fail               : %llu\n",
			pstat->f9_op_fail);

	/* per-engine counters are protected by podev->lock */
	spin_lock_irqsave(&podev->lock, flags);
	list_for_each_entry(p, &podev->qce_dev, qlist) {
		len += scnprintf(
			_debug_read_buf + len,
			DEBUG_MAX_RW_BUF - len - 1,
			"   Engine %4d Req                 : %llu\n",
			p->unit,
			p->total_req
		);
		len += scnprintf(
			_debug_read_buf + len,
			DEBUG_MAX_RW_BUF - len - 1,
			"   Engine %4d Req Error           : %llu\n",
			p->unit,
			p->err_req
		);
	}
	spin_unlock_irqrestore(&podev->lock, flags);
	return len;
}
  758. static ssize_t _debug_stats_read(struct file *file, char __user *buf,
  759. size_t count, loff_t *ppos)
  760. {
  761. int rc = -EINVAL;
  762. int len;
  763. len = _disp_stats();
  764. if (len <= count)
  765. rc = simple_read_from_buffer((void __user *) buf, len,
  766. ppos, (void *) _debug_read_buf, len);
  767. return rc;
  768. }
  769. static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
  770. size_t count, loff_t *ppos)
  771. {
  772. struct ota_dev_control *podev = &qcota_dev;
  773. unsigned long flags;
  774. struct ota_qce_dev *p;
  775. memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));
  776. spin_lock_irqsave(&podev->lock, flags);
  777. list_for_each_entry(p, &podev->qce_dev, qlist) {
  778. p->total_req = 0;
  779. p->err_req = 0;
  780. }
  781. spin_unlock_irqrestore(&podev->lock, flags);
  782. return count;
  783. }
/* debugfs file_operations for the stats-0 node. */
static const struct file_operations _debug_stats_ops = {
	.open = simple_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};
  789. static int _qcota_debug_init(void)
  790. {
  791. int rc;
  792. char name[DEBUG_MAX_FNAME];
  793. struct dentry *dent;
  794. _debug_dent = debugfs_create_dir("qcota", NULL);
  795. if (IS_ERR(_debug_dent)) {
  796. pr_err("qcota debugfs_create_dir fail, error %ld\n",
  797. PTR_ERR(_debug_dent));
  798. return PTR_ERR(_debug_dent);
  799. }
  800. snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
  801. _debug_qcota = 0;
  802. dent = debugfs_create_file(name, 0644, _debug_dent,
  803. &_debug_qcota, &_debug_stats_ops);
  804. if (dent == NULL) {
  805. pr_err("qcota debugfs_create_file fail, error %ld\n",
  806. PTR_ERR(dent));
  807. rc = PTR_ERR(dent);
  808. goto err;
  809. }
  810. return 0;
  811. err:
  812. debugfs_remove_recursive(_debug_dent);
  813. return rc;
  814. }
  815. static int __init qcota_init(void)
  816. {
  817. int rc;
  818. struct ota_dev_control *podev;
  819. rc = _qcota_debug_init();
  820. if (rc)
  821. return rc;
  822. podev = &qcota_dev;
  823. INIT_LIST_HEAD(&podev->ready_commands);
  824. INIT_LIST_HEAD(&podev->qce_dev);
  825. spin_lock_init(&podev->lock);
  826. mutex_init(&podev->register_lock);
  827. podev->registered = false;
  828. podev->total_units = 0;
  829. return platform_driver_register(&qcota_plat_driver);
  830. }
/* Module exit: remove the debugfs entries and unregister the driver. */
static void __exit qcota_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcota_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Ota Crypto driver");

module_init(qcota_init);
module_exit(qcota_exit);