  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QTI Over the Air (OTA) Crypto driver
  4. *
  5. * Copyright (c) 2010-2014,2017-2020 The Linux Foundation. All rights reserved.
  6. */
  7. #include <linux/types.h>
  8. #include <linux/module.h>
  9. #include <linux/device.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/dma-mapping.h>
  12. #include <linux/kernel.h>
  13. #include <linux/dmapool.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/spinlock.h>
  16. #include <linux/init.h>
  17. #include <linux/module.h>
  18. #include <linux/fs.h>
  19. #include <linux/cdev.h>
  20. #include <linux/uaccess.h>
  21. #include <linux/debugfs.h>
  22. #include <linux/cache.h>
  23. #include "linux/qcota.h"
  24. #include "qce.h"
  25. #include "qce_ota.h"
/* Operation type carried by an ota_async_req; selects the union member. */
enum qce_ota_oper_enum {
	QCE_OTA_F8_OPER = 0,		/* single-packet F8 (qce_f8_req) */
	QCE_OTA_MPKT_F8_OPER = 1,	/* multi-packet F8, equal packet sizes */
	QCE_OTA_F9_OPER = 2,		/* F9 integrity (qce_f9_req) */
	QCE_OTA_VAR_MPKT_F8_OPER = 3,	/* multi-packet F8, per-packet sizes */
	QCE_OTA_OPER_LAST
};
  33. struct ota_dev_control;
/*
 * One in-flight OTA request.  Lives on the ioctl caller's stack
 * (see qcota_ioctl()) and is completed from the engine's done_tasklet.
 */
struct ota_async_req {
	struct list_head rlist;		/* link on podev->ready_commands */
	struct completion complete;	/* fired when the request finishes */
	int err;			/* final status, 0 on success */
	enum qce_ota_oper_enum op;	/* selects the union member below */
	union {
		struct qce_f9_req f9_req;
		struct qce_f8_req f8_req;
		struct qce_f8_multi_pkt_req f8_mp_req;
		struct qce_f8_variable_multi_pkt_req f8_v_mp_req;
	} req;
	/* current sub-packet index for QCE_OTA_VAR_MPKT_F8_OPER */
	unsigned int steps;
	struct ota_qce_dev *pqce;	/* engine executing this request */
};
/*
 * Register ourselves as a char device /dev/qcota0 to be able to access the ota
 * from userspace.
 */
#define QCOTA_DEV "qcota0"

/* Driver-wide singleton (qcota_dev) shared by all probed engines. */
struct ota_dev_control {
	/* char device */
	struct cdev cdev;
	int minor;
	/* requests waiting for an idle engine; protected by lock */
	struct list_head ready_commands;
	unsigned int magic;		/* OTA_MAGIC; handle sanity check */
	/* list of registered ota_qce_dev engines; protected by lock */
	struct list_head qce_dev;
	spinlock_t lock;
	struct mutex register_lock;	/* guards registered/total_units */
	bool registered;
	uint32_t total_units;		/* number of probed engines */
};
/* Per-engine state, one instance per probed platform device. */
struct ota_qce_dev {
	struct list_head qlist;		/* link on podev->qce_dev */
	/* qce handle */
	void *qce;
	/* platform device */
	struct platform_device *pdev;
	/* request currently on the engine; NULL when idle */
	struct ota_async_req *active_command;
	struct tasklet_struct done_tasklet;
	struct ota_dev_control *podev;
	uint32_t unit;			/* engine index, shown in debugfs */
	u64 total_req;			/* requests submitted to this engine */
	u64 err_req;			/* requests that failed */
};
#define OTA_MAGIC 0x4f544143	/* "OTAC": tags a valid ota_dev_control */

static long qcota_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg);
static int qcota_open(struct inode *inode, struct file *file);
static int qcota_release(struct inode *inode, struct file *file);
static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);

/* file operations for /dev/qcota0 */
static const struct file_operations qcota_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcota_ioctl,
	.open = qcota_open,
	.release = qcota_release,
};

/* the single driver-wide control device */
static struct ota_dev_control qcota_dev = {
	.magic = OTA_MAGIC,
};

static dev_t qcota_device_no;
static struct class *driver_class;
static struct device *class_dev;
#define DEBUG_MAX_FNAME 16
#define DEBUG_MAX_RW_BUF 1024

/* Driver-wide request/outcome counters, exposed via debugfs. */
struct qcota_stat {
	u64 f8_req;		/* QCOTA_F8_REQ ioctls issued */
	u64 f8_mp_req;		/* QCOTA_F8_MPKT_REQ ioctls issued */
	u64 f8_v_mp_req;	/* QCOTA_F8_V_MPKT_REQ ioctls issued */
	u64 f9_req;		/* QCOTA_F9_REQ ioctls issued */
	u64 f8_op_success;
	u64 f8_op_fail;
	u64 f8_mp_op_success;
	u64 f8_mp_op_fail;
	u64 f8_v_mp_op_success;
	u64 f8_v_mp_op_fail;
	u64 f9_op_success;
	u64 f9_op_fail;
};
static struct qcota_stat _qcota_stat;

static struct dentry *_debug_dent;	/* debugfs "qcota" directory */
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcota;	/* private data for the debugfs stats file */
/* Return the driver-wide control device (statically allocated). */
static struct ota_dev_control *qcota_control(void)
{
	return &qcota_dev;
}
  121. static int qcota_open(struct inode *inode, struct file *file)
  122. {
  123. struct ota_dev_control *podev;
  124. podev = qcota_control();
  125. if (podev == NULL) {
  126. pr_err("%s: no such device %d\n", __func__,
  127. MINOR(inode->i_rdev));
  128. return -ENOENT;
  129. }
  130. file->private_data = podev;
  131. return 0;
  132. }
  133. static int qcota_release(struct inode *inode, struct file *file)
  134. {
  135. struct ota_dev_control *podev;
  136. podev = file->private_data;
  137. if (podev != NULL && podev->magic != OTA_MAGIC) {
  138. pr_err("%s: invalid handle %pK\n",
  139. __func__, podev);
  140. }
  141. file->private_data = NULL;
  142. return 0;
  143. }
/*
 * Advance a variable multi-packet F8 request to its next sub-packet.
 *
 * Returns true when another sub-packet remains and the embedded
 * qce_f8_req has been rewritten to describe it; false when the whole
 * request is finished or an earlier sub-packet failed.
 */
static bool _next_v_mp_req(struct ota_async_req *areq)
{
	unsigned char *p;

	/* stop on the first error; remaining packets are abandoned */
	if (areq->err)
		return false;
	if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
		return false;

	/*
	 * Packets are packed back to back in one kernel bounce buffer,
	 * each starting on an L1 cache-line boundary (the same layout
	 * built in qcota_ioctl()).  Step past the previous packet and
	 * re-align to locate the next one; the operation is in place,
	 * so data_in and data_out point at the same packet.
	 */
	p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
	p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
	p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);

	areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
	areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
	areq->req.f8_v_mp_req.qce_f8_req.data_len =
		areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;

	/* each sub-packet is ciphered with the next COUNT-C value */
	areq->req.f8_v_mp_req.qce_f8_req.count_c++;
	return true;
}
/*
 * Tasklet handler run after a request completes on an engine.
 *
 * Three jobs:
 *  - for a variable multi-packet F8 request, start the next sub-packet
 *    instead of completing the request;
 *  - otherwise pull the next command off ready_commands and start it,
 *    retrying until one starts or the queue drains;
 *  - finally complete() the finished request so submit_req() wakes up.
 *
 * Locking: podev->lock is taken on entry and is held at the top of
 * every iteration of the while loop; each branch drops it before
 * calling start_req()/complete() and re-takes it if it loops again.
 */
static void req_done(unsigned long data)
{
	struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
	struct ota_dev_control *podev = pqce->podev;
	struct ota_async_req *areq;
	unsigned long flags;
	struct ota_async_req *new_req = NULL;
	int ret = 0;
	bool schedule = true;

	spin_lock_irqsave(&podev->lock, flags);
	areq = pqce->active_command;
	if (unlikely(areq == NULL))
		pr_err("ota_crypto: %s, no active request\n", __func__);
	else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
		if (_next_v_mp_req(areq)) {
			/* execute next subcommand */
			spin_unlock_irqrestore(&podev->lock, flags);
			ret = start_req(pqce, areq);
			if (unlikely(ret)) {
				/* submission failed: fall through and
				 * complete the request with the error
				 */
				areq->err = ret;
				schedule = true;
				spin_lock_irqsave(&podev->lock, flags);
			} else {
				/* sub-request in flight: nothing to
				 * complete yet, engine stays busy
				 */
				areq = NULL;
				schedule = false;
			}
		} else {
			/* done with this variable mp req */
			schedule = true;
		}
	}
	while (schedule) {
		if (!list_empty(&podev->ready_commands)) {
			new_req = container_of(podev->ready_commands.next,
						struct ota_async_req, rlist);
			list_del(&new_req->rlist);
			pqce->active_command = new_req;
			spin_unlock_irqrestore(&podev->lock, flags);
			if (new_req) {
				new_req->err = 0;
				/* start a new request */
				ret = start_req(pqce, new_req);
			}
			if (unlikely(new_req && ret)) {
				/* could not start it: complete it with
				 * the error and try the next command
				 */
				new_req->err = ret;
				complete(&new_req->complete);
				ret = 0;
				new_req = NULL;
				spin_lock_irqsave(&podev->lock, flags);
			} else {
				schedule = false;
			}
		} else {
			/* queue empty: engine goes idle */
			pqce->active_command = NULL;
			spin_unlock_irqrestore(&podev->lock, flags);
			schedule = false;
		}
	}
	/* wake the submitter of the request that just finished */
	if (areq)
		complete(&areq->complete);
}
/*
 * QCE completion callback for F9 (integrity) requests.  Records the
 * computed MAC-I and the outcome, then defers the rest of completion
 * to the engine's done_tasklet.
 */
static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
	int ret)
{
	struct ota_async_req *areq = (struct ota_async_req *) cookie;
	struct ota_qce_dev *pqce;

	pqce = areq->pqce;
	/*
	 * NOTE(review): icv is dereferenced unconditionally, even when
	 * ret != 0 — presumably qce always passes a valid icv buffer;
	 * confirm against qce_f9_req()'s callback contract.
	 */
	areq->req.f9_req.mac_i = *((uint32_t *)icv);

	if (ret) {
		pqce->err_req++;
		areq->err = -ENXIO;
	} else
		areq->err = 0;

	tasklet_schedule(&pqce->done_tasklet);
}
  236. static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
  237. int ret)
  238. {
  239. struct ota_async_req *areq = (struct ota_async_req *) cookie;
  240. struct ota_qce_dev *pqce;
  241. pqce = areq->pqce;
  242. if (ret) {
  243. pqce->err_req++;
  244. areq->err = -ENXIO;
  245. } else {
  246. areq->err = 0;
  247. }
  248. tasklet_schedule(&pqce->done_tasklet);
  249. }
  250. static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
  251. {
  252. struct qce_f9_req *pf9;
  253. struct qce_f8_multi_pkt_req *p_mp_f8;
  254. struct qce_f8_req *pf8;
  255. int ret = 0;
  256. /* command should be on the podev->active_command */
  257. areq->pqce = pqce;
  258. switch (areq->op) {
  259. case QCE_OTA_F8_OPER:
  260. pf8 = &areq->req.f8_req;
  261. ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
  262. break;
  263. case QCE_OTA_MPKT_F8_OPER:
  264. p_mp_f8 = &areq->req.f8_mp_req;
  265. ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb);
  266. break;
  267. case QCE_OTA_F9_OPER:
  268. pf9 = &areq->req.f9_req;
  269. ret = qce_f9_req(pqce->qce, pf9, areq, f9_cb);
  270. break;
  271. case QCE_OTA_VAR_MPKT_F8_OPER:
  272. pf8 = &areq->req.f8_v_mp_req.qce_f8_req;
  273. ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
  274. break;
  275. default:
  276. ret = -ENOTSUPP;
  277. break;
  278. }
  279. areq->err = ret;
  280. pqce->total_req++;
  281. if (ret)
  282. pqce->err_req++;
  283. return ret;
  284. }
  285. static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
  286. {
  287. /* do this function with spinlock set */
  288. struct ota_qce_dev *p;
  289. if (unlikely(list_empty(&podev->qce_dev))) {
  290. pr_err("%s: no valid qce to schedule\n", __func__);
  291. return NULL;
  292. }
  293. list_for_each_entry(p, &podev->qce_dev, qlist) {
  294. if (p->active_command == NULL)
  295. return p;
  296. }
  297. return NULL;
  298. }
/*
 * Submit one OTA request and wait for it to finish.
 *
 * If an idle engine is available the request starts immediately;
 * otherwise it is queued on ready_commands and started later by
 * req_done().  Sleeps until the completion fires (unless submission
 * itself failed), then updates the global statistics.
 * Returns the request's final error code (0 on success).
 */
static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
{
	unsigned long flags;
	int ret = 0;
	struct qcota_stat *pstat;
	struct ota_qce_dev *pqce;

	areq->err = 0;
	spin_lock_irqsave(&podev->lock, flags);
	pqce = schedule_qce(podev);
	if (pqce) {
		/* claim the engine before dropping the lock */
		pqce->active_command = areq;
		spin_unlock_irqrestore(&podev->lock, flags);

		ret = start_req(pqce, areq);
		if (ret != 0) {
			/* submission failed: release the engine */
			spin_lock_irqsave(&podev->lock, flags);
			pqce->active_command = NULL;
			spin_unlock_irqrestore(&podev->lock, flags);
		}
	} else {
		/* all engines busy: queue for req_done() to pick up */
		list_add_tail(&areq->rlist, &podev->ready_commands);
		spin_unlock_irqrestore(&podev->lock, flags);
	}

	/*
	 * If start_req() failed no completion will fire and areq->err
	 * already holds the error; otherwise wait for the callback.
	 */
	if (ret == 0)
		wait_for_completion(&areq->complete);

	/* account the outcome in the global statistics */
	pstat = &_qcota_stat;
	switch (areq->op) {
	case QCE_OTA_F8_OPER:
		if (areq->err)
			pstat->f8_op_fail++;
		else
			pstat->f8_op_success++;
		break;
	case QCE_OTA_MPKT_F8_OPER:
		if (areq->err)
			pstat->f8_mp_op_fail++;
		else
			pstat->f8_mp_op_success++;
		break;
	case QCE_OTA_F9_OPER:
		if (areq->err)
			pstat->f9_op_fail++;
		else
			pstat->f9_op_success++;
		break;
	case QCE_OTA_VAR_MPKT_F8_OPER:
	default:
		if (areq->err)
			pstat->f8_v_mp_op_fail++;
		else
			pstat->f8_v_mp_op_success++;
		break;
	}
	return areq->err;
}
  353. static long qcota_ioctl(struct file *file,
  354. unsigned int cmd, unsigned long arg)
  355. {
  356. int err = 0;
  357. struct ota_dev_control *podev;
  358. uint8_t *user_src;
  359. uint8_t *user_dst;
  360. uint8_t *k_buf = NULL;
  361. struct ota_async_req areq;
  362. uint32_t total, temp;
  363. struct qcota_stat *pstat;
  364. int i;
  365. uint8_t *p = NULL;
  366. podev = file->private_data;
  367. if (podev == NULL || podev->magic != OTA_MAGIC) {
  368. pr_err("%s: invalid handle %pK\n",
  369. __func__, podev);
  370. return -ENOENT;
  371. }
  372. /* Verify user arguments. */
  373. if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
  374. return -ENOTTY;
  375. init_completion(&areq.complete);
  376. pstat = &_qcota_stat;
  377. switch (cmd) {
  378. case QCOTA_F9_REQ:
  379. if (!access_ok(VERIFY_WRITE, (void __user *)arg,
  380. sizeof(struct qce_f9_req)))
  381. return -EFAULT;
  382. if (copy_from_user(&areq.req.f9_req, (void __user *)arg,
  383. sizeof(struct qce_f9_req)))
  384. return -EFAULT;
  385. user_src = areq.req.f9_req.message;
  386. if (!access_ok(VERIFY_READ, (void __user *)user_src,
  387. areq.req.f9_req.msize))
  388. return -EFAULT;
  389. if (areq.req.f9_req.msize == 0)
  390. return 0;
  391. k_buf = memdup_user((const void __user *)user_src,
  392. areq.req.f9_req.msize);
  393. if (IS_ERR(k_buf))
  394. return -EFAULT;
  395. areq.req.f9_req.message = k_buf;
  396. areq.op = QCE_OTA_F9_OPER;
  397. pstat->f9_req++;
  398. err = submit_req(&areq, podev);
  399. areq.req.f9_req.message = user_src;
  400. if (err == 0 && copy_to_user((void __user *)arg,
  401. &areq.req.f9_req, sizeof(struct qce_f9_req))) {
  402. err = -EFAULT;
  403. }
  404. kfree(k_buf);
  405. break;
  406. case QCOTA_F8_REQ:
  407. if (!access_ok(VERIFY_WRITE, (void __user *)arg,
  408. sizeof(struct qce_f8_req)))
  409. return -EFAULT;
  410. if (copy_from_user(&areq.req.f8_req, (void __user *)arg,
  411. sizeof(struct qce_f8_req)))
  412. return -EFAULT;
  413. total = areq.req.f8_req.data_len;
  414. user_src = areq.req.f8_req.data_in;
  415. if (user_src != NULL) {
  416. if (!access_ok(VERIFY_READ, (void __user *)
  417. user_src, total))
  418. return -EFAULT;
  419. }
  420. user_dst = areq.req.f8_req.data_out;
  421. if (!access_ok(VERIFY_WRITE, (void __user *)
  422. user_dst, total))
  423. return -EFAULT;
  424. if (!total)
  425. return 0;
  426. k_buf = kmalloc(total, GFP_KERNEL);
  427. if (k_buf == NULL)
  428. return -ENOMEM;
  429. /* k_buf returned from kmalloc should be cache line aligned */
  430. if (user_src && copy_from_user(k_buf,
  431. (void __user *)user_src, total)) {
  432. kfree(k_buf);
  433. return -EFAULT;
  434. }
  435. if (user_src)
  436. areq.req.f8_req.data_in = k_buf;
  437. else
  438. areq.req.f8_req.data_in = NULL;
  439. areq.req.f8_req.data_out = k_buf;
  440. areq.op = QCE_OTA_F8_OPER;
  441. pstat->f8_req++;
  442. err = submit_req(&areq, podev);
  443. if (err == 0 && copy_to_user(user_dst, k_buf, total))
  444. err = -EFAULT;
  445. kfree(k_buf);
  446. break;
  447. case QCOTA_F8_MPKT_REQ:
  448. if (!access_ok(VERIFY_WRITE, (void __user *)arg,
  449. sizeof(struct qce_f8_multi_pkt_req)))
  450. return -EFAULT;
  451. if (copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
  452. sizeof(struct qce_f8_multi_pkt_req)))
  453. return -EFAULT;
  454. temp = areq.req.f8_mp_req.qce_f8_req.data_len;
  455. if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
  456. areq.req.f8_mp_req.cipher_size)
  457. return -EINVAL;
  458. total = (uint32_t) areq.req.f8_mp_req.num_pkt *
  459. areq.req.f8_mp_req.qce_f8_req.data_len;
  460. user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
  461. if (!access_ok(VERIFY_READ, (void __user *)
  462. user_src, total))
  463. return -EFAULT;
  464. user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
  465. if (!access_ok(VERIFY_WRITE, (void __user *)
  466. user_dst, total))
  467. return -EFAULT;
  468. if (!total)
  469. return 0;
  470. /* k_buf should be cache line aligned */
  471. k_buf = memdup_user((const void __user *)user_src, total);
  472. if (IS_ERR(k_buf))
  473. return -EFAULT;
  474. areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
  475. areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
  476. areq.op = QCE_OTA_MPKT_F8_OPER;
  477. pstat->f8_mp_req++;
  478. err = submit_req(&areq, podev);
  479. if (err == 0 && copy_to_user(user_dst, k_buf, total))
  480. err = -EFAULT;
  481. kfree(k_buf);
  482. break;
  483. case QCOTA_F8_V_MPKT_REQ:
  484. if (!access_ok(VERIFY_WRITE, (void __user *)arg,
  485. sizeof(struct qce_f8_variable_multi_pkt_req)))
  486. return -EFAULT;
  487. if (copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
  488. sizeof(struct qce_f8_variable_multi_pkt_req)))
  489. return -EFAULT;
  490. if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
  491. return -EINVAL;
  492. for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
  493. if (!access_ok(VERIFY_WRITE, (void __user *)
  494. areq.req.f8_v_mp_req.cipher_iov[i].addr,
  495. areq.req.f8_v_mp_req.cipher_iov[i].size))
  496. return -EFAULT;
  497. total += areq.req.f8_v_mp_req.cipher_iov[i].size;
  498. total = ALIGN(total, L1_CACHE_BYTES);
  499. }
  500. if (!total)
  501. return 0;
  502. k_buf = kmalloc(total, GFP_KERNEL);
  503. if (k_buf == NULL)
  504. return -ENOMEM;
  505. for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
  506. user_src = areq.req.f8_v_mp_req.cipher_iov[i].addr;
  507. if (copy_from_user(p, (void __user *)user_src,
  508. areq.req.f8_v_mp_req.cipher_iov[i].size)) {
  509. kfree(k_buf);
  510. return -EFAULT;
  511. }
  512. p += areq.req.f8_v_mp_req.cipher_iov[i].size;
  513. p = (uint8_t *) ALIGN(((uintptr_t)p),
  514. L1_CACHE_BYTES);
  515. }
  516. areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
  517. areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
  518. areq.req.f8_v_mp_req.qce_f8_req.data_len =
  519. areq.req.f8_v_mp_req.cipher_iov[0].size;
  520. areq.steps = 0;
  521. areq.op = QCE_OTA_VAR_MPKT_F8_OPER;
  522. pstat->f8_v_mp_req++;
  523. err = submit_req(&areq, podev);
  524. if (err != 0) {
  525. kfree(k_buf);
  526. return err;
  527. }
  528. for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
  529. user_dst = areq.req.f8_v_mp_req.cipher_iov[i].addr;
  530. if (copy_to_user(user_dst, p,
  531. areq.req.f8_v_mp_req.cipher_iov[i].size)) {
  532. kfree(k_buf);
  533. return -EFAULT;
  534. }
  535. p += areq.req.f8_v_mp_req.cipher_iov[i].size;
  536. p = (uint8_t *) ALIGN(((uintptr_t)p),
  537. L1_CACHE_BYTES);
  538. }
  539. kfree(k_buf);
  540. break;
  541. default:
  542. return -ENOTTY;
  543. }
  544. return err;
  545. }
  546. static int qcota_probe(struct platform_device *pdev)
  547. {
  548. void *handle = NULL;
  549. int rc = 0;
  550. struct ota_dev_control *podev;
  551. struct ce_hw_support ce_support;
  552. struct ota_qce_dev *pqce;
  553. unsigned long flags;
  554. podev = &qcota_dev;
  555. pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
  556. if (!pqce)
  557. return -ENOMEM;
  558. rc = alloc_chrdev_region(&qcota_device_no, 0, 1, QCOTA_DEV);
  559. if (rc < 0) {
  560. pr_err("alloc_chrdev_region failed %d\n", rc);
  561. return rc;
  562. }
  563. driver_class = class_create(THIS_MODULE, QCOTA_DEV);
  564. if (IS_ERR(driver_class)) {
  565. rc = -ENOMEM;
  566. pr_err("class_create failed %d\n", rc);
  567. goto exit_unreg_chrdev_region;
  568. }
  569. class_dev = device_create(driver_class, NULL, qcota_device_no, NULL,
  570. QCOTA_DEV);
  571. if (IS_ERR(class_dev)) {
  572. pr_err("class_device_create failed %d\n", rc);
  573. rc = -ENOMEM;
  574. goto exit_destroy_class;
  575. }
  576. cdev_init(&podev->cdev, &qcota_fops);
  577. podev->cdev.owner = THIS_MODULE;
  578. rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcota_device_no), 0), 1);
  579. if (rc < 0) {
  580. pr_err("cdev_add failed %d\n", rc);
  581. goto exit_destroy_device;
  582. }
  583. podev->minor = 0;
  584. pqce->podev = podev;
  585. pqce->active_command = NULL;
  586. tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);
  587. /* open qce */
  588. handle = qce_open(pdev, &rc);
  589. if (handle == NULL) {
  590. pr_err("%s: device %s, can not open qce\n",
  591. __func__, pdev->name);
  592. goto exit_del_cdev;
  593. }
  594. if (qce_hw_support(handle, &ce_support) < 0 ||
  595. !ce_support.ota) {
  596. pr_err("%s: device %s, qce does not support ota capability\n",
  597. __func__, pdev->name);
  598. rc = -ENODEV;
  599. goto err;
  600. }
  601. pqce->qce = handle;
  602. pqce->pdev = pdev;
  603. pqce->total_req = 0;
  604. pqce->err_req = 0;
  605. platform_set_drvdata(pdev, pqce);
  606. mutex_lock(&podev->register_lock);
  607. rc = 0;
  608. if (!podev->registered) {
  609. if (rc == 0) {
  610. pqce->unit = podev->total_units;
  611. podev->total_units++;
  612. podev->registered = true;
  613. }
  614. } else {
  615. pqce->unit = podev->total_units;
  616. podev->total_units++;
  617. }
  618. mutex_unlock(&podev->register_lock);
  619. if (rc) {
  620. pr_err("ion: failed to register misc device.\n");
  621. goto err;
  622. }
  623. spin_lock_irqsave(&podev->lock, flags);
  624. list_add_tail(&pqce->qlist, &podev->qce_dev);
  625. spin_unlock_irqrestore(&podev->lock, flags);
  626. return 0;
  627. err:
  628. if (handle)
  629. qce_close(handle);
  630. platform_set_drvdata(pdev, NULL);
  631. tasklet_kill(&pqce->done_tasklet);
  632. exit_del_cdev:
  633. cdev_del(&podev->cdev);
  634. exit_destroy_device:
  635. device_destroy(driver_class, qcota_device_no);
  636. exit_destroy_class:
  637. class_destroy(driver_class);
  638. exit_unreg_chrdev_region:
  639. unregister_chrdev_region(qcota_device_no, 1);
  640. kfree(pqce);
  641. return rc;
  642. }
/*
 * Platform-device teardown: close the engine, unlink it from the
 * control device, and release the char device infrastructure when the
 * last engine goes away.
 */
static int qcota_remove(struct platform_device *pdev)
{
	struct ota_dev_control *podev;
	struct ota_qce_dev *pqce;
	unsigned long flags;

	pqce = platform_get_drvdata(pdev);
	if (!pqce)
		return 0;
	if (pqce->qce)
		qce_close(pqce->qce);

	podev = pqce->podev;
	if (!podev)
		goto ret;

	spin_lock_irqsave(&podev->lock, flags);
	list_del(&pqce->qlist);
	spin_unlock_irqrestore(&podev->lock, flags);

	mutex_lock(&podev->register_lock);
	/* last engine gone: tear down cdev, device node and class */
	if (--podev->total_units == 0) {
		cdev_del(&podev->cdev);
		device_destroy(driver_class, qcota_device_no);
		class_destroy(driver_class);
		unregister_chrdev_region(qcota_device_no, 1);
		podev->registered = false;
	}
	mutex_unlock(&podev->register_lock);
ret:
	tasklet_kill(&pqce->done_tasklet);
	kfree(pqce);
	return 0;
}
/* devicetree match table */
static const struct of_device_id qcota_match[] = {
	{ .compatible = "qcom,qcota",
	},
	{}
};

static struct platform_driver qcota_plat_driver = {
	.probe = qcota_probe,
	.remove = qcota_remove,
	.driver = {
		.name = "qcota",
		.of_match_table = qcota_match,
	},
};
  686. static int _disp_stats(void)
  687. {
  688. struct qcota_stat *pstat;
  689. int len = 0;
  690. struct ota_dev_control *podev = &qcota_dev;
  691. unsigned long flags;
  692. struct ota_qce_dev *p;
  693. pstat = &_qcota_stat;
  694. len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
  695. "\nQTI OTA crypto accelerator Statistics:\n");
  696. len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
  697. " F8 request : %llu\n",
  698. pstat->f8_req);
  699. len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
  700. " F8 operation success : %llu\n",
  701. pstat->f8_op_success);
  702. len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
  703. " F8 operation fail : %llu\n",
  704. pstat->f8_op_fail);
  705. len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
  706. " F8 MP request : %llu\n",
  707. pstat->f8_mp_req);
  708. len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
  709. " F8 MP operation success : %llu\n",
  710. pstat->f8_mp_op_success);
  711. len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
  712. " F8 MP operation fail : %llu\n",
  713. pstat->f8_mp_op_fail);
  714. len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
  715. " F8 Variable MP request : %llu\n",
  716. pstat->f8_v_mp_req);
  717. len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
  718. " F8 Variable MP operation success: %llu\n",
  719. pstat->f8_v_mp_op_success);
  720. len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
  721. " F8 Variable MP operation fail : %llu\n",
  722. pstat->f8_v_mp_op_fail);
  723. len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
  724. " F9 request : %llu\n",
  725. pstat->f9_req);
  726. len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
  727. " F9 operation success : %llu\n",
  728. pstat->f9_op_success);
  729. len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
  730. " F9 operation fail : %llu\n",
  731. pstat->f9_op_fail);
  732. spin_lock_irqsave(&podev->lock, flags);
  733. list_for_each_entry(p, &podev->qce_dev, qlist) {
  734. len += scnprintf(
  735. _debug_read_buf + len,
  736. DEBUG_MAX_RW_BUF - len - 1,
  737. " Engine %4d Req : %llu\n",
  738. p->unit,
  739. p->total_req
  740. );
  741. len += scnprintf(
  742. _debug_read_buf + len,
  743. DEBUG_MAX_RW_BUF - len - 1,
  744. " Engine %4d Req Error : %llu\n",
  745. p->unit,
  746. p->err_req
  747. );
  748. }
  749. spin_unlock_irqrestore(&podev->lock, flags);
  750. return len;
  751. }
  752. static ssize_t _debug_stats_read(struct file *file, char __user *buf,
  753. size_t count, loff_t *ppos)
  754. {
  755. int rc = -EINVAL;
  756. int len;
  757. len = _disp_stats();
  758. if (len <= count)
  759. rc = simple_read_from_buffer((void __user *) buf, len,
  760. ppos, (void *) _debug_read_buf, len);
  761. return rc;
  762. }
  763. static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
  764. size_t count, loff_t *ppos)
  765. {
  766. struct ota_dev_control *podev = &qcota_dev;
  767. unsigned long flags;
  768. struct ota_qce_dev *p;
  769. memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));
  770. spin_lock_irqsave(&podev->lock, flags);
  771. list_for_each_entry(p, &podev->qce_dev, qlist) {
  772. p->total_req = 0;
  773. p->err_req = 0;
  774. }
  775. spin_unlock_irqrestore(&podev->lock, flags);
  776. return count;
  777. }
/* debugfs stats file: read formats the counters, write clears them */
static const struct file_operations _debug_stats_ops = {
	.open = simple_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};
  783. static int _qcota_debug_init(void)
  784. {
  785. int rc;
  786. char name[DEBUG_MAX_FNAME];
  787. struct dentry *dent;
  788. _debug_dent = debugfs_create_dir("qcota", NULL);
  789. if (IS_ERR(_debug_dent)) {
  790. pr_err("qcota debugfs_create_dir fail, error %ld\n",
  791. PTR_ERR(_debug_dent));
  792. return PTR_ERR(_debug_dent);
  793. }
  794. snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
  795. _debug_qcota = 0;
  796. dent = debugfs_create_file(name, 0644, _debug_dent,
  797. &_debug_qcota, &_debug_stats_ops);
  798. if (dent == NULL) {
  799. pr_err("qcota debugfs_create_file fail, error %ld\n",
  800. PTR_ERR(dent));
  801. rc = PTR_ERR(dent);
  802. goto err;
  803. }
  804. return 0;
  805. err:
  806. debugfs_remove_recursive(_debug_dent);
  807. return rc;
  808. }
  809. static int __init qcota_init(void)
  810. {
  811. int rc;
  812. struct ota_dev_control *podev;
  813. rc = _qcota_debug_init();
  814. if (rc)
  815. return rc;
  816. podev = &qcota_dev;
  817. INIT_LIST_HEAD(&podev->ready_commands);
  818. INIT_LIST_HEAD(&podev->qce_dev);
  819. spin_lock_init(&podev->lock);
  820. mutex_init(&podev->register_lock);
  821. podev->registered = false;
  822. podev->total_units = 0;
  823. return platform_driver_register(&qcota_plat_driver);
  824. }
  825. static void __exit qcota_exit(void)
  826. {
  827. debugfs_remove_recursive(_debug_dent);
  828. platform_driver_unregister(&qcota_plat_driver);
  829. }
  830. MODULE_LICENSE("GPL v2");
  831. MODULE_DESCRIPTION("QTI Ota Crypto driver");
  832. module_init(qcota_init);
  833. module_exit(qcota_exit);