// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c
#define CR_RSD0				0x40
#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_BURST		4
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

struct img_hash_request_ctx {
	struct img_hash_dev	*hdev;
	u8			digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long		flags;
	size_t			digsize;

	dma_addr_t		dma_addr;
	size_t			dma_ct;

	/* sg root */
	struct scatterlist	*sgfirst;
	/* walk state */
	struct scatterlist	*sg;
	size_t			nents;
	size_t			offset;
	unsigned int		total;
	size_t			sent;

	unsigned long		op;

	size_t			bufcnt;
	struct ahash_request	fallback_req;

	/* Zero length buffer must remain last member of struct */
	u8 buffer[] __aligned(sizeof(u32));
};

struct img_hash_ctx {
	struct img_hash_dev	*hdev;
	unsigned long		flags;
	struct crypto_ahash	*fallback;
};

struct img_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*hash_clk;
	struct clk		*sys_clk;
	void __iomem		*io_base;

	phys_addr_t		bus_addr;
	void __iomem		*cpu_addr;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	dma_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct dma_chan		*dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}
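
/*
 * Program the byte order and the algorithm selected for the current
 * request into the control register, starting the hash operation.
 */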
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;

	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non DMA mode, to
	 * ensure the first data write is not grouped in burst with the control
	 * register write a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}
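
/*
 * Write 'length' bytes of the request to the accelerator's write port one
 * 32-bit word at a time (PIO mode).
 */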
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}

static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}
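
/*
 * Map the scatterlist and submit a mem-to-device slave transfer to the
 * accelerator's write port; completion is signalled via
 * img_hash_dma_callback().
 */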
static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;
	ctx->bufcnt = 0;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}
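
/*
 * Read the digest out of the hardware result queue, filling the digest
 * buffer from its last 32-bit word back to its first.
 */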
static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
	if (IS_ERR(hdev->dma_lch)) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return PTR_ERR(hdev->dma_lch);
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}
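
/*
 * Feed the current scatterlist entry to the engine: whole 32-bit words are
 * sent by DMA, while any sub-word remainder is gathered into ctx->buffer
 * and written out by the CPU from the DMA completion callback.
 */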
static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx;
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!hdev->req)
		return;

	ctx = ahash_request_ctx(hdev->req);
	if (!ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */
	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}
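
/*
 * Pick the transfer method for the request: DMA for requests of at least
 * IMG_HASH_DMA_THRESHOLD bytes, PIO otherwise.
 */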
static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}

	return err;
}
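
/*
 * Reset the accelerator, enable the "new results" interrupt and program
 * the total message length, in bits, for the upcoming request.
 */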
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}

static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}
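
/*
 * Enqueue the request (if any) and, unless the device is already busy,
 * dequeue the next request and start processing it.
 */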
static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);
	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}
	return res;
}
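
/*
 * update/final/finup/export/import are passed straight through to the
 * software fallback; only img_hash_digest() drives the accelerator.
 */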
static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}
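
/*
 * Hash a complete message on the accelerator: pick a device, select the
 * algorithm from the requested digest size, set up the scatterlist walk
 * state and queue the request.
 */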
static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;
	} else {
		hdev = tctx->hdev;
	}

	spin_unlock(&img_hash.lock);
	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}

static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		return PTR_ERR(ctx->fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 crypto_ahash_reqsize(ctx->fallback) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}
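
/*
 * Interrupt handler: on a "new results" interrupt schedule the done
 * tasklet; the remaining status bits only produce diagnostics.
 */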
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}
	return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_md5_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha1_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha224_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha256_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	for (; i--; )
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);
	return 0;
}
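
/*
 * Bottom half run after an interrupt or DMA completion: finish the active
 * request once its output is ready, or restart the queue when idle.
 */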
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static int img_hash_resume(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->hash_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hdev->sys_clk);
	if (ret) {
		clk_disable_unprepare(hdev->hash_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove		= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.pm	= &img_hash_pm_ops,
		.of_match_table	= of_match_ptr(img_hash_match),
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <[email protected]>");