atmel-tdes.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Cryptographic API.
  4. *
  5. * Support for ATMEL DES/TDES HW acceleration.
  6. *
  7. * Copyright (c) 2012 Eukréa Electromatique - ATMEL
  8. * Author: Nicolas Royer <[email protected]>
  9. *
  10. * Some ideas are from omap-aes.c drivers.
  11. */
  12. #include <linux/kernel.h>
  13. #include <linux/module.h>
  14. #include <linux/slab.h>
  15. #include <linux/err.h>
  16. #include <linux/clk.h>
  17. #include <linux/io.h>
  18. #include <linux/hw_random.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/device.h>
  21. #include <linux/dmaengine.h>
  22. #include <linux/init.h>
  23. #include <linux/errno.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/irq.h>
  26. #include <linux/scatterlist.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/of_device.h>
  29. #include <linux/delay.h>
  30. #include <linux/crypto.h>
  31. #include <crypto/scatterwalk.h>
  32. #include <crypto/algapi.h>
  33. #include <crypto/internal/des.h>
  34. #include <crypto/internal/skcipher.h>
  35. #include "atmel-tdes-regs.h"
/* cra_priority for every algorithm registered by this driver. */
#define ATMEL_TDES_PRIORITY	300

/* TDES flags */
/* Reserve bits [17:16], [13:12], [2:0] for the TDES Mode Register */
#define TDES_FLAGS_ENCRYPT	TDES_MR_CYPHER_ENC
#define TDES_FLAGS_OPMODE_MASK	(TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
#define TDES_FLAGS_ECB		TDES_MR_OPMOD_ECB
#define TDES_FLAGS_CBC		TDES_MR_OPMOD_CBC
#define TDES_FLAGS_OFB		TDES_MR_OPMOD_OFB
#define TDES_FLAGS_CFB64	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
#define TDES_FLAGS_CFB32	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
#define TDES_FLAGS_CFB16	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
#define TDES_FLAGS_CFB8		(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)

#define TDES_FLAGS_MODE_MASK	(TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)

/* Driver state flags; bits [6:3] do not collide with the MR bits above. */
#define TDES_FLAGS_INIT		BIT(3)	/* IP has been soft-reset once */
#define TDES_FLAGS_FAST		BIT(4)	/* DMA directly on the request sgs */
#define TDES_FLAGS_BUSY		BIT(5)	/* a request is being processed */
#define TDES_FLAGS_DMA		BIT(6)	/* a dmaengine transfer is in flight */

#define ATMEL_TDES_QUEUE_LENGTH	50

/* Request-length granularity for the CFB data sizes, in bytes. */
#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
/* Per-SoC TDES hardware capabilities. */
struct atmel_tdes_caps {
	bool	has_dma;	/* use dmaengine channels instead of the PDC */
	u32	has_cfb_3keys;	/* presumably: CFB usable with 3-key TDES — set outside this view */
};

struct atmel_tdes_dev;
/* Per-transform context: key material and the device bound at init_tfm. */
struct atmel_tdes_ctx {
	struct atmel_tdes_dev *dd;	/* device servicing this transform */

	int		keylen;		/* key length in bytes */
	u32		key[DES3_EDE_KEY_SIZE / sizeof(u32)];
	unsigned long	flags;

	u16		block_size;	/* request-length granularity of the current mode */
};
/* Per-request context. */
struct atmel_tdes_reqctx {
	unsigned long	mode;			/* TDES_FLAGS_* opmode + direction */
	/* Last ciphertext block, saved before an in-place decryption so it
	 * can be restored as the next IV once the source is overwritten. */
	u8		lastc[DES_BLOCK_SIZE];
};
/* One dmaengine channel together with its slave configuration. */
struct atmel_tdes_dma {
	struct dma_chan		*chan;
	struct dma_slave_config	 dma_conf;
};
/*
 * State for one TDES IP instance.  A single request (req) is processed at
 * a time; TDES_FLAGS_BUSY guards the engine while queue holds the backlog.
 */
struct atmel_tdes_dev {
	struct list_head	list;		/* entry in atmel_tdes.dev_list */
	unsigned long		phys_base;	/* physical base, used for DMA slave addresses */

	void __iomem		*io_base;	/* mapped register window */

	struct atmel_tdes_ctx	*ctx;		/* context of the request in flight */
	struct device		*dev;
	struct clk		*iclk;		/* peripheral clock */
	int			irq;

	unsigned long		flags;		/* TDES_FLAGS_* driver state */

	spinlock_t		lock;		/* protects queue and flags */
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct skcipher_request	*req;		/* request being processed */
	size_t			total;		/* bytes still to process */

	struct scatterlist	*in_sg;		/* current input sg entry */
	unsigned int		nb_in_sg;
	size_t			in_offset;	/* consumed bytes within in_sg */
	struct scatterlist	*out_sg;	/* current output sg entry */
	unsigned int		nb_out_sg;
	size_t			out_offset;	/* produced bytes within out_sg */

	size_t			buflen;		/* usable size of each bounce buffer */
	size_t			dma_size;	/* length of the current transfer */

	void			*buf_in;	/* bounce buffer for unaligned input */
	int			dma_in;
	dma_addr_t		dma_addr_in;	/* buf_in mapped DMA_TO_DEVICE */
	struct atmel_tdes_dma	dma_lch_in;

	void			*buf_out;	/* bounce buffer for unaligned output */
	int			dma_out;
	dma_addr_t		dma_addr_out;	/* buf_out mapped DMA_FROM_DEVICE */
	struct atmel_tdes_dma	dma_lch_out;

	struct atmel_tdes_caps	caps;		/* hardware capabilities */
	u32			hw_version;	/* value read from TDES_HW_VERSION */
};
/* Global registry of probed TDES devices. */
struct atmel_tdes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;	/* protects dev_list */
};

static struct atmel_tdes_drv atmel_tdes = {
	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
};
  119. static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
  120. void *buf, size_t buflen, size_t total, int out)
  121. {
  122. size_t count, off = 0;
  123. while (buflen && total) {
  124. count = min((*sg)->length - *offset, total);
  125. count = min(count, buflen);
  126. if (!count)
  127. return off;
  128. scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
  129. off += count;
  130. buflen -= count;
  131. *offset += count;
  132. total -= count;
  133. if (*offset == (*sg)->length) {
  134. *sg = sg_next(*sg);
  135. if (*sg)
  136. *offset = 0;
  137. else
  138. total = 0;
  139. }
  140. }
  141. return off;
  142. }
  143. static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
  144. {
  145. return readl_relaxed(dd->io_base + offset);
  146. }
  147. static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
  148. u32 offset, u32 value)
  149. {
  150. writel_relaxed(value, dd->io_base + offset);
  151. }
  152. static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
  153. const u32 *value, int count)
  154. {
  155. for (; count--; value++, offset += 4)
  156. atmel_tdes_write(dd, offset, *value);
  157. }
  158. static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
  159. {
  160. struct atmel_tdes_dev *tdes_dd;
  161. spin_lock_bh(&atmel_tdes.lock);
  162. /* One TDES IP per SoC. */
  163. tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
  164. struct atmel_tdes_dev, list);
  165. spin_unlock_bh(&atmel_tdes.lock);
  166. return tdes_dd;
  167. }
  168. static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
  169. {
  170. int err;
  171. err = clk_prepare_enable(dd->iclk);
  172. if (err)
  173. return err;
  174. if (!(dd->flags & TDES_FLAGS_INIT)) {
  175. atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
  176. dd->flags |= TDES_FLAGS_INIT;
  177. }
  178. return 0;
  179. }
  180. static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
  181. {
  182. return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
  183. }
  184. static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
  185. {
  186. int err;
  187. err = atmel_tdes_hw_init(dd);
  188. if (err)
  189. return err;
  190. dd->hw_version = atmel_tdes_get_version(dd);
  191. dev_info(dd->dev,
  192. "version: 0x%x\n", dd->hw_version);
  193. clk_disable_unprepare(dd->iclk);
  194. return 0;
  195. }
  196. static void atmel_tdes_dma_callback(void *data)
  197. {
  198. struct atmel_tdes_dev *dd = data;
  199. /* dma_lch_out - completed */
  200. tasklet_schedule(&dd->done_task);
  201. }
/*
 * Program mode, key and IV into the hardware for the current request.
 * The key length selects single-DES vs two/three-key triple-DES.
 */
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
	int err;
	u32 valmr = TDES_MR_SMOD_PDC;

	err = atmel_tdes_hw_init(dd);
	if (err)
		return err;

	/* Without PDC support, keep both PDC transfer directions disabled. */
	if (!dd->caps.has_dma)
		atmel_tdes_write(dd, TDES_PTCR,
			TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
		/* More than two DES keys: three-key triple DES. */
		valmr |= TDES_MR_KEYMOD_3KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
		/* Two DES keys: two-key triple DES. */
		valmr |= TDES_MR_KEYMOD_2KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else {
		/* Single DES. */
		valmr |= TDES_MR_TDESMOD_DES;
	}

	/* Merge in operation mode, CFB size and encrypt/decrypt direction. */
	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;

	atmel_tdes_write(dd, TDES_MR, valmr);

	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
			   dd->ctx->keylen >> 2);

	/* ECB takes no IV; the other modes load two 32-bit IV words. */
	if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
		atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);

	return 0;
}
  230. static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
  231. {
  232. int err = 0;
  233. size_t count;
  234. atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
  235. if (dd->flags & TDES_FLAGS_FAST) {
  236. dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
  237. dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
  238. } else {
  239. dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
  240. dd->dma_size, DMA_FROM_DEVICE);
  241. /* copy data */
  242. count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
  243. dd->buf_out, dd->buflen, dd->dma_size, 1);
  244. if (count != dd->dma_size) {
  245. err = -EINVAL;
  246. dev_dbg(dd->dev, "not all data converted: %zu\n", count);
  247. }
  248. }
  249. return err;
  250. }
  251. static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
  252. {
  253. int err = -ENOMEM;
  254. dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
  255. dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
  256. dd->buflen = PAGE_SIZE;
  257. dd->buflen &= ~(DES_BLOCK_SIZE - 1);
  258. if (!dd->buf_in || !dd->buf_out) {
  259. dev_dbg(dd->dev, "unable to alloc pages.\n");
  260. goto err_alloc;
  261. }
  262. /* MAP here */
  263. dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
  264. dd->buflen, DMA_TO_DEVICE);
  265. err = dma_mapping_error(dd->dev, dd->dma_addr_in);
  266. if (err) {
  267. dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
  268. goto err_map_in;
  269. }
  270. dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
  271. dd->buflen, DMA_FROM_DEVICE);
  272. err = dma_mapping_error(dd->dev, dd->dma_addr_out);
  273. if (err) {
  274. dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
  275. goto err_map_out;
  276. }
  277. return 0;
  278. err_map_out:
  279. dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
  280. DMA_TO_DEVICE);
  281. err_map_in:
  282. err_alloc:
  283. free_page((unsigned long)dd->buf_out);
  284. free_page((unsigned long)dd->buf_in);
  285. return err;
  286. }
  287. static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
  288. {
  289. dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
  290. DMA_FROM_DEVICE);
  291. dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
  292. DMA_TO_DEVICE);
  293. free_page((unsigned long)dd->buf_out);
  294. free_page((unsigned long)dd->buf_in);
  295. }
/*
 * Start one transfer through the PDC: @length bytes from @dma_addr_in to
 * the TDES input, and the result from the TDES output to @dma_addr_out.
 */
static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	int len32;

	dd->dma_size = length;

	/* Bounce-buffer path: make the CPU-filled input visible to the device. */
	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	/*
	 * The PDC counters are in transfer units whose size follows the
	 * CFB data size (8/16 bits), defaulting to 32-bit words.
	 */
	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		len32 = DIV_ROUND_UP(length, sizeof(u8));
		break;

	case TDES_FLAGS_CFB16:
		len32 = DIV_ROUND_UP(length, sizeof(u16));
		break;

	default:
		len32 = DIV_ROUND_UP(length, sizeof(u32));
		break;
	}

	/* Disable, program pointers and counters for both directions. */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
	atmel_tdes_write(dd, TDES_TCR, len32);
	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
	atmel_tdes_write(dd, TDES_RCR, len32);

	/* Enable Interrupt */
	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);

	/* Start DMA transfer */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);

	return 0;
}
/*
 * Start one transfer through the dmaengine channels.  Two single-entry
 * scatterlists describe the input and output; completion is signalled by
 * the output-channel callback (atmel_tdes_dma_callback).
 */
static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor *in_desc, *out_desc;
	enum dma_slave_buswidth addr_width;

	dd->dma_size = length;

	/* Bounce-buffer path: make the CPU-filled input visible to the device. */
	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	/* Bus width must match the CFB data size; default is 32-bit words. */
	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;

	case TDES_FLAGS_CFB16:
		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;

	default:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= TDES_FLAGS_DMA;

	/* sg[0]: memory -> device, sg[1]: device -> memory. */
	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	/* Completion is driven by the output channel. */
	out_desc->callback = atmel_tdes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}
  382. static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
  383. {
  384. int err, fast = 0, in, out;
  385. size_t count;
  386. dma_addr_t addr_in, addr_out;
  387. if ((!dd->in_offset) && (!dd->out_offset)) {
  388. /* check for alignment */
  389. in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
  390. IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
  391. out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
  392. IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
  393. fast = in && out;
  394. if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
  395. fast = 0;
  396. }
  397. if (fast) {
  398. count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
  399. count = min_t(size_t, count, sg_dma_len(dd->out_sg));
  400. err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
  401. if (!err) {
  402. dev_dbg(dd->dev, "dma_map_sg() error\n");
  403. return -EINVAL;
  404. }
  405. err = dma_map_sg(dd->dev, dd->out_sg, 1,
  406. DMA_FROM_DEVICE);
  407. if (!err) {
  408. dev_dbg(dd->dev, "dma_map_sg() error\n");
  409. dma_unmap_sg(dd->dev, dd->in_sg, 1,
  410. DMA_TO_DEVICE);
  411. return -EINVAL;
  412. }
  413. addr_in = sg_dma_address(dd->in_sg);
  414. addr_out = sg_dma_address(dd->out_sg);
  415. dd->flags |= TDES_FLAGS_FAST;
  416. } else {
  417. /* use cache buffers */
  418. count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
  419. dd->buf_in, dd->buflen, dd->total, 0);
  420. addr_in = dd->dma_addr_in;
  421. addr_out = dd->dma_addr_out;
  422. dd->flags &= ~TDES_FLAGS_FAST;
  423. }
  424. dd->total -= count;
  425. if (dd->caps.has_dma)
  426. err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
  427. else
  428. err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
  429. if (err && (dd->flags & TDES_FLAGS_FAST)) {
  430. dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
  431. dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
  432. }
  433. return err;
  434. }
  435. static void
  436. atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
  437. {
  438. struct skcipher_request *req = dd->req;
  439. struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
  440. struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  441. unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
  442. if (req->cryptlen < ivsize)
  443. return;
  444. if (rctx->mode & TDES_FLAGS_ENCRYPT) {
  445. scatterwalk_map_and_copy(req->iv, req->dst,
  446. req->cryptlen - ivsize, ivsize, 0);
  447. } else {
  448. if (req->src == req->dst)
  449. memcpy(req->iv, rctx->lastc, ivsize);
  450. else
  451. scatterwalk_map_and_copy(req->iv, req->src,
  452. req->cryptlen - ivsize,
  453. ivsize, 0);
  454. }
  455. }
/* Complete dd->req with @err, releasing the clock and the BUSY flag. */
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);

	clk_disable_unprepare(dd->iclk);

	dd->flags &= ~TDES_FLAGS_BUSY;

	/* Every mode except ECB hands the last ciphertext block back as IV. */
	if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
		atmel_tdes_set_iv_as_last_ciphertext_block(dd);

	req->base.complete(&req->base, err);
}
/*
 * Enqueue @req (may be NULL) and, unless the engine is busy, dequeue and
 * start the next queued request.  Returns the crypto_enqueue_request()
 * status for @req (typically -EINPROGRESS/-EBUSY), or 0.
 */
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
				   struct skcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_tdes_ctx *ctx;
	struct atmel_tdes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = crypto_enqueue_request(&dd->queue, &req->base);
	if (dd->flags & TDES_FLAGS_BUSY) {
		/* Another request is in flight; leave this one queued. */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		/* Claim the engine while still holding the lock. */
		dd->flags |= TDES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = skcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->cryptlen;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));

	/* Mirror the request's mode bits into the device flags. */
	rctx->mode &= TDES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;

	err = atmel_tdes_write_ctrl(dd);
	if (!err)
		err = atmel_tdes_crypt_start(dd);
	if (err) {
		/* des_task will not finish it, so do it here */
		atmel_tdes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}
  513. static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
  514. {
  515. int err = -EINVAL;
  516. size_t count;
  517. if (dd->flags & TDES_FLAGS_DMA) {
  518. err = 0;
  519. if (dd->flags & TDES_FLAGS_FAST) {
  520. dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
  521. dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
  522. } else {
  523. dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
  524. dd->dma_size, DMA_FROM_DEVICE);
  525. /* copy data */
  526. count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
  527. dd->buf_out, dd->buflen, dd->dma_size, 1);
  528. if (count != dd->dma_size) {
  529. err = -EINVAL;
  530. dev_dbg(dd->dev, "not all data converted: %zu\n", count);
  531. }
  532. }
  533. }
  534. return err;
  535. }
  536. static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
  537. {
  538. struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  539. struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
  540. struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
  541. struct device *dev = ctx->dd->dev;
  542. if (!req->cryptlen)
  543. return 0;
  544. switch (mode & TDES_FLAGS_OPMODE_MASK) {
  545. case TDES_FLAGS_CFB8:
  546. if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
  547. dev_dbg(dev, "request size is not exact amount of CFB8 blocks\n");
  548. return -EINVAL;
  549. }
  550. ctx->block_size = CFB8_BLOCK_SIZE;
  551. break;
  552. case TDES_FLAGS_CFB16:
  553. if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
  554. dev_dbg(dev, "request size is not exact amount of CFB16 blocks\n");
  555. return -EINVAL;
  556. }
  557. ctx->block_size = CFB16_BLOCK_SIZE;
  558. break;
  559. case TDES_FLAGS_CFB32:
  560. if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
  561. dev_dbg(dev, "request size is not exact amount of CFB32 blocks\n");
  562. return -EINVAL;
  563. }
  564. ctx->block_size = CFB32_BLOCK_SIZE;
  565. break;
  566. default:
  567. if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
  568. dev_dbg(dev, "request size is not exact amount of DES blocks\n");
  569. return -EINVAL;
  570. }
  571. ctx->block_size = DES_BLOCK_SIZE;
  572. break;
  573. }
  574. rctx->mode = mode;
  575. if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
  576. !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
  577. unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
  578. if (req->cryptlen >= ivsize)
  579. scatterwalk_map_and_copy(rctx->lastc, req->src,
  580. req->cryptlen - ivsize,
  581. ivsize, 0);
  582. }
  583. return atmel_tdes_handle_queue(ctx->dd, req);
  584. }
  585. static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
  586. {
  587. int ret;
  588. /* Try to grab 2 DMA channels */
  589. dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
  590. if (IS_ERR(dd->dma_lch_in.chan)) {
  591. ret = PTR_ERR(dd->dma_lch_in.chan);
  592. goto err_dma_in;
  593. }
  594. dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
  595. TDES_IDATA1R;
  596. dd->dma_lch_in.dma_conf.src_maxburst = 1;
  597. dd->dma_lch_in.dma_conf.src_addr_width =
  598. DMA_SLAVE_BUSWIDTH_4_BYTES;
  599. dd->dma_lch_in.dma_conf.dst_maxburst = 1;
  600. dd->dma_lch_in.dma_conf.dst_addr_width =
  601. DMA_SLAVE_BUSWIDTH_4_BYTES;
  602. dd->dma_lch_in.dma_conf.device_fc = false;
  603. dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
  604. if (IS_ERR(dd->dma_lch_out.chan)) {
  605. ret = PTR_ERR(dd->dma_lch_out.chan);
  606. goto err_dma_out;
  607. }
  608. dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
  609. TDES_ODATA1R;
  610. dd->dma_lch_out.dma_conf.src_maxburst = 1;
  611. dd->dma_lch_out.dma_conf.src_addr_width =
  612. DMA_SLAVE_BUSWIDTH_4_BYTES;
  613. dd->dma_lch_out.dma_conf.dst_maxburst = 1;
  614. dd->dma_lch_out.dma_conf.dst_addr_width =
  615. DMA_SLAVE_BUSWIDTH_4_BYTES;
  616. dd->dma_lch_out.dma_conf.device_fc = false;
  617. return 0;
  618. err_dma_out:
  619. dma_release_channel(dd->dma_lch_in.chan);
  620. err_dma_in:
  621. dev_err(dd->dev, "no DMA channel available\n");
  622. return ret;
  623. }
  624. static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
  625. {
  626. dma_release_channel(dd->dma_lch_in.chan);
  627. dma_release_channel(dd->dma_lch_out.chan);
  628. }
  629. static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
  630. unsigned int keylen)
  631. {
  632. struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
  633. int err;
  634. err = verify_skcipher_des_key(tfm, key);
  635. if (err)
  636. return err;
  637. memcpy(ctx->key, key, keylen);
  638. ctx->keylen = keylen;
  639. return 0;
  640. }
  641. static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
  642. unsigned int keylen)
  643. {
  644. struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
  645. int err;
  646. err = verify_skcipher_des3_key(tfm, key);
  647. if (err)
  648. return err;
  649. memcpy(ctx->key, key, keylen);
  650. ctx->keylen = keylen;
  651. return 0;
  652. }
/* ECB entry points. */
static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}
/* CBC entry points. */
static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}
/* CFB with 64-bit data size entry points. */
static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
}
/* CFB with 8-bit data size entry points. */
static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
}
/* CFB with 16-bit data size entry points. */
static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
}
/* CFB with 32-bit data size entry points. */
static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
}
/* OFB entry points. */
static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
}
  709. static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
  710. {
  711. struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
  712. ctx->dd = atmel_tdes_dev_alloc();
  713. if (!ctx->dd)
  714. return -ENODEV;
  715. crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
  716. return 0;
  717. }
  718. static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
  719. {
  720. alg->base.cra_priority = ATMEL_TDES_PRIORITY;
  721. alg->base.cra_flags = CRYPTO_ALG_ASYNC;
  722. alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
  723. alg->base.cra_module = THIS_MODULE;
  724. alg->init = atmel_tdes_init_tfm;
  725. }
/*
 * DES/TDES skcipher algorithms offloaded to the TDES hardware.
 *
 * Shared fields (context size, init hook, ...) are filled in by
 * atmel_tdes_skcipher_alg_init() just before registration in
 * atmel_tdes_register_algs().  Single-DES entries use atmel_des_setkey(),
 * triple-DES entries use atmel_tdes_setkey().  The cra_alignmask mirrors
 * the transfer granularity of each mode (0x7 for 8-byte block modes,
 * 0x1/0x3 for the 16/32-bit CFB variants, 0 for byte-wide CFB8).
 *
 * NOTE(review): ofb(des) declares cra_blocksize = 1 while ofb(des3_ede)
 * declares DES_BLOCK_SIZE — confirm this asymmetry is intentional.
 */
static struct skcipher_alg tdes_algs[] = {
{
	.base.cra_name		= "ecb(des)",
	.base.cra_driver_name	= "atmel-ecb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des)",
	.base.cra_driver_name	= "atmel-cbc-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
},
{
	.base.cra_name		= "cfb(des)",
	.base.cra_driver_name	= "atmel-cfb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb_encrypt,
	.decrypt		= atmel_tdes_cfb_decrypt,
},
{
	.base.cra_name		= "cfb8(des)",
	.base.cra_driver_name	= "atmel-cfb8-des",
	.base.cra_blocksize	= CFB8_BLOCK_SIZE,
	.base.cra_alignmask	= 0,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb8_encrypt,
	.decrypt		= atmel_tdes_cfb8_decrypt,
},
{
	.base.cra_name		= "cfb16(des)",
	.base.cra_driver_name	= "atmel-cfb16-des",
	.base.cra_blocksize	= CFB16_BLOCK_SIZE,
	.base.cra_alignmask	= 0x1,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb16_encrypt,
	.decrypt		= atmel_tdes_cfb16_decrypt,
},
{
	.base.cra_name		= "cfb32(des)",
	.base.cra_driver_name	= "atmel-cfb32-des",
	.base.cra_blocksize	= CFB32_BLOCK_SIZE,
	.base.cra_alignmask	= 0x3,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb32_encrypt,
	.decrypt		= atmel_tdes_cfb32_decrypt,
},
{
	.base.cra_name		= "ofb(des)",
	.base.cra_driver_name	= "atmel-ofb-des",
	.base.cra_blocksize	= 1,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ofb_encrypt,
	.decrypt		= atmel_tdes_ofb_decrypt,
},
{
	.base.cra_name		= "ecb(des3_ede)",
	.base.cra_driver_name	= "atmel-ecb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des3_ede)",
	.base.cra_driver_name	= "atmel-cbc-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
{
	.base.cra_name		= "ofb(des3_ede)",
	.base.cra_driver_name	= "atmel-ofb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ofb_encrypt,
	.decrypt		= atmel_tdes_ofb_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
};
  846. static void atmel_tdes_queue_task(unsigned long data)
  847. {
  848. struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
  849. atmel_tdes_handle_queue(dd, NULL);
  850. }
  851. static void atmel_tdes_done_task(unsigned long data)
  852. {
  853. struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
  854. int err;
  855. if (!(dd->flags & TDES_FLAGS_DMA))
  856. err = atmel_tdes_crypt_pdc_stop(dd);
  857. else
  858. err = atmel_tdes_crypt_dma_stop(dd);
  859. if (dd->total && !err) {
  860. if (dd->flags & TDES_FLAGS_FAST) {
  861. dd->in_sg = sg_next(dd->in_sg);
  862. dd->out_sg = sg_next(dd->out_sg);
  863. if (!dd->in_sg || !dd->out_sg)
  864. err = -EINVAL;
  865. }
  866. if (!err)
  867. err = atmel_tdes_crypt_start(dd);
  868. if (!err)
  869. return; /* DMA started. Not fininishing. */
  870. }
  871. atmel_tdes_finish_req(dd, err);
  872. atmel_tdes_handle_queue(dd, NULL);
  873. }
  874. static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
  875. {
  876. struct atmel_tdes_dev *tdes_dd = dev_id;
  877. u32 reg;
  878. reg = atmel_tdes_read(tdes_dd, TDES_ISR);
  879. if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
  880. atmel_tdes_write(tdes_dd, TDES_IDR, reg);
  881. if (TDES_FLAGS_BUSY & tdes_dd->flags)
  882. tasklet_schedule(&tdes_dd->done_task);
  883. else
  884. dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
  885. return IRQ_HANDLED;
  886. }
  887. return IRQ_NONE;
  888. }
  889. static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
  890. {
  891. int i;
  892. for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
  893. crypto_unregister_skcipher(&tdes_algs[i]);
  894. }
  895. static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
  896. {
  897. int err, i, j;
  898. for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
  899. atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
  900. err = crypto_register_skcipher(&tdes_algs[i]);
  901. if (err)
  902. goto err_tdes_algs;
  903. }
  904. return 0;
  905. err_tdes_algs:
  906. for (j = 0; j < i; j++)
  907. crypto_unregister_skcipher(&tdes_algs[j]);
  908. return err;
  909. }
  910. static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
  911. {
  912. dd->caps.has_dma = 0;
  913. dd->caps.has_cfb_3keys = 0;
  914. /* keep only major version number */
  915. switch (dd->hw_version & 0xf00) {
  916. case 0x800:
  917. case 0x700:
  918. dd->caps.has_dma = 1;
  919. dd->caps.has_cfb_3keys = 1;
  920. break;
  921. case 0x600:
  922. break;
  923. default:
  924. dev_warn(dd->dev,
  925. "Unmanaged tdes version, set minimum capabilities\n");
  926. break;
  927. }
  928. }
#if defined(CONFIG_OF)
/* Device-tree match table; referenced via of_match_ptr() in the driver. */
static const struct of_device_id atmel_tdes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-tdes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
#endif
  936. static int atmel_tdes_probe(struct platform_device *pdev)
  937. {
  938. struct atmel_tdes_dev *tdes_dd;
  939. struct device *dev = &pdev->dev;
  940. struct resource *tdes_res;
  941. int err;
  942. tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
  943. if (!tdes_dd)
  944. return -ENOMEM;
  945. tdes_dd->dev = dev;
  946. platform_set_drvdata(pdev, tdes_dd);
  947. INIT_LIST_HEAD(&tdes_dd->list);
  948. spin_lock_init(&tdes_dd->lock);
  949. tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
  950. (unsigned long)tdes_dd);
  951. tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
  952. (unsigned long)tdes_dd);
  953. crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
  954. /* Get the base address */
  955. tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  956. if (!tdes_res) {
  957. dev_err(dev, "no MEM resource info\n");
  958. err = -ENODEV;
  959. goto err_tasklet_kill;
  960. }
  961. tdes_dd->phys_base = tdes_res->start;
  962. /* Get the IRQ */
  963. tdes_dd->irq = platform_get_irq(pdev, 0);
  964. if (tdes_dd->irq < 0) {
  965. err = tdes_dd->irq;
  966. goto err_tasklet_kill;
  967. }
  968. err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
  969. IRQF_SHARED, "atmel-tdes", tdes_dd);
  970. if (err) {
  971. dev_err(dev, "unable to request tdes irq.\n");
  972. goto err_tasklet_kill;
  973. }
  974. /* Initializing the clock */
  975. tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
  976. if (IS_ERR(tdes_dd->iclk)) {
  977. dev_err(dev, "clock initialization failed.\n");
  978. err = PTR_ERR(tdes_dd->iclk);
  979. goto err_tasklet_kill;
  980. }
  981. tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
  982. if (IS_ERR(tdes_dd->io_base)) {
  983. err = PTR_ERR(tdes_dd->io_base);
  984. goto err_tasklet_kill;
  985. }
  986. err = atmel_tdes_hw_version_init(tdes_dd);
  987. if (err)
  988. goto err_tasklet_kill;
  989. atmel_tdes_get_cap(tdes_dd);
  990. err = atmel_tdes_buff_init(tdes_dd);
  991. if (err)
  992. goto err_tasklet_kill;
  993. if (tdes_dd->caps.has_dma) {
  994. err = atmel_tdes_dma_init(tdes_dd);
  995. if (err)
  996. goto err_buff_cleanup;
  997. dev_info(dev, "using %s, %s for DMA transfers\n",
  998. dma_chan_name(tdes_dd->dma_lch_in.chan),
  999. dma_chan_name(tdes_dd->dma_lch_out.chan));
  1000. }
  1001. spin_lock(&atmel_tdes.lock);
  1002. list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
  1003. spin_unlock(&atmel_tdes.lock);
  1004. err = atmel_tdes_register_algs(tdes_dd);
  1005. if (err)
  1006. goto err_algs;
  1007. dev_info(dev, "Atmel DES/TDES\n");
  1008. return 0;
  1009. err_algs:
  1010. spin_lock(&atmel_tdes.lock);
  1011. list_del(&tdes_dd->list);
  1012. spin_unlock(&atmel_tdes.lock);
  1013. if (tdes_dd->caps.has_dma)
  1014. atmel_tdes_dma_cleanup(tdes_dd);
  1015. err_buff_cleanup:
  1016. atmel_tdes_buff_cleanup(tdes_dd);
  1017. err_tasklet_kill:
  1018. tasklet_kill(&tdes_dd->done_task);
  1019. tasklet_kill(&tdes_dd->queue_task);
  1020. return err;
  1021. }
  1022. static int atmel_tdes_remove(struct platform_device *pdev)
  1023. {
  1024. struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev);
  1025. spin_lock(&atmel_tdes.lock);
  1026. list_del(&tdes_dd->list);
  1027. spin_unlock(&atmel_tdes.lock);
  1028. atmel_tdes_unregister_algs(tdes_dd);
  1029. tasklet_kill(&tdes_dd->done_task);
  1030. tasklet_kill(&tdes_dd->queue_task);
  1031. if (tdes_dd->caps.has_dma)
  1032. atmel_tdes_dma_cleanup(tdes_dd);
  1033. atmel_tdes_buff_cleanup(tdes_dd);
  1034. return 0;
  1035. }
static struct platform_driver atmel_tdes_driver = {
	.probe		= atmel_tdes_probe,
	.remove		= atmel_tdes_remove,
	.driver		= {
		.name	= "atmel_tdes",
		/* of_match_ptr() evaluates to NULL when CONFIG_OF is unset */
		.of_match_table = of_match_ptr(atmel_tdes_dt_ids),
	},
};

module_platform_driver(atmel_tdes_driver);

MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");