mxs-dcp.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <[email protected]>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

  27. /*
  28. * Null hashes to align with hw behavior on imx6sl and ull
  29. * these are flipped for consistency with hw output
  30. */
  31. static const uint8_t sha1_null_hash[] =
  32. "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
  33. "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
  34. static const uint8_t sha256_null_hash[] =
  35. "\x55\xb8\x52\x78\x1b\x99\x95\xa4"
  36. "\x4c\x93\x9b\x64\xe4\x41\xae\x27"
  37. "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
  38. "\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher		*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
	struct skcipher_request fallback_req;	// keep at the end
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx req_ctx;
	struct dcp_async_ctx async_ctx;
};

/*
 * Due to the design of the Linux Crypto API, there can only ever be
 * one instance of the MXS DCP.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

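/*
 * Load the channel's DMA descriptor, kick the channel by incrementing its
 * semaphore and wait (with a one second timeout) for the completion that
 * the interrupt handler signals when the transfer finishes.
 */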
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	int dma_err;
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return 0;
}

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);

	return ret;
}

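/*
 * Walk the request scatterlist, bounce the data through the coherent
 * in/out buffers in DCP_BUF_SZ chunks and run the AES engine on each
 * chunk. For CBC, the IV travels in the second half of the key buffer
 * and req->iv is updated afterwards so requests can be chained.
 */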
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, sg_nents(req->src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

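/*
 * Worker thread for the crypto channel: dequeue requests from the
 * channel queue, run them through mxs_dcp_aes_block_crypt() and
 * signal completion to the submitter.
 */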
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

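/* Hand the whole request over to the software fallback skcipher. */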
static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

	return ret;
}

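/*
 * Enqueue an AES request on the crypto channel and wake the worker
 * thread; anything but an AES-128 key is sent to the software fallback.
 */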
static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

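/* Cache an AES-128 key for the hardware, or program the fallback for other key sizes. */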
static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES-128 is supported by the hardware, so store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

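/*
 * Copy the request data into the coherent SHA input buffer in
 * DCP_BUF_SZ chunks, hashing each full buffer, and on the final pass
 * copy the (byte-reversed) digest into req->result.
 */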
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* For some reason the result is flipped */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

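/*
 * Worker thread for the SHA channel: dequeue requests from the channel
 * queue, feed them to dcp_sha_req_to_buf() and signal completion.
 */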
static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

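/*
 * Common update/finup path: note whether this is the trailing request of
 * the stream, flag the first request of a session for HASH_INIT and queue
 * the request on the SHA channel for the worker thread.
 */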
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

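/*
 * Export and import of the partial hashing state (request context plus
 * transform context), as required by the ahash API.
 */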
static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

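/*
 * DCP interrupt handler: acknowledge the pending channel interrupts and
 * complete the corresponding DMA waits.
 */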
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

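/*
 * Probe: map the registers, claim both interrupts and the optional clock,
 * reset the block, start the per-channel worker threads and register the
 * algorithms advertised in the CAPABILITY1 register.
 */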
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SOCs */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		goto err_disable_unprepare_clk;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);

	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_disable_unprepare_clk;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
	clk_disable_unprepare(sdcp->dcp_clk);

	return ret;
}

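/*
 * Unregister the algorithms, stop the worker threads and disable the clock.
 */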
static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");