  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Intel IXP4xx NPE-C crypto driver
  4. *
  5. * Copyright (C) 2008 Christian Hohnstaedt <[email protected]>
  6. */
  7. #include <linux/platform_device.h>
  8. #include <linux/dma-mapping.h>
  9. #include <linux/dmapool.h>
  10. #include <linux/crypto.h>
  11. #include <linux/kernel.h>
  12. #include <linux/rtnetlink.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/spinlock.h>
  15. #include <linux/gfp.h>
  16. #include <linux/module.h>
  17. #include <linux/of.h>
  18. #include <crypto/ctr.h>
  19. #include <crypto/internal/des.h>
  20. #include <crypto/aes.h>
  21. #include <crypto/hmac.h>
  22. #include <crypto/sha1.h>
  23. #include <crypto/algapi.h>
  24. #include <crypto/internal/aead.h>
  25. #include <crypto/internal/skcipher.h>
  26. #include <crypto/authenc.h>
  27. #include <crypto/scatterwalk.h>
  28. #include <linux/soc/ixp4xx/npe.h>
  29. #include <linux/soc/ixp4xx/qmgr.h>
  30. /* Intermittent includes, delete this after v5.14-rc1 */
  31. #include <linux/soc/ixp4xx/cpu.h>
  32. #define MAX_KEYLEN 32
  33. /* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
  34. #define NPE_CTX_LEN 80
  35. #define AES_BLOCK128 16
  36. #define NPE_OP_HASH_VERIFY 0x01
  37. #define NPE_OP_CCM_ENABLE 0x04
  38. #define NPE_OP_CRYPT_ENABLE 0x08
  39. #define NPE_OP_HASH_ENABLE 0x10
  40. #define NPE_OP_NOT_IN_PLACE 0x20
  41. #define NPE_OP_HMAC_DISABLE 0x40
  42. #define NPE_OP_CRYPT_ENCRYPT 0x80
  43. #define NPE_OP_CCM_GEN_MIC 0xcc
  44. #define NPE_OP_HASH_GEN_ICV 0x50
  45. #define NPE_OP_ENC_GEN_KEY 0xc9
  46. #define MOD_ECB 0x0000
  47. #define MOD_CTR 0x1000
  48. #define MOD_CBC_ENC 0x2000
  49. #define MOD_CBC_DEC 0x3000
  50. #define MOD_CCM_ENC 0x4000
  51. #define MOD_CCM_DEC 0x5000
  52. #define KEYLEN_128 4
  53. #define KEYLEN_192 6
  54. #define KEYLEN_256 8
  55. #define CIPH_DECR 0x0000
  56. #define CIPH_ENCR 0x0400
  57. #define MOD_DES 0x0000
  58. #define MOD_TDEA2 0x0100
  59. #define MOD_3DES 0x0200
  60. #define MOD_AES 0x0800
  61. #define MOD_AES128 (0x0800 | KEYLEN_128)
  62. #define MOD_AES192 (0x0900 | KEYLEN_192)
  63. #define MOD_AES256 (0x0a00 | KEYLEN_256)
  64. #define MAX_IVLEN 16
  65. #define NPE_QLEN 16
  66. /* Space for registering when the first
  67. * NPE_QLEN crypt_ctl are busy */
  68. #define NPE_QLEN_TOTAL 64
  69. #define CTL_FLAG_UNUSED 0x0000
  70. #define CTL_FLAG_USED 0x1000
  71. #define CTL_FLAG_PERFORM_ABLK 0x0001
  72. #define CTL_FLAG_GEN_ICV 0x0002
  73. #define CTL_FLAG_GEN_REVAES 0x0004
  74. #define CTL_FLAG_PERFORM_AEAD 0x0008
  75. #define CTL_FLAG_MASK 0x000f
  76. #define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
  77. #define MD5_DIGEST_SIZE 16
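/*
 * Descriptor layouts shared with the NPE.  The declaration order of the
 * 16-bit fields depends on host endianness (see the __ARMEB__ conditionals)
 * so the NPE sees them in the positions it expects.  The fields after
 * __reserved[] in struct buffer_desc, and after crypto_ctx in struct
 * crypt_ctl, are used by the host only.
 */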
  78. struct buffer_desc {
  79. u32 phys_next;
  80. #ifdef __ARMEB__
  81. u16 buf_len;
  82. u16 pkt_len;
  83. #else
  84. u16 pkt_len;
  85. u16 buf_len;
  86. #endif
  87. dma_addr_t phys_addr;
  88. u32 __reserved[4];
  89. struct buffer_desc *next;
  90. enum dma_data_direction dir;
  91. };
  92. struct crypt_ctl {
  93. #ifdef __ARMEB__
  94. u8 mode; /* NPE_OP_* operation mode */
  95. u8 init_len;
  96. u16 reserved;
  97. #else
  98. u16 reserved;
  99. u8 init_len;
  100. u8 mode; /* NPE_OP_* operation mode */
  101. #endif
  102. u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
  103. dma_addr_t icv_rev_aes; /* icv or rev aes */
  104. dma_addr_t src_buf;
  105. dma_addr_t dst_buf;
  106. #ifdef __ARMEB__
  107. u16 auth_offs; /* Authentication start offset */
  108. u16 auth_len; /* Authentication data length */
  109. u16 crypt_offs; /* Cryption start offset */
  110. u16 crypt_len; /* Cryption data length */
  111. #else
  112. u16 auth_len; /* Authentication data length */
  113. u16 auth_offs; /* Authentication start offset */
  114. u16 crypt_len; /* Cryption data length */
  115. u16 crypt_offs; /* Cryption start offset */
  116. #endif
  117. u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
  118. u32 crypto_ctx; /* NPE Crypto Param structure address */
	/* Used by Host: 4*4 bytes */
  120. unsigned int ctl_flags;
  121. union {
  122. struct skcipher_request *ablk_req;
  123. struct aead_request *aead_req;
  124. struct crypto_tfm *tfm;
  125. } data;
  126. struct buffer_desc *regist_buf;
  127. u8 *regist_ptr;
  128. };
  129. struct ablk_ctx {
  130. struct buffer_desc *src;
  131. struct buffer_desc *dst;
  132. u8 iv[MAX_IVLEN];
  133. bool encrypt;
  134. struct skcipher_request fallback_req; // keep at the end
  135. };
  136. struct aead_ctx {
  137. struct buffer_desc *src;
  138. struct buffer_desc *dst;
  139. struct scatterlist ivlist;
  140. /* used when the hmac is not on one sg entry */
  141. u8 *hmac_virt;
  142. int encrypt;
  143. };
  144. struct ix_hash_algo {
  145. u32 cfgword;
  146. unsigned char *icv;
  147. };
  148. struct ix_sa_dir {
  149. unsigned char *npe_ctx;
  150. dma_addr_t npe_ctx_phys;
  151. int npe_ctx_idx;
  152. u8 npe_mode;
  153. };
  154. struct ixp_ctx {
  155. struct ix_sa_dir encrypt;
  156. struct ix_sa_dir decrypt;
  157. int authkey_len;
  158. u8 authkey[MAX_KEYLEN];
  159. int enckey_len;
  160. u8 enckey[MAX_KEYLEN];
  161. u8 salt[MAX_IVLEN];
  162. u8 nonce[CTR_RFC3686_NONCE_SIZE];
  163. unsigned int salted;
  164. atomic_t configuring;
  165. struct completion completion;
  166. struct crypto_skcipher *fallback_tfm;
  167. };
  168. struct ixp_alg {
  169. struct skcipher_alg crypto;
  170. const struct ix_hash_algo *hash;
  171. u32 cfg_enc;
  172. u32 cfg_dec;
  173. int registered;
  174. };
  175. struct ixp_aead_alg {
  176. struct aead_alg crypto;
  177. const struct ix_hash_algo *hash;
  178. u32 cfg_enc;
  179. u32 cfg_dec;
  180. int registered;
  181. };
  182. static const struct ix_hash_algo hash_alg_md5 = {
  183. .cfgword = 0xAA010004,
  184. .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
  185. "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
  186. };
  187. static const struct ix_hash_algo hash_alg_sha1 = {
  188. .cfgword = 0x00000005,
  189. .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
  190. "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
  191. };
  192. static struct npe *npe_c;
  193. static unsigned int send_qid;
  194. static unsigned int recv_qid;
  195. static struct dma_pool *buffer_pool;
  196. static struct dma_pool *ctx_pool;
  197. static struct crypt_ctl *crypt_virt;
  198. static dma_addr_t crypt_phys;
  199. static int support_aes = 1;
  200. static struct platform_device *pdev;
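/*
 * All crypt_ctl descriptors live in a single coherent DMA array
 * (crypt_virt/crypt_phys), so converting between virtual and physical
 * descriptor addresses is plain pointer arithmetic.
 */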
  201. static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
  202. {
  203. return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
  204. }
  205. static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
  206. {
  207. return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
  208. }
  209. static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
  210. {
  211. return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
  212. }
  213. static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
  214. {
  215. return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
  216. }
  217. static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
  218. {
  219. return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
  220. }
  221. static int setup_crypt_desc(void)
  222. {
  223. struct device *dev = &pdev->dev;
  224. BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
  225. crypt_virt = dma_alloc_coherent(dev,
  226. NPE_QLEN * sizeof(struct crypt_ctl),
  227. &crypt_phys, GFP_ATOMIC);
  228. if (!crypt_virt)
  229. return -ENOMEM;
  230. return 0;
  231. }
  232. static DEFINE_SPINLOCK(desc_lock);
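/*
 * Hand out a descriptor from the first NPE_QLEN slots of the ring.  The
 * ring is allocated lazily on first use; NULL is returned when the next
 * slot is still in flight, so callers fail the request instead of waiting.
 */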
  233. static struct crypt_ctl *get_crypt_desc(void)
  234. {
  235. int i;
  236. static int idx;
  237. unsigned long flags;
  238. spin_lock_irqsave(&desc_lock, flags);
  239. if (unlikely(!crypt_virt))
  240. setup_crypt_desc();
  241. if (unlikely(!crypt_virt)) {
  242. spin_unlock_irqrestore(&desc_lock, flags);
  243. return NULL;
  244. }
  245. i = idx;
  246. if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
  247. if (++idx >= NPE_QLEN)
  248. idx = 0;
  249. crypt_virt[i].ctl_flags = CTL_FLAG_USED;
  250. spin_unlock_irqrestore(&desc_lock, flags);
  251. return crypt_virt + i;
  252. } else {
  253. spin_unlock_irqrestore(&desc_lock, flags);
  254. return NULL;
  255. }
  256. }
  257. static DEFINE_SPINLOCK(emerg_lock);
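/*
 * Descriptors NPE_QLEN..NPE_QLEN_TOTAL-1 are reserved for configuration
 * requests (HMAC pad hashing, reverse AES key generation) so that key
 * setup can still make progress when the regular ring is busy.
 */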
  258. static struct crypt_ctl *get_crypt_desc_emerg(void)
  259. {
  260. int i;
  261. static int idx = NPE_QLEN;
  262. struct crypt_ctl *desc;
  263. unsigned long flags;
  264. desc = get_crypt_desc();
  265. if (desc)
  266. return desc;
  267. if (unlikely(!crypt_virt))
  268. return NULL;
  269. spin_lock_irqsave(&emerg_lock, flags);
  270. i = idx;
  271. if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
  272. if (++idx >= NPE_QLEN_TOTAL)
  273. idx = NPE_QLEN;
  274. crypt_virt[i].ctl_flags = CTL_FLAG_USED;
  275. spin_unlock_irqrestore(&emerg_lock, flags);
  276. return crypt_virt + i;
  277. } else {
  278. spin_unlock_irqrestore(&emerg_lock, flags);
  279. return NULL;
  280. }
  281. }
  282. static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
  283. dma_addr_t phys)
  284. {
  285. while (buf) {
  286. struct buffer_desc *buf1;
  287. u32 phys1;
  288. buf1 = buf->next;
  289. phys1 = buf->phys_next;
  290. dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
  291. dma_pool_free(buffer_pool, buf, phys);
  292. buf = buf1;
  293. phys = phys1;
  294. }
  295. }
  296. static struct tasklet_struct crypto_done_tasklet;
  297. static void finish_scattered_hmac(struct crypt_ctl *crypt)
  298. {
  299. struct aead_request *req = crypt->data.aead_req;
  300. struct aead_ctx *req_ctx = aead_request_ctx(req);
  301. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  302. int authsize = crypto_aead_authsize(tfm);
  303. int decryptlen = req->assoclen + req->cryptlen - authsize;
  304. if (req_ctx->encrypt) {
  305. scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
  306. decryptlen, authsize, 1);
  307. }
  308. dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
  309. }
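/*
 * Completion handling for one descriptor returned by the NPE on recv_qid.
 * Bit 0 of the queue entry flags an authentication failure (-EBADMSG);
 * the remaining bits are the physical address of the crypt_ctl descriptor.
 */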
  310. static void one_packet(dma_addr_t phys)
  311. {
  312. struct device *dev = &pdev->dev;
  313. struct crypt_ctl *crypt;
  314. struct ixp_ctx *ctx;
  315. int failed;
  316. failed = phys & 0x1 ? -EBADMSG : 0;
  317. phys &= ~0x3;
  318. crypt = crypt_phys2virt(phys);
  319. switch (crypt->ctl_flags & CTL_FLAG_MASK) {
  320. case CTL_FLAG_PERFORM_AEAD: {
  321. struct aead_request *req = crypt->data.aead_req;
  322. struct aead_ctx *req_ctx = aead_request_ctx(req);
  323. free_buf_chain(dev, req_ctx->src, crypt->src_buf);
  324. free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
  325. if (req_ctx->hmac_virt)
  326. finish_scattered_hmac(crypt);
  327. req->base.complete(&req->base, failed);
  328. break;
  329. }
  330. case CTL_FLAG_PERFORM_ABLK: {
  331. struct skcipher_request *req = crypt->data.ablk_req;
  332. struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
  333. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
  334. unsigned int ivsize = crypto_skcipher_ivsize(tfm);
  335. unsigned int offset;
  336. if (ivsize > 0) {
  337. offset = req->cryptlen - ivsize;
  338. if (req_ctx->encrypt) {
  339. scatterwalk_map_and_copy(req->iv, req->dst,
  340. offset, ivsize, 0);
  341. } else {
  342. memcpy(req->iv, req_ctx->iv, ivsize);
  343. memzero_explicit(req_ctx->iv, ivsize);
  344. }
  345. }
  346. if (req_ctx->dst)
  347. free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
  348. free_buf_chain(dev, req_ctx->src, crypt->src_buf);
  349. req->base.complete(&req->base, failed);
  350. break;
  351. }
  352. case CTL_FLAG_GEN_ICV:
  353. ctx = crypto_tfm_ctx(crypt->data.tfm);
  354. dma_pool_free(ctx_pool, crypt->regist_ptr,
  355. crypt->regist_buf->phys_addr);
  356. dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
  357. if (atomic_dec_and_test(&ctx->configuring))
  358. complete(&ctx->completion);
  359. break;
  360. case CTL_FLAG_GEN_REVAES:
  361. ctx = crypto_tfm_ctx(crypt->data.tfm);
  362. *(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
  363. if (atomic_dec_and_test(&ctx->configuring))
  364. complete(&ctx->completion);
  365. break;
  366. default:
  367. BUG();
  368. }
  369. crypt->ctl_flags = CTL_FLAG_UNUSED;
  370. }
  371. static void irqhandler(void *_unused)
  372. {
  373. tasklet_schedule(&crypto_done_tasklet);
  374. }
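/*
 * Tasklet that drains completed requests.  At most four entries are
 * processed per run; if the queue may still hold entries, the tasklet
 * reschedules itself rather than looping indefinitely in softirq context.
 */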
  375. static void crypto_done_action(unsigned long arg)
  376. {
  377. int i;
  378. for (i = 0; i < 4; i++) {
  379. dma_addr_t phys = qmgr_get_entry(recv_qid);
  380. if (!phys)
  381. return;
  382. one_packet(phys);
  383. }
  384. tasklet_schedule(&crypto_done_tasklet);
  385. }
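/*
 * One-time hardware setup: resolve the NPE and the RX/TX-ready queues from
 * the device tree (falling back to NPE-C with queues 29/30 for platform
 * data), load the firmware if the NPE is not yet running, and use the
 * firmware status word to decide whether AES is supported.
 */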
  386. static int init_ixp_crypto(struct device *dev)
  387. {
  388. struct device_node *np = dev->of_node;
  389. u32 msg[2] = { 0, 0 };
  390. int ret = -ENODEV;
  391. u32 npe_id;
  392. dev_info(dev, "probing...\n");
  393. /* Locate the NPE and queue manager to use from device tree */
  394. if (IS_ENABLED(CONFIG_OF) && np) {
  395. struct of_phandle_args queue_spec;
  396. struct of_phandle_args npe_spec;
  397. ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
  398. 1, 0, &npe_spec);
  399. if (ret) {
  400. dev_err(dev, "no NPE engine specified\n");
  401. return -ENODEV;
  402. }
  403. npe_id = npe_spec.args[0];
  404. ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
  405. &queue_spec);
  406. if (ret) {
  407. dev_err(dev, "no rx queue phandle\n");
  408. return -ENODEV;
  409. }
  410. recv_qid = queue_spec.args[0];
  411. ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
  412. &queue_spec);
  413. if (ret) {
  414. dev_err(dev, "no txready queue phandle\n");
  415. return -ENODEV;
  416. }
  417. send_qid = queue_spec.args[0];
  418. } else {
  419. /*
  420. * Hardcoded engine when using platform data, this goes away
  421. * when we switch to using DT only.
  422. */
  423. npe_id = 2;
  424. send_qid = 29;
  425. recv_qid = 30;
  426. }
  427. npe_c = npe_request(npe_id);
  428. if (!npe_c)
  429. return ret;
  430. if (!npe_running(npe_c)) {
  431. ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
  432. if (ret)
  433. goto npe_release;
  434. if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
  435. goto npe_error;
  436. } else {
  437. if (npe_send_message(npe_c, msg, "STATUS_MSG"))
  438. goto npe_error;
  439. if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
  440. goto npe_error;
  441. }
  442. switch ((msg[1] >> 16) & 0xff) {
  443. case 3:
  444. dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
  445. support_aes = 0;
  446. break;
  447. case 4:
  448. case 5:
  449. support_aes = 1;
  450. break;
  451. default:
  452. dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
  453. ret = -ENODEV;
  454. goto npe_release;
  455. }
	/* buffer_pool is sometimes also used to store the HMAC,
	 * so make sure it is large enough
	 */
  459. BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
  460. buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
  461. 32, 0);
  462. ret = -ENOMEM;
  463. if (!buffer_pool)
  464. goto err;
  465. ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
  466. if (!ctx_pool)
  467. goto err;
  468. ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
  469. "ixp_crypto:out", NULL);
  470. if (ret)
  471. goto err;
  472. ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
  473. "ixp_crypto:in", NULL);
  474. if (ret) {
  475. qmgr_release_queue(send_qid);
  476. goto err;
  477. }
  478. qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
  479. tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
  480. qmgr_enable_irq(recv_qid);
  481. return 0;
  482. npe_error:
  483. dev_err(dev, "%s not responding\n", npe_name(npe_c));
  484. ret = -EIO;
  485. err:
  486. dma_pool_destroy(ctx_pool);
  487. dma_pool_destroy(buffer_pool);
  488. npe_release:
  489. npe_release(npe_c);
  490. return ret;
  491. }
  492. static void release_ixp_crypto(struct device *dev)
  493. {
  494. qmgr_disable_irq(recv_qid);
  495. tasklet_kill(&crypto_done_tasklet);
  496. qmgr_release_queue(send_qid);
  497. qmgr_release_queue(recv_qid);
  498. dma_pool_destroy(ctx_pool);
  499. dma_pool_destroy(buffer_pool);
  500. npe_release(npe_c);
  501. if (crypt_virt)
  502. dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
  503. crypt_virt, crypt_phys);
  504. }
  505. static void reset_sa_dir(struct ix_sa_dir *dir)
  506. {
  507. memset(dir->npe_ctx, 0, NPE_CTX_LEN);
  508. dir->npe_ctx_idx = 0;
  509. dir->npe_mode = 0;
  510. }
  511. static int init_sa_dir(struct ix_sa_dir *dir)
  512. {
  513. dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
  514. if (!dir->npe_ctx)
  515. return -ENOMEM;
  516. reset_sa_dir(dir);
  517. return 0;
  518. }
  519. static void free_sa_dir(struct ix_sa_dir *dir)
  520. {
  521. memset(dir->npe_ctx, 0, NPE_CTX_LEN);
  522. dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
  523. }
  524. static int init_tfm(struct crypto_tfm *tfm)
  525. {
  526. struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
  527. int ret;
  528. atomic_set(&ctx->configuring, 0);
  529. ret = init_sa_dir(&ctx->encrypt);
  530. if (ret)
  531. return ret;
  532. ret = init_sa_dir(&ctx->decrypt);
  533. if (ret)
  534. free_sa_dir(&ctx->encrypt);
  535. return ret;
  536. }
  537. static int init_tfm_ablk(struct crypto_skcipher *tfm)
  538. {
  539. struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
  540. struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
  541. const char *name = crypto_tfm_alg_name(ctfm);
  542. ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
  543. if (IS_ERR(ctx->fallback_tfm)) {
  544. pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
  545. name, PTR_ERR(ctx->fallback_tfm));
  546. return PTR_ERR(ctx->fallback_tfm);
  547. }
  548. pr_info("Fallback for %s is %s\n",
  549. crypto_tfm_alg_driver_name(&tfm->base),
  550. crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
  551. );
  552. crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
  553. return init_tfm(crypto_skcipher_tfm(tfm));
  554. }
  555. static int init_tfm_aead(struct crypto_aead *tfm)
  556. {
  557. crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
  558. return init_tfm(crypto_aead_tfm(tfm));
  559. }
  560. static void exit_tfm(struct crypto_tfm *tfm)
  561. {
  562. struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
  563. free_sa_dir(&ctx->encrypt);
  564. free_sa_dir(&ctx->decrypt);
  565. }
  566. static void exit_tfm_ablk(struct crypto_skcipher *tfm)
  567. {
  568. struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
  569. struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
  570. crypto_free_skcipher(ctx->fallback_tfm);
  571. exit_tfm(crypto_skcipher_tfm(tfm));
  572. }
  573. static void exit_tfm_aead(struct crypto_aead *tfm)
  574. {
  575. exit_tfm(crypto_aead_tfm(tfm));
  576. }
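/*
 * Precompute one HMAC chaining variable: let the NPE hash the key XORed
 * with the ipad/opad byte and store the intermediate digest at 'target'
 * inside the NPE context.  Completion is reported asynchronously through
 * ctx->configuring and ctx->completion (see one_packet()).
 */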
  577. static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
  578. int init_len, u32 ctx_addr, const u8 *key,
  579. int key_len)
  580. {
  581. struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
  582. struct crypt_ctl *crypt;
  583. struct buffer_desc *buf;
  584. int i;
  585. u8 *pad;
  586. dma_addr_t pad_phys, buf_phys;
  587. BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
  588. pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
  589. if (!pad)
  590. return -ENOMEM;
  591. buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
  592. if (!buf) {
  593. dma_pool_free(ctx_pool, pad, pad_phys);
  594. return -ENOMEM;
  595. }
  596. crypt = get_crypt_desc_emerg();
  597. if (!crypt) {
  598. dma_pool_free(ctx_pool, pad, pad_phys);
  599. dma_pool_free(buffer_pool, buf, buf_phys);
  600. return -EAGAIN;
  601. }
  602. memcpy(pad, key, key_len);
  603. memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
  604. for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
  605. pad[i] ^= xpad;
  606. crypt->data.tfm = tfm;
  607. crypt->regist_ptr = pad;
  608. crypt->regist_buf = buf;
  609. crypt->auth_offs = 0;
  610. crypt->auth_len = HMAC_PAD_BLOCKLEN;
  611. crypt->crypto_ctx = ctx_addr;
  612. crypt->src_buf = buf_phys;
  613. crypt->icv_rev_aes = target;
  614. crypt->mode = NPE_OP_HASH_GEN_ICV;
  615. crypt->init_len = init_len;
  616. crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
  617. buf->next = 0;
  618. buf->buf_len = HMAC_PAD_BLOCKLEN;
  619. buf->pkt_len = 0;
  620. buf->phys_addr = pad_phys;
  621. atomic_inc(&ctx->configuring);
  622. qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
  623. BUG_ON(qmgr_stat_overflow(send_qid));
  624. return 0;
  625. }
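/*
 * Write the hash configuration word and the algorithm's initial chaining
 * values into the per-direction NPE context, then queue generation of the
 * HMAC opad and ipad chain variables for the given key.
 */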
  626. static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
  627. const u8 *key, int key_len, unsigned int digest_len)
  628. {
  629. u32 itarget, otarget, npe_ctx_addr;
  630. unsigned char *cinfo;
  631. int init_len, ret = 0;
  632. u32 cfgword;
  633. struct ix_sa_dir *dir;
  634. struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
  635. const struct ix_hash_algo *algo;
  636. dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
  637. cinfo = dir->npe_ctx + dir->npe_ctx_idx;
  638. algo = ix_hash(tfm);
  639. /* write cfg word to cryptinfo */
  640. cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
  641. #ifndef __ARMEB__
  642. cfgword ^= 0xAA000000; /* change the "byte swap" flags */
  643. #endif
  644. *(u32 *)cinfo = cpu_to_be32(cfgword);
  645. cinfo += sizeof(cfgword);
  646. /* write ICV to cryptinfo */
  647. memcpy(cinfo, algo->icv, digest_len);
  648. cinfo += digest_len;
  649. itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
  650. + sizeof(algo->cfgword);
  651. otarget = itarget + digest_len;
  652. init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
  653. npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
  654. dir->npe_ctx_idx += init_len;
  655. dir->npe_mode |= NPE_OP_HASH_ENABLE;
  656. if (!encrypt)
  657. dir->npe_mode |= NPE_OP_HASH_VERIFY;
  658. ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
  659. init_len, npe_ctx_addr, key, key_len);
  660. if (ret)
  661. return ret;
  662. return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
  663. init_len, npe_ctx_addr, key, key_len);
  664. }
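/*
 * AES decryption needs the reverse key schedule.  Temporarily mark the
 * decrypt context as CIPH_ENCR and let the NPE expand the key
 * (NPE_OP_ENC_GEN_KEY), writing the result just after the config word;
 * the CTL_FLAG_GEN_REVAES completion clears CIPH_ENCR again.
 */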
  665. static int gen_rev_aes_key(struct crypto_tfm *tfm)
  666. {
  667. struct crypt_ctl *crypt;
  668. struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
  669. struct ix_sa_dir *dir = &ctx->decrypt;
  670. crypt = get_crypt_desc_emerg();
  671. if (!crypt)
  672. return -EAGAIN;
  673. *(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
  674. crypt->data.tfm = tfm;
  675. crypt->crypt_offs = 0;
  676. crypt->crypt_len = AES_BLOCK128;
  677. crypt->src_buf = 0;
  678. crypt->crypto_ctx = dir->npe_ctx_phys;
  679. crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
  680. crypt->mode = NPE_OP_ENC_GEN_KEY;
  681. crypt->init_len = dir->npe_ctx_idx;
  682. crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
  683. atomic_inc(&ctx->configuring);
  684. qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
  685. BUG_ON(qmgr_stat_overflow(send_qid));
  686. return 0;
  687. }
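/*
 * Build the cipher part of the NPE context: a config word (algorithm,
 * mode, key length) followed by the key.  Single DES keys are zero-padded
 * to DES3_EDE_KEY_SIZE as the NPE expects, and AES decryption additionally
 * schedules reverse key generation.
 */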
  688. static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
  689. int key_len)
  690. {
  691. u8 *cinfo;
  692. u32 cipher_cfg;
  693. u32 keylen_cfg = 0;
  694. struct ix_sa_dir *dir;
  695. struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
  696. int err;
  697. dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
  698. cinfo = dir->npe_ctx;
  699. if (encrypt) {
  700. cipher_cfg = cipher_cfg_enc(tfm);
  701. dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
  702. } else {
  703. cipher_cfg = cipher_cfg_dec(tfm);
  704. }
  705. if (cipher_cfg & MOD_AES) {
  706. switch (key_len) {
  707. case 16:
  708. keylen_cfg = MOD_AES128;
  709. break;
  710. case 24:
  711. keylen_cfg = MOD_AES192;
  712. break;
  713. case 32:
  714. keylen_cfg = MOD_AES256;
  715. break;
  716. default:
  717. return -EINVAL;
  718. }
  719. cipher_cfg |= keylen_cfg;
  720. } else {
  721. err = crypto_des_verify_key(tfm, key);
  722. if (err)
  723. return err;
  724. }
  725. /* write cfg word to cryptinfo */
  726. *(u32 *)cinfo = cpu_to_be32(cipher_cfg);
  727. cinfo += sizeof(cipher_cfg);
  728. /* write cipher key to cryptinfo */
  729. memcpy(cinfo, key, key_len);
  730. /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
  731. if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
  732. memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
  733. key_len = DES3_EDE_KEY_SIZE;
  734. }
  735. dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
  736. dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
  737. if ((cipher_cfg & MOD_AES) && !encrypt)
  738. return gen_rev_aes_key(tfm);
  739. return 0;
  740. }
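/*
 * Map each scatterlist entry and chain buffer_desc entries behind the
 * caller-provided on-stack "hook" descriptor.  Returns the last descriptor
 * on success, or NULL if a pool allocation fails; in either case the chain
 * reachable from the hook is properly terminated so it can be freed.
 */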
  741. static struct buffer_desc *chainup_buffers(struct device *dev,
  742. struct scatterlist *sg, unsigned int nbytes,
  743. struct buffer_desc *buf, gfp_t flags,
  744. enum dma_data_direction dir)
  745. {
  746. for (; nbytes > 0; sg = sg_next(sg)) {
  747. unsigned int len = min(nbytes, sg->length);
  748. struct buffer_desc *next_buf;
  749. dma_addr_t next_buf_phys;
  750. void *ptr;
  751. nbytes -= len;
  752. ptr = sg_virt(sg);
  753. next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			/* Terminate the chain built so far so the caller can
			 * free it through the hook, then report the failure
			 * instead of dereferencing a NULL buf below.
			 */
			buf->next = NULL;
			buf->phys_next = 0;
			return NULL;
		}
  758. sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
  759. buf->next = next_buf;
  760. buf->phys_next = next_buf_phys;
  761. buf = next_buf;
  762. buf->phys_addr = sg_dma_address(sg);
  763. buf->buf_len = len;
  764. buf->dir = dir;
  765. }
  766. buf->next = NULL;
  767. buf->phys_next = 0;
  768. return buf;
  769. }
  770. static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
  771. unsigned int key_len)
  772. {
  773. struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
  774. int ret;
  775. init_completion(&ctx->completion);
  776. atomic_inc(&ctx->configuring);
  777. reset_sa_dir(&ctx->encrypt);
  778. reset_sa_dir(&ctx->decrypt);
  779. ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
  780. ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
  781. ret = setup_cipher(&tfm->base, 0, key, key_len);
  782. if (ret)
  783. goto out;
  784. ret = setup_cipher(&tfm->base, 1, key, key_len);
  785. out:
  786. if (!atomic_dec_and_test(&ctx->configuring))
  787. wait_for_completion(&ctx->completion);
  788. if (ret)
  789. return ret;
  790. crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
  791. crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
  792. return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
  793. }
  794. static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
  795. unsigned int key_len)
  796. {
  797. return verify_skcipher_des3_key(tfm, key) ?:
  798. ablk_setkey(tfm, key, key_len);
  799. }
  800. static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
  801. unsigned int key_len)
  802. {
  803. struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	/* the nonce is stored at the end of the key */
  805. if (key_len < CTR_RFC3686_NONCE_SIZE)
  806. return -EINVAL;
  807. memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
  808. CTR_RFC3686_NONCE_SIZE);
  809. key_len -= CTR_RFC3686_NONCE_SIZE;
  810. return ablk_setkey(tfm, key, key_len);
  811. }
  812. static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
  813. {
  814. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  815. struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
  816. struct ablk_ctx *rctx = skcipher_request_ctx(areq);
  817. int err;
  818. skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
  819. skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
  820. areq->base.complete, areq->base.data);
  821. skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
  822. areq->cryptlen, areq->iv);
  823. if (encrypt)
  824. err = crypto_skcipher_encrypt(&rctx->fallback_req);
  825. else
  826. err = crypto_skcipher_decrypt(&rctx->fallback_req);
  827. return err;
  828. }
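/*
 * Main skcipher path.  Requests whose src or dst scatterlist has more than
 * one entry are handed to the software fallback; everything else is mapped
 * into buffer_desc chains and posted to the NPE, returning -EINPROGRESS.
 * For decryption the last ciphertext block is saved up front so the output
 * IV can still be produced after an in-place operation.
 */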
  829. static int ablk_perform(struct skcipher_request *req, int encrypt)
  830. {
  831. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
  832. struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
  833. unsigned int ivsize = crypto_skcipher_ivsize(tfm);
  834. struct ix_sa_dir *dir;
  835. struct crypt_ctl *crypt;
  836. unsigned int nbytes = req->cryptlen;
  837. enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
  838. struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
  839. struct buffer_desc src_hook;
  840. struct device *dev = &pdev->dev;
  841. unsigned int offset;
  842. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
  843. GFP_KERNEL : GFP_ATOMIC;
  844. if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
  845. return ixp4xx_cipher_fallback(req, encrypt);
  846. if (qmgr_stat_full(send_qid))
  847. return -EAGAIN;
  848. if (atomic_read(&ctx->configuring))
  849. return -EAGAIN;
  850. dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
  851. req_ctx->encrypt = encrypt;
  852. crypt = get_crypt_desc();
  853. if (!crypt)
  854. return -ENOMEM;
  855. crypt->data.ablk_req = req;
  856. crypt->crypto_ctx = dir->npe_ctx_phys;
  857. crypt->mode = dir->npe_mode;
  858. crypt->init_len = dir->npe_ctx_idx;
  859. crypt->crypt_offs = 0;
  860. crypt->crypt_len = nbytes;
  861. BUG_ON(ivsize && !req->iv);
  862. memcpy(crypt->iv, req->iv, ivsize);
  863. if (ivsize > 0 && !encrypt) {
  864. offset = req->cryptlen - ivsize;
  865. scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
  866. }
  867. if (req->src != req->dst) {
  868. struct buffer_desc dst_hook;
  869. crypt->mode |= NPE_OP_NOT_IN_PLACE;
  870. /* This was never tested by Intel
  871. * for more than one dst buffer, I think. */
  872. req_ctx->dst = NULL;
  873. if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
  874. flags, DMA_FROM_DEVICE))
  875. goto free_buf_dest;
  876. src_direction = DMA_TO_DEVICE;
  877. req_ctx->dst = dst_hook.next;
  878. crypt->dst_buf = dst_hook.phys_next;
  879. } else {
  880. req_ctx->dst = NULL;
  881. }
  882. req_ctx->src = NULL;
  883. if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
  884. src_direction))
  885. goto free_buf_src;
  886. req_ctx->src = src_hook.next;
  887. crypt->src_buf = src_hook.phys_next;
  888. crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
  889. qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
  890. BUG_ON(qmgr_stat_overflow(send_qid));
  891. return -EINPROGRESS;
  892. free_buf_src:
  893. free_buf_chain(dev, req_ctx->src, crypt->src_buf);
  894. free_buf_dest:
  895. if (req->src != req->dst)
  896. free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
  897. crypt->ctl_flags = CTL_FLAG_UNUSED;
  898. return -ENOMEM;
  899. }
  900. static int ablk_encrypt(struct skcipher_request *req)
  901. {
  902. return ablk_perform(req, 1);
  903. }
  904. static int ablk_decrypt(struct skcipher_request *req)
  905. {
  906. return ablk_perform(req, 0);
  907. }
  908. static int ablk_rfc3686_crypt(struct skcipher_request *req)
  909. {
  910. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
  911. struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
  912. u8 iv[CTR_RFC3686_BLOCK_SIZE];
  913. u8 *info = req->iv;
  914. int ret;
  915. /* set up counter block */
  916. memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
  917. memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
  918. /* initialize counter portion of counter block */
  919. *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
  920. cpu_to_be32(1);
  921. req->iv = iv;
  922. ret = ablk_perform(req, 1);
  923. req->iv = info;
  924. return ret;
  925. }
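/*
 * Authenc (AEAD) path.  The NPE authenticates assoclen + cryptlen bytes
 * and en/decrypts the payload in the same pass.  If the ICV does not sit
 * contiguously in the last buffer of the chain, it is staged in a
 * buffer_pool allocation (hmac_virt): copied from the source before
 * decryption, or copied out to the destination in finish_scattered_hmac()
 * after encryption.
 */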
  926. static int aead_perform(struct aead_request *req, int encrypt,
  927. int cryptoffset, int eff_cryptlen, u8 *iv)
  928. {
  929. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  930. struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
  931. unsigned int ivsize = crypto_aead_ivsize(tfm);
  932. unsigned int authsize = crypto_aead_authsize(tfm);
  933. struct ix_sa_dir *dir;
  934. struct crypt_ctl *crypt;
  935. unsigned int cryptlen;
  936. struct buffer_desc *buf, src_hook;
  937. struct aead_ctx *req_ctx = aead_request_ctx(req);
  938. struct device *dev = &pdev->dev;
  939. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
  940. GFP_KERNEL : GFP_ATOMIC;
  941. enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
  942. unsigned int lastlen;
  943. if (qmgr_stat_full(send_qid))
  944. return -EAGAIN;
  945. if (atomic_read(&ctx->configuring))
  946. return -EAGAIN;
  947. if (encrypt) {
  948. dir = &ctx->encrypt;
  949. cryptlen = req->cryptlen;
  950. } else {
  951. dir = &ctx->decrypt;
  952. /* req->cryptlen includes the authsize when decrypting */
  953. cryptlen = req->cryptlen - authsize;
  954. eff_cryptlen -= authsize;
  955. }
  956. crypt = get_crypt_desc();
  957. if (!crypt)
  958. return -ENOMEM;
  959. crypt->data.aead_req = req;
  960. crypt->crypto_ctx = dir->npe_ctx_phys;
  961. crypt->mode = dir->npe_mode;
  962. crypt->init_len = dir->npe_ctx_idx;
  963. crypt->crypt_offs = cryptoffset;
  964. crypt->crypt_len = eff_cryptlen;
  965. crypt->auth_offs = 0;
  966. crypt->auth_len = req->assoclen + cryptlen;
  967. BUG_ON(ivsize && !req->iv);
  968. memcpy(crypt->iv, req->iv, ivsize);
  969. buf = chainup_buffers(dev, req->src, crypt->auth_len,
  970. &src_hook, flags, src_direction);
  971. req_ctx->src = src_hook.next;
  972. crypt->src_buf = src_hook.phys_next;
  973. if (!buf)
  974. goto free_buf_src;
  975. lastlen = buf->buf_len;
  976. if (lastlen >= authsize)
  977. crypt->icv_rev_aes = buf->phys_addr +
  978. buf->buf_len - authsize;
  979. req_ctx->dst = NULL;
  980. if (req->src != req->dst) {
  981. struct buffer_desc dst_hook;
  982. crypt->mode |= NPE_OP_NOT_IN_PLACE;
  983. src_direction = DMA_TO_DEVICE;
  984. buf = chainup_buffers(dev, req->dst, crypt->auth_len,
  985. &dst_hook, flags, DMA_FROM_DEVICE);
  986. req_ctx->dst = dst_hook.next;
  987. crypt->dst_buf = dst_hook.phys_next;
  988. if (!buf)
  989. goto free_buf_dst;
  990. if (encrypt) {
  991. lastlen = buf->buf_len;
  992. if (lastlen >= authsize)
  993. crypt->icv_rev_aes = buf->phys_addr +
  994. buf->buf_len - authsize;
  995. }
  996. }
  997. if (unlikely(lastlen < authsize)) {
		/* The ICV bytes are scattered across sg entries;
		 * copy them through a contiguous bounce buffer
		 */
  1000. req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
  1001. &crypt->icv_rev_aes);
  1002. if (unlikely(!req_ctx->hmac_virt))
  1003. goto free_buf_dst;
  1004. if (!encrypt) {
  1005. scatterwalk_map_and_copy(req_ctx->hmac_virt,
  1006. req->src, cryptlen, authsize, 0);
  1007. }
  1008. req_ctx->encrypt = encrypt;
  1009. } else {
  1010. req_ctx->hmac_virt = NULL;
  1011. }
  1012. crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
  1013. qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
  1014. BUG_ON(qmgr_stat_overflow(send_qid));
  1015. return -EINPROGRESS;
  1016. free_buf_dst:
  1017. free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
  1018. free_buf_src:
  1019. free_buf_chain(dev, req_ctx->src, crypt->src_buf);
  1020. crypt->ctl_flags = CTL_FLAG_UNUSED;
  1021. return -ENOMEM;
  1022. }
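/*
 * (Re)program both SA directions with the current cipher and auth keys for
 * the given authsize.  ctx->configuring counts outstanding NPE
 * configuration descriptors; the completion lets setkey callers wait until
 * the hardware context is consistent again.
 */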
  1023. static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
  1024. {
  1025. struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
  1026. unsigned int digest_len = crypto_aead_maxauthsize(tfm);
  1027. int ret;
  1028. if (!ctx->enckey_len && !ctx->authkey_len)
  1029. return 0;
  1030. init_completion(&ctx->completion);
  1031. atomic_inc(&ctx->configuring);
  1032. reset_sa_dir(&ctx->encrypt);
  1033. reset_sa_dir(&ctx->decrypt);
  1034. ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
  1035. if (ret)
  1036. goto out;
  1037. ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
  1038. if (ret)
  1039. goto out;
  1040. ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
  1041. ctx->authkey_len, digest_len);
  1042. if (ret)
  1043. goto out;
  1044. ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
  1045. ctx->authkey_len, digest_len);
  1046. out:
  1047. if (!atomic_dec_and_test(&ctx->configuring))
  1048. wait_for_completion(&ctx->completion);
  1049. return ret;
  1050. }
  1051. static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
  1052. {
  1053. int max = crypto_aead_maxauthsize(tfm) >> 2;
  1054. if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
  1055. return -EINVAL;
  1056. return aead_setup(tfm, authsize);
  1057. }
  1058. static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
  1059. unsigned int keylen)
  1060. {
  1061. struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
  1062. struct crypto_authenc_keys keys;
  1063. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
  1064. goto badkey;
  1065. if (keys.authkeylen > sizeof(ctx->authkey))
  1066. goto badkey;
  1067. if (keys.enckeylen > sizeof(ctx->enckey))
  1068. goto badkey;
  1069. memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
  1070. memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
  1071. ctx->authkey_len = keys.authkeylen;
  1072. ctx->enckey_len = keys.enckeylen;
  1073. memzero_explicit(&keys, sizeof(keys));
  1074. return aead_setup(tfm, crypto_aead_authsize(tfm));
  1075. badkey:
  1076. memzero_explicit(&keys, sizeof(keys));
  1077. return -EINVAL;
  1078. }
  1079. static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
  1080. unsigned int keylen)
  1081. {
  1082. struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
  1083. struct crypto_authenc_keys keys;
  1084. int err;
  1085. err = crypto_authenc_extractkeys(&keys, key, keylen);
  1086. if (unlikely(err))
  1087. goto badkey;
  1088. err = -EINVAL;
  1089. if (keys.authkeylen > sizeof(ctx->authkey))
  1090. goto badkey;
  1091. err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
  1092. if (err)
  1093. goto badkey;
  1094. memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
  1095. memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
  1096. ctx->authkey_len = keys.authkeylen;
  1097. ctx->enckey_len = keys.enckeylen;
  1098. memzero_explicit(&keys, sizeof(keys));
  1099. return aead_setup(tfm, crypto_aead_authsize(tfm));
  1100. badkey:
  1101. memzero_explicit(&keys, sizeof(keys));
  1102. return err;
  1103. }
  1104. static int aead_encrypt(struct aead_request *req)
  1105. {
  1106. return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
  1107. }
  1108. static int aead_decrypt(struct aead_request *req)
  1109. {
  1110. return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
  1111. }
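/*
 * Algorithm templates.  The driver name, common flags and default
 * callbacks are filled in by ixp_crypto_probe() before registration.
 */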
  1112. static struct ixp_alg ixp4xx_algos[] = {
  1113. {
  1114. .crypto = {
  1115. .base.cra_name = "cbc(des)",
  1116. .base.cra_blocksize = DES_BLOCK_SIZE,
  1117. .min_keysize = DES_KEY_SIZE,
  1118. .max_keysize = DES_KEY_SIZE,
  1119. .ivsize = DES_BLOCK_SIZE,
  1120. },
  1121. .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
  1122. .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
  1123. }, {
  1124. .crypto = {
  1125. .base.cra_name = "ecb(des)",
  1126. .base.cra_blocksize = DES_BLOCK_SIZE,
  1127. .min_keysize = DES_KEY_SIZE,
  1128. .max_keysize = DES_KEY_SIZE,
  1129. },
  1130. .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
  1131. .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
  1132. }, {
  1133. .crypto = {
  1134. .base.cra_name = "cbc(des3_ede)",
  1135. .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
  1136. .min_keysize = DES3_EDE_KEY_SIZE,
  1137. .max_keysize = DES3_EDE_KEY_SIZE,
  1138. .ivsize = DES3_EDE_BLOCK_SIZE,
  1139. .setkey = ablk_des3_setkey,
  1140. },
  1141. .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
  1142. .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
  1143. }, {
  1144. .crypto = {
  1145. .base.cra_name = "ecb(des3_ede)",
  1146. .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
  1147. .min_keysize = DES3_EDE_KEY_SIZE,
  1148. .max_keysize = DES3_EDE_KEY_SIZE,
  1149. .setkey = ablk_des3_setkey,
  1150. },
  1151. .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
  1152. .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
  1153. }, {
  1154. .crypto = {
  1155. .base.cra_name = "cbc(aes)",
  1156. .base.cra_blocksize = AES_BLOCK_SIZE,
  1157. .min_keysize = AES_MIN_KEY_SIZE,
  1158. .max_keysize = AES_MAX_KEY_SIZE,
  1159. .ivsize = AES_BLOCK_SIZE,
  1160. },
  1161. .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
  1162. .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
  1163. }, {
  1164. .crypto = {
  1165. .base.cra_name = "ecb(aes)",
  1166. .base.cra_blocksize = AES_BLOCK_SIZE,
  1167. .min_keysize = AES_MIN_KEY_SIZE,
  1168. .max_keysize = AES_MAX_KEY_SIZE,
  1169. },
  1170. .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
  1171. .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
  1172. }, {
  1173. .crypto = {
  1174. .base.cra_name = "ctr(aes)",
  1175. .base.cra_blocksize = 1,
  1176. .min_keysize = AES_MIN_KEY_SIZE,
  1177. .max_keysize = AES_MAX_KEY_SIZE,
  1178. .ivsize = AES_BLOCK_SIZE,
  1179. },
  1180. .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
  1181. .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
  1182. }, {
  1183. .crypto = {
  1184. .base.cra_name = "rfc3686(ctr(aes))",
  1185. .base.cra_blocksize = 1,
  1186. .min_keysize = AES_MIN_KEY_SIZE,
  1187. .max_keysize = AES_MAX_KEY_SIZE,
  1188. .ivsize = AES_BLOCK_SIZE,
  1189. .setkey = ablk_rfc3686_setkey,
  1190. .encrypt = ablk_rfc3686_crypt,
  1191. .decrypt = ablk_rfc3686_crypt,
  1192. },
  1193. .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
  1194. .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
  1195. } };
  1196. static struct ixp_aead_alg ixp4xx_aeads[] = {
  1197. {
  1198. .crypto = {
  1199. .base = {
  1200. .cra_name = "authenc(hmac(md5),cbc(des))",
  1201. .cra_blocksize = DES_BLOCK_SIZE,
  1202. },
  1203. .ivsize = DES_BLOCK_SIZE,
  1204. .maxauthsize = MD5_DIGEST_SIZE,
  1205. },
  1206. .hash = &hash_alg_md5,
  1207. .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
  1208. .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
  1209. }, {
  1210. .crypto = {
  1211. .base = {
  1212. .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
  1213. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  1214. },
  1215. .ivsize = DES3_EDE_BLOCK_SIZE,
  1216. .maxauthsize = MD5_DIGEST_SIZE,
  1217. .setkey = des3_aead_setkey,
  1218. },
  1219. .hash = &hash_alg_md5,
  1220. .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
  1221. .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
  1222. }, {
  1223. .crypto = {
  1224. .base = {
  1225. .cra_name = "authenc(hmac(sha1),cbc(des))",
  1226. .cra_blocksize = DES_BLOCK_SIZE,
  1227. },
  1228. .ivsize = DES_BLOCK_SIZE,
  1229. .maxauthsize = SHA1_DIGEST_SIZE,
  1230. },
  1231. .hash = &hash_alg_sha1,
  1232. .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
  1233. .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
  1234. }, {
  1235. .crypto = {
  1236. .base = {
  1237. .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
  1238. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  1239. },
  1240. .ivsize = DES3_EDE_BLOCK_SIZE,
  1241. .maxauthsize = SHA1_DIGEST_SIZE,
  1242. .setkey = des3_aead_setkey,
  1243. },
  1244. .hash = &hash_alg_sha1,
  1245. .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
  1246. .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
  1247. }, {
  1248. .crypto = {
  1249. .base = {
  1250. .cra_name = "authenc(hmac(md5),cbc(aes))",
  1251. .cra_blocksize = AES_BLOCK_SIZE,
  1252. },
  1253. .ivsize = AES_BLOCK_SIZE,
  1254. .maxauthsize = MD5_DIGEST_SIZE,
  1255. },
  1256. .hash = &hash_alg_md5,
  1257. .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
  1258. .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
  1259. }, {
  1260. .crypto = {
  1261. .base = {
  1262. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  1263. .cra_blocksize = AES_BLOCK_SIZE,
  1264. },
  1265. .ivsize = AES_BLOCK_SIZE,
  1266. .maxauthsize = SHA1_DIGEST_SIZE,
  1267. },
  1268. .hash = &hash_alg_sha1,
  1269. .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
  1270. .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
  1271. } };
  1272. #define IXP_POSTFIX "-ixp4xx"
  1273. static int ixp_crypto_probe(struct platform_device *_pdev)
  1274. {
  1275. struct device *dev = &_pdev->dev;
  1276. int num = ARRAY_SIZE(ixp4xx_algos);
  1277. int i, err;
  1278. pdev = _pdev;
  1279. err = init_ixp_crypto(dev);
  1280. if (err)
  1281. return err;
  1282. for (i = 0; i < num; i++) {
  1283. struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
  1284. if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
  1285. "%s"IXP_POSTFIX, cra->base.cra_name) >=
  1286. CRYPTO_MAX_ALG_NAME)
  1287. continue;
  1288. if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
  1289. continue;
  1290. /* block ciphers */
  1291. cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
  1292. CRYPTO_ALG_ASYNC |
  1293. CRYPTO_ALG_ALLOCATES_MEMORY |
  1294. CRYPTO_ALG_NEED_FALLBACK;
  1295. if (!cra->setkey)
  1296. cra->setkey = ablk_setkey;
  1297. if (!cra->encrypt)
  1298. cra->encrypt = ablk_encrypt;
  1299. if (!cra->decrypt)
  1300. cra->decrypt = ablk_decrypt;
  1301. cra->init = init_tfm_ablk;
  1302. cra->exit = exit_tfm_ablk;
  1303. cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
  1304. cra->base.cra_module = THIS_MODULE;
  1305. cra->base.cra_alignmask = 3;
  1306. cra->base.cra_priority = 300;
  1307. if (crypto_register_skcipher(cra))
  1308. dev_err(&pdev->dev, "Failed to register '%s'\n",
  1309. cra->base.cra_name);
  1310. else
  1311. ixp4xx_algos[i].registered = 1;
  1312. }
  1313. for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
  1314. struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
  1315. if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
  1316. "%s"IXP_POSTFIX, cra->base.cra_name) >=
  1317. CRYPTO_MAX_ALG_NAME)
  1318. continue;
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
  1320. continue;
  1321. /* authenc */
  1322. cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
  1323. CRYPTO_ALG_ASYNC |
  1324. CRYPTO_ALG_ALLOCATES_MEMORY;
  1325. cra->setkey = cra->setkey ?: aead_setkey;
  1326. cra->setauthsize = aead_setauthsize;
  1327. cra->encrypt = aead_encrypt;
  1328. cra->decrypt = aead_decrypt;
  1329. cra->init = init_tfm_aead;
  1330. cra->exit = exit_tfm_aead;
  1331. cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
  1332. cra->base.cra_module = THIS_MODULE;
  1333. cra->base.cra_alignmask = 3;
  1334. cra->base.cra_priority = 300;
  1335. if (crypto_register_aead(cra))
  1336. dev_err(&pdev->dev, "Failed to register '%s'\n",
  1337. cra->base.cra_driver_name);
  1338. else
  1339. ixp4xx_aeads[i].registered = 1;
  1340. }
  1341. return 0;
  1342. }
  1343. static int ixp_crypto_remove(struct platform_device *pdev)
  1344. {
  1345. int num = ARRAY_SIZE(ixp4xx_algos);
  1346. int i;
  1347. for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
  1348. if (ixp4xx_aeads[i].registered)
  1349. crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
  1350. }
  1351. for (i = 0; i < num; i++) {
  1352. if (ixp4xx_algos[i].registered)
  1353. crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
  1354. }
  1355. release_ixp_crypto(&pdev->dev);
  1356. return 0;
  1357. }
  1358. static const struct of_device_id ixp4xx_crypto_of_match[] = {
  1359. {
  1360. .compatible = "intel,ixp4xx-crypto",
  1361. },
  1362. {},
  1363. };
  1364. static struct platform_driver ixp_crypto_driver = {
  1365. .probe = ixp_crypto_probe,
  1366. .remove = ixp_crypto_remove,
  1367. .driver = {
  1368. .name = "ixp4xx_crypto",
  1369. .of_match_table = ixp4xx_crypto_of_match,
  1370. },
  1371. };
  1372. module_platform_driver(ixp_crypto_driver);
  1373. MODULE_LICENSE("GPL");
  1374. MODULE_AUTHOR("Christian Hohnstaedt <[email protected]>");
  1375. MODULE_DESCRIPTION("IXP4xx hardware crypto");