aes_s390.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber ([email protected])
 *		Sebastian Siewior <[email protected]> SW-Fallback
 *		Patrick Steuer <[email protected]>
 *		Harald Freudenberger <[email protected]>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
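
/*
 * ctrblk is a page-sized scratch buffer of pre-computed counter blocks,
 * shared by all CTR tfms and serialized by ctrblk_lock.  The cpacf_mask_t
 * variables hold the function-code masks returned by the CPACF query
 * instructions at module init; the setkey routines consult them to decide
 * whether a given AES key size can be handled by the KM, KMC, KMCTR or KMA
 * instruction or has to go to the software fallback.
 */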
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};
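
/*
 * Scatterlist walk state for the GCM path.  The KMA instruction needs
 * contiguous buffers, so the walk either hands out a pointer directly into
 * the currently mapped scatterlist entry or, when an entry ends mid-block,
 * stages bytes in the bounce buffer 'buf' until at least the requested
 * minimum is available.
 */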
struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}
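
/*
 * Single-block "aes" cipher.  The setkey routine maps the key length to a
 * KM function code; if the machine does not provide that code, the key is
 * handed to the allocated software fallback cipher instead and sctx->fc
 * stays 0, which routes the encrypt/decrypt callbacks to the fallback.
 */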
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-s390",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= crypto_aes_encrypt,
			.cia_decrypt		= crypto_aes_decrypt,
		}
	}
};
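
/*
 * Common fallback plumbing for the skcipher modes (ECB, CBC, CTR).  The
 * request size of the s390 tfm is extended to hold a nested request for the
 * software fallback, so fallback_skcipher_crypt() can duplicate the original
 * request, retarget it at the fallback tfm and run encrypt or decrypt there.
 */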
static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}

static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
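
/*
 * ECB: walk the request with skcipher_walk_virt() and feed each run of
 * complete AES blocks straight to the KM instruction; a remainder that is
 * not a multiple of AES_BLOCK_SIZE is left to the walk for the next
 * iteration.
 */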
static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-s390",
	.base.cra_priority	= 401,	/* combo: aes + ecb + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= ecb_aes_set_key,
	.encrypt		= ecb_aes_encrypt,
	.decrypt		= ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
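
/*
 * CBC: the KMC instruction takes a parameter block consisting of the
 * chaining value (IV) followed by the key.  KMC updates the chaining value
 * in place, so it is copied back into walk.iv after every chunk and the
 * whole parameter block is wiped before returning.
 */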
static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= cbc_aes_set_key,
	.encrypt		= cbc_aes_encrypt,
	.decrypt		= cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64)
		return -EINVAL;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}
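
/*
 * XTS: the PCC instruction computes the initial XTS value from the second
 * half of the key and the IV (tweak); the result in pcc_param.xts then
 * seeds the KM-XTS parameter block, which holds the first half of the key
 * followed by that value.  Both parameter blocks are sized for 256-bit
 * subkeys; for AES-128-XTS they are 16 bytes shorter, which is what the
 * key_len & 0x10 offset accounts for.  Requests that are not block aligned,
 * or tfms without a usable XTS function code, are handed to the fallback.
 */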
static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&pcc_param, sizeof(pcc_param));
	memzero_explicit(&xts_param, sizeof(xts_param));
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name		= "xts(aes)",
	.base.cra_driver_name	= "xts-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_xts_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= xts_fallback_init,
	.exit			= xts_fallback_exit,
	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= xts_aes_set_key,
	.encrypt		= xts_aes_encrypt,
	.decrypt		= xts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
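
/*
 * CTR: KMCTR consumes a list of pre-computed counter blocks.  __ctrblk_init()
 * fills the shared ctrblk page with consecutive counter values derived from
 * the IV so that up to PAGE_SIZE bytes can be processed per invocation.  The
 * page is shared by all tfms, hence the mutex_trylock(&ctrblk_lock) in
 * ctr_aes_crypt(); if the lock is contended, the code simply processes one
 * block at a time using walk.iv as the counter.
 */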
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);

		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ctr_aes_set_key,
	.encrypt		= ctr_aes_crypt,
	.decrypt		= ctr_aes_crypt,
	.chunksize		= AES_BLOCK_SIZE,
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(gw->walk_ptr);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}
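
/*
 * The *_walk_go() helpers set gw->ptr/gw->nbytes to the next chunk of at
 * least 'minbytesneeded' bytes, pointing straight into the mapped
 * scatterlist entry when it is large enough and staging data in gw->buf
 * otherwise.  The matching *_walk_done() helpers consume 'bytesdone' bytes:
 * the input side keeps any unprocessed buffered bytes, the output side
 * copies buffered results back out to the scatterlist.
 */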
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(gw->walk_ptr);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;

		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}
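
/*
 * A single KMA invocation can consume AAD and plain-/ciphertext at the same
 * time.  Only the final AAD and payload portions, flagged with
 * CPACF_KMA_LAAD and CPACF_KMA_LPC, may be shorter than a block, so the
 * loop below walks source and destination in lock-step and rounds the
 * intermediate AAD and payload byte counts down to AES_BLOCK_SIZE
 * multiples.  The authentication tag is read from (decrypt) or written to
 * (encrypt) the scatterlist right after the payload.
 */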
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base			= {
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};
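
/*
 * Registration bookkeeping: only algorithms whose CPACF function codes are
 * reported by the query instructions are registered, and the pointers and
 * counter below record what was registered so that aes_s390_fini() can
 * unwind both on module exit and on a partially failed init.
 */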
static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[4];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);