rsa-pkcs1pad.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * RSA padding templates.
  4. *
  5. * Copyright (c) 2015 Intel Corporation
  6. */
  7. #include <crypto/algapi.h>
  8. #include <crypto/akcipher.h>
  9. #include <crypto/internal/akcipher.h>
  10. #include <crypto/internal/rsa.h>
  11. #include <linux/err.h>
  12. #include <linux/init.h>
  13. #include <linux/kernel.h>
  14. #include <linux/module.h>
  15. #include <linux/random.h>
  16. #include <linux/scatterlist.h>
  17. /*
  18. * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
  19. */
  20. static const u8 rsa_digest_info_md5[] = {
  21. 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
  22. 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
  23. 0x05, 0x00, 0x04, 0x10
  24. };
  25. static const u8 rsa_digest_info_sha1[] = {
  26. 0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
  27. 0x2b, 0x0e, 0x03, 0x02, 0x1a,
  28. 0x05, 0x00, 0x04, 0x14
  29. };
  30. static const u8 rsa_digest_info_rmd160[] = {
  31. 0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
  32. 0x2b, 0x24, 0x03, 0x02, 0x01,
  33. 0x05, 0x00, 0x04, 0x14
  34. };
  35. static const u8 rsa_digest_info_sha224[] = {
  36. 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
  37. 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
  38. 0x05, 0x00, 0x04, 0x1c
  39. };
  40. static const u8 rsa_digest_info_sha256[] = {
  41. 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
  42. 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
  43. 0x05, 0x00, 0x04, 0x20
  44. };
  45. static const u8 rsa_digest_info_sha384[] = {
  46. 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
  47. 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
  48. 0x05, 0x00, 0x04, 0x30
  49. };
  50. static const u8 rsa_digest_info_sha512[] = {
  51. 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
  52. 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
  53. 0x05, 0x00, 0x04, 0x40
  54. };
  55. static const struct rsa_asn1_template {
  56. const char *name;
  57. const u8 *data;
  58. size_t size;
  59. } rsa_asn1_templates[] = {
  60. #define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) }
  61. _(md5),
  62. _(sha1),
  63. _(rmd160),
  64. _(sha256),
  65. _(sha384),
  66. _(sha512),
  67. _(sha224),
  68. { NULL }
  69. #undef _
  70. };
  71. static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
  72. {
  73. const struct rsa_asn1_template *p;
  74. for (p = rsa_asn1_templates; p->name; p++)
  75. if (strcmp(name, p->name) == 0)
  76. return p;
  77. return NULL;
  78. }
/* Per-transform state: the wrapped raw-RSA tfm and its modulus size. */
struct pkcs1pad_ctx {
	struct crypto_akcipher *child;
	/* Modulus size in bytes; 0 until a key has been set successfully. */
	unsigned int key_size;
};

/* Per-instance state set up at template instantiation time. */
struct pkcs1pad_inst_ctx {
	struct crypto_akcipher_spawn spawn;
	/* DigestInfo for the instance's hash parameter; NULL when the
	 * template was instantiated without a hash name. */
	const struct rsa_asn1_template *digest_info;
};

/* Per-request state, embedded in the akcipher request context. */
struct pkcs1pad_request {
	/* in_sg may chain to the caller's req->src (see pkcs1pad_sg_set_buf) */
	struct scatterlist in_sg[2], out_sg[1];
	uint8_t *in_buf, *out_buf;
	/* Kept last: pkcs1pad_init_tfm sizes the request context so the
	 * child's own request context trails this struct. */
	struct akcipher_request child_req;
};
  92. static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
  93. unsigned int keylen)
  94. {
  95. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  96. int err;
  97. ctx->key_size = 0;
  98. err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
  99. if (err)
  100. return err;
  101. /* Find out new modulus size from rsa implementation */
  102. err = crypto_akcipher_maxsize(ctx->child);
  103. if (err > PAGE_SIZE)
  104. return -ENOTSUPP;
  105. ctx->key_size = err;
  106. return 0;
  107. }
  108. static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
  109. unsigned int keylen)
  110. {
  111. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  112. int err;
  113. ctx->key_size = 0;
  114. err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
  115. if (err)
  116. return err;
  117. /* Find out new modulus size from rsa implementation */
  118. err = crypto_akcipher_maxsize(ctx->child);
  119. if (err > PAGE_SIZE)
  120. return -ENOTSUPP;
  121. ctx->key_size = err;
  122. return 0;
  123. }
  124. static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
  125. {
  126. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  127. /*
  128. * The maximum destination buffer size for the encrypt/sign operations
  129. * will be the same as for RSA, even though it's smaller for
  130. * decrypt/verify.
  131. */
  132. return ctx->key_size;
  133. }
  134. static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
  135. struct scatterlist *next)
  136. {
  137. int nsegs = next ? 2 : 1;
  138. sg_init_table(sg, nsegs);
  139. sg_set_buf(sg, buf, len);
  140. if (next)
  141. sg_chain(sg, nsegs, next);
  142. }
/*
 * Completion handler shared by encrypt and sign.
 *
 * The raw RSA result can be shorter than the modulus when it has leading
 * zero bytes; in that case shift it to the tail of a zero-filled
 * key_size-d buffer and write that back to req->dst, so callers always
 * see exactly key_size bytes.  Always frees the padding buffer allocated
 * by pkcs1pad_encrypt()/pkcs1pad_sign() and reports key_size as dst_len.
 */
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int pad_len;
	unsigned int len;
	u8 *out_buf;

	if (err)
		goto out;

	len = req_ctx->child_req.dst_len;
	pad_len = ctx->key_size - len;

	/* Four billion to one */
	if (likely(!pad_len))
		goto out;

	/* kzalloc so the pad_len leading bytes are already zero */
	out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
	err = -ENOMEM;
	if (!out_buf)
		goto out;

	/* Copy out, shift right by pad_len, copy back — req->dst cannot be
	 * shifted in place through a scatterlist. */
	sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
			  out_buf + pad_len, len);
	sg_copy_from_buffer(req->dst,
			    sg_nents_for_len(req->dst, ctx->key_size),
			    out_buf, ctx->key_size);
	kfree_sensitive(out_buf);

out:
	req->dst_len = ctx->key_size;

	kfree(req_ctx->in_buf);

	return err;
}
  173. static void pkcs1pad_encrypt_sign_complete_cb(
  174. struct crypto_async_request *child_async_req, int err)
  175. {
  176. struct akcipher_request *req = child_async_req->data;
  177. if (err == -EINPROGRESS)
  178. goto out;
  179. err = pkcs1pad_encrypt_sign_complete(req, err);
  180. out:
  181. akcipher_request_complete(req, err);
  182. }
/*
 * RSAES-PKCS1-v1_5 encrypt: prefix req->src with 0x02 || PS || 0x00
 * (the leading 0x00 of the encoded message is implicit in the RSA
 * primitive) and hand the chained scatterlist to the child transform.
 *
 * Returns -EINVAL if no key is set, -EOVERFLOW if the message or the
 * destination buffer doesn't fit the modulus, -ENOMEM on allocation
 * failure, otherwise the child's encrypt result.
 */
static int pkcs1pad_encrypt(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;
	unsigned int i, ps_end;

	if (!ctx->key_size)
		return -EINVAL;

	/* 3 framing bytes plus at least 8 bytes of PS must fit */
	if (req->src_len > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	/* Everything before the plaintext except the implicit leading 0x00 */
	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
				  GFP_KERNEL);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	ps_end = ctx->key_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x02;
	/* PS must be random and non-zero: 1 + prandom_u32_max(255) ∈ [1,255] */
	for (i = 1; i < ps_end; i++)
		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
	req_ctx->in_buf[ps_end] = 0x00;

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			    ctx->key_size - 1 - req->src_len, req->src);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
				      pkcs1pad_encrypt_sign_complete_cb, req);

	/* Reuse output buffer */
	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
				   req->dst, ctx->key_size - 1, req->dst_len);

	err = crypto_akcipher_encrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS && err != -EBUSY)
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}
/*
 * Strip RSAES-PKCS1-v1_5 padding from the child decrypt result:
 * [0x00] 0x02 || PS (>= 8 nonzero bytes) || 0x00 || message.
 *
 * Returns -EINVAL for malformed padding, -EOVERFLOW if the caller's dst
 * is too small (req->dst_len is then set to the required size), 0 on
 * success with the message copied to req->dst.
 *
 * NOTE(review): the padding checks branch on secret-dependent data, so
 * this path is not constant-time — a known property of this parsing
 * style; confirm against the project's threat model before relying on it.
 */
static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int dst_len;
	unsigned int pos;
	u8 *out_buf;

	if (err)
		goto done;

	err = -EINVAL;
	dst_len = req_ctx->child_req.dst_len;
	/* Child may omit at most the single leading zero byte */
	if (dst_len < ctx->key_size - 1)
		goto done;

	out_buf = req_ctx->out_buf;
	if (dst_len == ctx->key_size) {
		if (out_buf[0] != 0x00)
			/* Decrypted value had no leading 0 byte */
			goto done;

		dst_len--;
		out_buf++;
	}

	if (out_buf[0] != 0x02)
		goto done;

	/* Scan for the 0x00 separator that terminates PS */
	for (pos = 1; pos < dst_len; pos++)
		if (out_buf[pos] == 0x00)
			break;
	/* pos < 9: PS shorter than the mandatory 8 bytes; pos == dst_len:
	 * no separator found at all */
	if (pos < 9 || pos == dst_len)
		goto done;
	pos++;

	err = 0;

	if (req->dst_len < dst_len - pos)
		err = -EOVERFLOW;
	req->dst_len = dst_len - pos;

	if (!err)
		sg_copy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, req->dst_len),
				out_buf + pos, req->dst_len);

done:
	kfree_sensitive(req_ctx->out_buf);

	return err;
}
  262. static void pkcs1pad_decrypt_complete_cb(
  263. struct crypto_async_request *child_async_req, int err)
  264. {
  265. struct akcipher_request *req = child_async_req->data;
  266. if (err == -EINPROGRESS)
  267. goto out;
  268. err = pkcs1pad_decrypt_complete(req, err);
  269. out:
  270. akcipher_request_complete(req, err);
  271. }
  272. static int pkcs1pad_decrypt(struct akcipher_request *req)
  273. {
  274. struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
  275. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  276. struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
  277. int err;
  278. if (!ctx->key_size || req->src_len != ctx->key_size)
  279. return -EINVAL;
  280. req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
  281. if (!req_ctx->out_buf)
  282. return -ENOMEM;
  283. pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
  284. ctx->key_size, NULL);
  285. akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
  286. akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
  287. pkcs1pad_decrypt_complete_cb, req);
  288. /* Reuse input buffer, output to a new buffer */
  289. akcipher_request_set_crypt(&req_ctx->child_req, req->src,
  290. req_ctx->out_sg, req->src_len,
  291. ctx->key_size);
  292. err = crypto_akcipher_decrypt(&req_ctx->child_req);
  293. if (err != -EINPROGRESS && err != -EBUSY)
  294. return pkcs1pad_decrypt_complete(req, err);
  295. return err;
  296. }
/*
 * EMSA-PKCS1-v1_5 sign: build 0x01 || 0xff...0xff || 0x00 [|| DigestInfo]
 * in front of the caller's digest (req->src) and run the RSA private-key
 * operation.  The leading 0x00 of the encoded message is implicit in the
 * RSA primitive.  DigestInfo is included only when the template instance
 * was created with a hash parameter.
 *
 * Returns -EINVAL if no key is set, -EOVERFLOW if the padded message or
 * the destination buffer doesn't fit the modulus, -ENOMEM on allocation
 * failure, otherwise the child's result.
 */
static int pkcs1pad_sign(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
	const struct rsa_asn1_template *digest_info = ictx->digest_info;
	int err;
	unsigned int ps_end, digest_info_size = 0;

	if (!ctx->key_size)
		return -EINVAL;

	if (digest_info)
		digest_info_size = digest_info->size;

	/* 3 framing bytes plus at least 8 bytes of 0xff padding must fit */
	if (req->src_len + digest_info_size > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	/* Everything before the digest except the implicit leading 0x00 */
	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
				  GFP_KERNEL);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	ps_end = ctx->key_size - digest_info_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x01;
	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
	req_ctx->in_buf[ps_end] = 0x00;

	if (digest_info)
		memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
		       digest_info->size);

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			    ctx->key_size - 1 - req->src_len, req->src);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
				      pkcs1pad_encrypt_sign_complete_cb, req);

	/* Reuse output buffer */
	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
				   req->dst, ctx->key_size - 1, req->dst_len);

	/* Signing is the RSA private-key op, which the raw driver exposes
	 * as "decrypt" */
	err = crypto_akcipher_decrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS && err != -EBUSY)
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}
/*
 * Parse the recovered encoded message after the RSA public-key op:
 * [0x00] 0x01 || 0xff...0xff (>= 8 bytes) || 0x00 [|| DigestInfo] || digest,
 * then compare the recovered digest with the expected digest the caller
 * appended to req->src at offset sig_size.
 *
 * Returns 0 when the signature checks out, -EINVAL/-EBADMSG for
 * malformed padding, -EKEYREJECTED when the digest length or contents
 * don't match.
 */
static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
	const struct rsa_asn1_template *digest_info = ictx->digest_info;
	const unsigned int sig_size = req->src_len;
	const unsigned int digest_size = req->dst_len;
	unsigned int dst_len;
	unsigned int pos;
	u8 *out_buf;

	if (err)
		goto done;

	err = -EINVAL;
	dst_len = req_ctx->child_req.dst_len;
	/* Child may omit at most the single leading zero byte */
	if (dst_len < ctx->key_size - 1)
		goto done;

	out_buf = req_ctx->out_buf;
	if (dst_len == ctx->key_size) {
		if (out_buf[0] != 0x00)
			/* Decrypted value had no leading 0 byte */
			goto done;

		dst_len--;
		out_buf++;
	}

	err = -EBADMSG;
	if (out_buf[0] != 0x01)
		goto done;

	/* Skip the 0xff padding run */
	for (pos = 1; pos < dst_len; pos++)
		if (out_buf[pos] != 0xff)
			break;

	/* pos < 9: fewer than 8 padding bytes; also require the 0x00
	 * separator to terminate the run */
	if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
		goto done;
	pos++;

	if (digest_info) {
		if (digest_info->size > dst_len - pos)
			goto done;
		/* crypto_memneq: constant-time comparison */
		if (crypto_memneq(out_buf + pos, digest_info->data,
				  digest_info->size))
			goto done;

		pos += digest_info->size;
	}

	err = 0;

	if (digest_size != dst_len - pos) {
		err = -EKEYREJECTED;
		req->dst_len = dst_len - pos;
		goto done;
	}

	/* Extract appended digest. */
	sg_pcopy_to_buffer(req->src,
			   sg_nents_for_len(req->src, sig_size + digest_size),
			   req_ctx->out_buf + ctx->key_size,
			   digest_size, sig_size);
	/* Do the actual verification step. */
	if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
		   digest_size) != 0)
		err = -EKEYREJECTED;
done:
	kfree_sensitive(req_ctx->out_buf);

	return err;
}
  404. static void pkcs1pad_verify_complete_cb(
  405. struct crypto_async_request *child_async_req, int err)
  406. {
  407. struct akcipher_request *req = child_async_req->data;
  408. if (err == -EINPROGRESS)
  409. goto out;
  410. err = pkcs1pad_verify_complete(req, err);
  411. out:
  412. akcipher_request_complete(req, err);
  413. }
  414. /*
  415. * The verify operation is here for completeness similar to the verification
  416. * defined in RFC2313 section 10.2 except that block type 0 is not accepted,
  417. * as in RFC2437. RFC2437 section 9.2 doesn't define any operation to
  418. * retrieve the DigestInfo from a signature, instead the user is expected
  419. * to call the sign operation to generate the expected signature and compare
  420. * signatures instead of the message-digests.
  421. */
  422. static int pkcs1pad_verify(struct akcipher_request *req)
  423. {
  424. struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
  425. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  426. struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
  427. const unsigned int sig_size = req->src_len;
  428. const unsigned int digest_size = req->dst_len;
  429. int err;
  430. if (WARN_ON(req->dst) || WARN_ON(!digest_size) ||
  431. !ctx->key_size || sig_size != ctx->key_size)
  432. return -EINVAL;
  433. req_ctx->out_buf = kmalloc(ctx->key_size + digest_size, GFP_KERNEL);
  434. if (!req_ctx->out_buf)
  435. return -ENOMEM;
  436. pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
  437. ctx->key_size, NULL);
  438. akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
  439. akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
  440. pkcs1pad_verify_complete_cb, req);
  441. /* Reuse input buffer, output to a new buffer */
  442. akcipher_request_set_crypt(&req_ctx->child_req, req->src,
  443. req_ctx->out_sg, sig_size, ctx->key_size);
  444. err = crypto_akcipher_encrypt(&req_ctx->child_req);
  445. if (err != -EINPROGRESS && err != -EBUSY)
  446. return pkcs1pad_verify_complete(req, err);
  447. return err;
  448. }
  449. static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
  450. {
  451. struct akcipher_instance *inst = akcipher_alg_instance(tfm);
  452. struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
  453. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  454. struct crypto_akcipher *child_tfm;
  455. child_tfm = crypto_spawn_akcipher(&ictx->spawn);
  456. if (IS_ERR(child_tfm))
  457. return PTR_ERR(child_tfm);
  458. ctx->child = child_tfm;
  459. akcipher_set_reqsize(tfm, sizeof(struct pkcs1pad_request) +
  460. crypto_akcipher_reqsize(child_tfm));
  461. return 0;
  462. }
  463. static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
  464. {
  465. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  466. crypto_free_akcipher(ctx->child);
  467. }
  468. static void pkcs1pad_free(struct akcipher_instance *inst)
  469. {
  470. struct pkcs1pad_inst_ctx *ctx = akcipher_instance_ctx(inst);
  471. struct crypto_akcipher_spawn *spawn = &ctx->spawn;
  472. crypto_drop_akcipher(spawn);
  473. kfree(inst);
  474. }
/*
 * Instantiate the template as "pkcs1pad(rsa)" (encrypt/decrypt/raw sign)
 * or "pkcs1pad(<rsa-impl>,<hash>)" (sign/verify with DigestInfo).
 *
 * tb[1] names the inner akcipher (must resolve to cra_name "rsa");
 * tb[2], if present, names a hash listed in rsa_asn1_templates.
 *
 * Returns 0 and registers the instance, or a negative errno; all error
 * paths after the allocation funnel through pkcs1pad_free().
 */
static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct akcipher_instance *inst;
	struct pkcs1pad_inst_ctx *ctx;
	struct akcipher_alg *rsa_alg;
	const char *hash_name;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask);
	if (err)
		return err;

	/* kzalloc: ctx->digest_info starts out NULL (no-hash case) */
	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = akcipher_instance_ctx(inst);

	err = crypto_grab_akcipher(&ctx->spawn, akcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;

	rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn);

	/* Only plain "rsa" may be wrapped */
	if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) {
		err = -EINVAL;
		goto err_free_inst;
	}

	/* Default errno for the snprintf truncation checks below */
	err = -ENAMETOOLONG;
	hash_name = crypto_attr_alg_name(tb[2]);
	if (IS_ERR(hash_name)) {
		/* No hash parameter given: "pkcs1pad(rsa)" */
		if (snprintf(inst->alg.base.cra_name,
			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
			     rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		if (snprintf(inst->alg.base.cra_driver_name,
			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
			     rsa_alg->base.cra_driver_name) >=
			     CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;
	} else {
		ctx->digest_info = rsa_lookup_asn1(hash_name);
		if (!ctx->digest_info) {
			err = -EINVAL;
			goto err_free_inst;
		}

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "pkcs1pad(%s,%s)", rsa_alg->base.cra_name,
			     hash_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		if (snprintf(inst->alg.base.cra_driver_name,
			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
			     rsa_alg->base.cra_driver_name,
			     hash_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;
	}

	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
	inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);

	inst->alg.init = pkcs1pad_init_tfm;
	inst->alg.exit = pkcs1pad_exit_tfm;

	inst->alg.encrypt = pkcs1pad_encrypt;
	inst->alg.decrypt = pkcs1pad_decrypt;
	inst->alg.sign = pkcs1pad_sign;
	inst->alg.verify = pkcs1pad_verify;
	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
	inst->alg.max_size = pkcs1pad_get_max_size;

	inst->free = pkcs1pad_free;

	err = akcipher_register_instance(tmpl, inst);
	if (err) {
		/* The error gotos above jump into this labeled block */
err_free_inst:
		pkcs1pad_free(inst);
	}
	return err;
}
/* Registration record for the "pkcs1pad" crypto template. */
struct crypto_template rsa_pkcs1pad_tmpl = {
	.name = "pkcs1pad",
	.create = pkcs1pad_create,
	.module = THIS_MODULE,
};