/* md5_glue.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Glue code for MD5 hashing optimized for sparc64 crypto opcodes.
  3. *
  4. * This is based largely upon arch/x86/crypto/sha1_ssse3_glue.c
  5. * and crypto/md5.c which are:
  6. *
  7. * Copyright (c) Alan Smithee.
  8. * Copyright (c) Andrew McDonald <[email protected]>
  9. * Copyright (c) Jean-Francois Dive <[email protected]>
  10. * Copyright (c) Mathias Krause <[email protected]>
  11. * Copyright (c) Cryptoapi developers.
  12. * Copyright (c) 2002 James Morris <[email protected]>
  13. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>

#include <asm/pstate.h>
#include <asm/elf.h>

#include "opcodes.h"
/*
 * Assembly entry point (md5_asm.S): runs 'rounds' MD5 block transforms
 * over 'data' using the sparc64 md5 crypto opcode, updating 'digest'
 * in place.  'data' must hold rounds * MD5_HMAC_BLOCK_SIZE bytes.
 */
asmlinkage void md5_sparc64_transform(u32 *digest, const char *data,
				      unsigned int rounds);
  26. static int md5_sparc64_init(struct shash_desc *desc)
  27. {
  28. struct md5_state *mctx = shash_desc_ctx(desc);
  29. mctx->hash[0] = MD5_H0;
  30. mctx->hash[1] = MD5_H1;
  31. mctx->hash[2] = MD5_H2;
  32. mctx->hash[3] = MD5_H3;
  33. le32_to_cpu_array(mctx->hash, 4);
  34. mctx->byte_count = 0;
  35. return 0;
  36. }
/*
 * Slow-path update: 'partial' bytes are already buffered in sctx->block
 * and partial + len >= MD5_HMAC_BLOCK_SIZE, so at least one full block
 * can be transformed.  Leftover tail bytes are stashed back in
 * sctx->block for the next call.
 */
static void __md5_sparc64_update(struct md5_state *sctx, const u8 *data,
				 unsigned int len, unsigned int partial)
{
	unsigned int done = 0;

	sctx->byte_count += len;

	/* Top up and flush the partially-filled block buffer first. */
	if (partial) {
		done = MD5_HMAC_BLOCK_SIZE - partial;
		memcpy((u8 *)sctx->block + partial, data, done);
		md5_sparc64_transform(sctx->hash, (u8 *)sctx->block, 1);
	}

	/* Hash all remaining whole blocks straight out of 'data'. */
	if (len - done >= MD5_HMAC_BLOCK_SIZE) {
		const unsigned int rounds = (len - done) / MD5_HMAC_BLOCK_SIZE;

		md5_sparc64_transform(sctx->hash, data + done, rounds);
		done += rounds * MD5_HMAC_BLOCK_SIZE;
	}

	/* Buffer the sub-block tail (possibly zero bytes) for later. */
	memcpy(sctx->block, data + done, len - done);
}
  54. static int md5_sparc64_update(struct shash_desc *desc, const u8 *data,
  55. unsigned int len)
  56. {
  57. struct md5_state *sctx = shash_desc_ctx(desc);
  58. unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE;
  59. /* Handle the fast case right here */
  60. if (partial + len < MD5_HMAC_BLOCK_SIZE) {
  61. sctx->byte_count += len;
  62. memcpy((u8 *)sctx->block + partial, data, len);
  63. } else
  64. __md5_sparc64_update(sctx, data, len, partial);
  65. return 0;
  66. }
  67. /* Add padding and return the message digest. */
  68. static int md5_sparc64_final(struct shash_desc *desc, u8 *out)
  69. {
  70. struct md5_state *sctx = shash_desc_ctx(desc);
  71. unsigned int i, index, padlen;
  72. u32 *dst = (u32 *)out;
  73. __le64 bits;
  74. static const u8 padding[MD5_HMAC_BLOCK_SIZE] = { 0x80, };
  75. bits = cpu_to_le64(sctx->byte_count << 3);
  76. /* Pad out to 56 mod 64 and append length */
  77. index = sctx->byte_count % MD5_HMAC_BLOCK_SIZE;
  78. padlen = (index < 56) ? (56 - index) : ((MD5_HMAC_BLOCK_SIZE+56) - index);
  79. /* We need to fill a whole block for __md5_sparc64_update() */
  80. if (padlen <= 56) {
  81. sctx->byte_count += padlen;
  82. memcpy((u8 *)sctx->block + index, padding, padlen);
  83. } else {
  84. __md5_sparc64_update(sctx, padding, padlen, index);
  85. }
  86. __md5_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
  87. /* Store state in digest */
  88. for (i = 0; i < MD5_HASH_WORDS; i++)
  89. dst[i] = sctx->hash[i];
  90. /* Wipe context */
  91. memset(sctx, 0, sizeof(*sctx));
  92. return 0;
  93. }
  94. static int md5_sparc64_export(struct shash_desc *desc, void *out)
  95. {
  96. struct md5_state *sctx = shash_desc_ctx(desc);
  97. memcpy(out, sctx, sizeof(*sctx));
  98. return 0;
  99. }
  100. static int md5_sparc64_import(struct shash_desc *desc, const void *in)
  101. {
  102. struct md5_state *sctx = shash_desc_ctx(desc);
  103. memcpy(sctx, in, sizeof(*sctx));
  104. return 0;
  105. }
/*
 * shash algorithm descriptor.  Registers as "md5" with driver name
 * "md5-sparc64"; SPARC_CR_OPCODE_PRIORITY ranks it above the generic
 * C implementation when the crypto opcodes are present.
 */
static struct shash_alg alg = {
	.digestsize	=	MD5_DIGEST_SIZE,
	.init		=	md5_sparc64_init,
	.update		=	md5_sparc64_update,
	.final		=	md5_sparc64_final,
	.export		=	md5_sparc64_export,
	.import		=	md5_sparc64_import,
	/* export/import copy the whole context, so state == desc size. */
	.descsize	=	sizeof(struct md5_state),
	.statesize	=	sizeof(struct md5_state),
	.base		=	{
		.cra_name	=	"md5",
		.cra_driver_name=	"md5-sparc64",
		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
		.cra_blocksize	=	MD5_HMAC_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};
  123. static bool __init sparc64_has_md5_opcode(void)
  124. {
  125. unsigned long cfr;
  126. if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
  127. return false;
  128. __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
  129. if (!(cfr & CFR_MD5))
  130. return false;
  131. return true;
  132. }
  133. static int __init md5_sparc64_mod_init(void)
  134. {
  135. if (sparc64_has_md5_opcode()) {
  136. pr_info("Using sparc64 md5 opcode optimized MD5 implementation\n");
  137. return crypto_register_shash(&alg);
  138. }
  139. pr_info("sparc64 md5 opcode not available.\n");
  140. return -ENODEV;
  141. }
/* Module exit: unregister the algorithm registered in mod_init. */
static void __exit md5_sparc64_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}
module_init(md5_sparc64_mod_init);
module_exit(md5_sparc64_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Message Digest Algorithm, sparc64 md5 opcode accelerated");

MODULE_ALIAS_CRYPTO("md5");

/* Shared device-table boilerplate for the sparc64 crypto-opcode modules. */
#include "crop_devid.c"