/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SM3 Secure Hash Algorithm, AVX assembler accelerated.
 * specified in: https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
 *
 * Copyright (C) 2021 Tianjia Zhang <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
#include <asm/simd.h>

asmlinkage void sm3_transform_avx(struct sm3_state *state,
				  const u8 *data, int nblocks);
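
/*
 * Fall back to the generic sm3_update() when the FPU is not usable in
 * this context, or when the buffered data plus the new input still does
 * not complete a full block; otherwise hash whole blocks with the AVX
 * assembler routine.
 */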
static int sm3_avx_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct sm3_state *sctx = shash_desc_ctx(desc);

	if (!crypto_simd_usable() ||
	    (sctx->count % SM3_BLOCK_SIZE) + len < SM3_BLOCK_SIZE) {
		sm3_update(sctx, data, len);
		return 0;
	}

	/*
	 * Make sure struct sm3_state begins directly with the SM3
	 * 256-bit internal state, as this is what the asm functions expect.
	 */
	BUILD_BUG_ON(offsetof(struct sm3_state, state) != 0);

	kernel_fpu_begin();
	sm3_base_do_update(desc, data, len, sm3_transform_avx);
	kernel_fpu_end();

	return 0;
}
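
/*
 * Hash any remaining data and write out the digest, again falling back
 * to the generic implementation when SIMD cannot be used.
 */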
static int sm3_avx_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	if (!crypto_simd_usable()) {
		struct sm3_state *sctx = shash_desc_ctx(desc);

		if (len)
			sm3_update(sctx, data, len);

		sm3_final(sctx, out);
		return 0;
	}

	kernel_fpu_begin();

	if (len)
		sm3_base_do_update(desc, data, len, sm3_transform_avx);

	sm3_base_do_finalize(desc, sm3_transform_avx);
	kernel_fpu_end();

	return sm3_base_finish(desc, out);
}

static int sm3_avx_final(struct shash_desc *desc, u8 *out)
{
	if (!crypto_simd_usable()) {
		sm3_final(shash_desc_ctx(desc), out);
		return 0;
	}

	kernel_fpu_begin();
	sm3_base_do_finalize(desc, sm3_transform_avx);
	kernel_fpu_end();

	return sm3_base_finish(desc, out);
}
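
/*
 * Register as "sm3-avx" with a higher priority than the generic C
 * implementation so the crypto API prefers it when both are available.
 */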
static struct shash_alg sm3_avx_alg = {
	.digestsize	= SM3_DIGEST_SIZE,
	.init		= sm3_base_init,
	.update		= sm3_avx_update,
	.final		= sm3_avx_final,
	.finup		= sm3_avx_finup,
	.descsize	= sizeof(struct sm3_state),
	.base		= {
		.cra_name	 = "sm3",
		.cra_driver_name = "sm3-avx",
		.cra_priority	 = 300,
		.cra_blocksize	 = SM3_BLOCK_SIZE,
		.cra_module	 = THIS_MODULE,
	}
};
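
/*
 * The assembly implementation requires both AVX and BMI2, and the
 * kernel must be able to save and restore the SSE and YMM xstate
 * components.
 */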
static int __init sm3_avx_mod_init(void)
{
	const char *feature_name;

	if (!boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX instructions are not detected.\n");
		return -ENODEV;
	}

	if (!boot_cpu_has(X86_FEATURE_BMI2)) {
		pr_info("BMI2 instructions are not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
			       &feature_name)) {
		pr_info("CPU feature '%s' is not supported.\n", feature_name);
		return -ENODEV;
	}

	return crypto_register_shash(&sm3_avx_alg);
}

static void __exit sm3_avx_mod_exit(void)
{
	crypto_unregister_shash(&sm3_avx_alg);
}

module_init(sm3_avx_mod_init);
module_exit(sm3_avx_mod_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tianjia Zhang <[email protected]>");
MODULE_DESCRIPTION("SM3 Secure Hash Algorithm, AVX assembler accelerated");
MODULE_ALIAS_CRYPTO("sm3");
MODULE_ALIAS_CRYPTO("sm3-avx");