/* arch/x86/lib/copy_mc.c — machine-check-recoverable memory copy helpers */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */
  3. #include <linux/jump_label.h>
  4. #include <linux/uaccess.h>
  5. #include <linux/export.h>
  6. #include <linux/string.h>
  7. #include <linux/types.h>
  8. #include <asm/mce.h>
  9. #ifdef CONFIG_X86_MCE
  10. static DEFINE_STATIC_KEY_FALSE(copy_mc_fragile_key);
/*
 * Switch copy_mc_to_kernel()/copy_mc_to_user() over to the careful
 * byte-wise "fragile" copy path. Uses static_branch_inc() on the
 * static key, so the branch stays patched in once any caller asks
 * for it.
 */
void enable_copy_mc_fragile(void)
{
	static_branch_inc(&copy_mc_fragile_key);
}
  15. #define copy_mc_fragile_enabled (static_branch_unlikely(&copy_mc_fragile_key))
  16. /*
  17. * Similar to copy_user_handle_tail, probe for the write fault point, or
  18. * source exception point.
  19. */
  20. __visible notrace unsigned long
  21. copy_mc_fragile_handle_tail(char *to, char *from, unsigned len)
  22. {
  23. for (; len; --len, to++, from++)
  24. if (copy_mc_fragile(to, from, 1))
  25. break;
  26. return len;
  27. }
  28. #else
/*
 * No point in doing careful copying, or consulting a static key when
 * there is no #MC handler in the CONFIG_X86_MCE=n case.
 */
void enable_copy_mc_fragile(void)
{
	/* Intentionally empty: nothing to enable without CONFIG_X86_MCE. */
}
  36. #define copy_mc_fragile_enabled (0)
  37. #endif
  38. unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned len);
  39. /**
  40. * copy_mc_to_kernel - memory copy that handles source exceptions
  41. *
  42. * @dst: destination address
  43. * @src: source address
  44. * @len: number of bytes to copy
  45. *
  46. * Call into the 'fragile' version on systems that benefit from avoiding
  47. * corner case poison consumption scenarios, For example, accessing
  48. * poison across 2 cachelines with a single instruction. Almost all
  49. * other uses case can use copy_mc_enhanced_fast_string() for a fast
  50. * recoverable copy, or fallback to plain memcpy.
  51. *
  52. * Return 0 for success, or number of bytes not copied if there was an
  53. * exception.
  54. */
  55. unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
  56. {
  57. if (copy_mc_fragile_enabled)
  58. return copy_mc_fragile(dst, src, len);
  59. if (static_cpu_has(X86_FEATURE_ERMS))
  60. return copy_mc_enhanced_fast_string(dst, src, len);
  61. memcpy(dst, src, len);
  62. return 0;
  63. }
  64. EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
  65. unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
  66. {
  67. unsigned long ret;
  68. if (copy_mc_fragile_enabled) {
  69. __uaccess_begin();
  70. ret = copy_mc_fragile((__force void *)dst, src, len);
  71. __uaccess_end();
  72. return ret;
  73. }
  74. if (static_cpu_has(X86_FEATURE_ERMS)) {
  75. __uaccess_begin();
  76. ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
  77. __uaccess_end();
  78. return ret;
  79. }
  80. return copy_user_generic((__force void *)dst, src, len);
  81. }