nested.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V nested virtualization code.
 *
 * Copyright (C) 2018, Microsoft, Inc.
 *
 * Author : Lan Tianyu <[email protected]>
 */
#define pr_fmt(fmt)  "Hyper-V: " fmt

#include <linux/types.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>

#include <asm/trace/hyperv.h>

int hyperv_flush_guest_mapping(u64 as)
{
	struct hv_guest_mapping_flush **flush_pcpu;
	struct hv_guest_mapping_flush *flush;
	u64 status;
	unsigned long flags;
	int ret = -ENOTSUPP;

	if (!hv_hypercall_pg)
		goto fault;

	local_irq_save(flags);

	/* The hypercall argument lives in the per-CPU input page. */
	flush_pcpu = (struct hv_guest_mapping_flush **)
		this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;

	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto fault;
	}

	flush->address_space = as;
	flush->flags = 0;

	status = hv_do_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE,
				 flush, NULL);
	local_irq_restore(flags);

	if (hv_result_success(status))
		ret = 0;

fault:
	trace_hyperv_nested_flush_guest_mapping(as, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping);
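
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller that flushes every nested mapping for one guest address space.
 * Treating @eptp as the address-space value is an assumption here; real
 * callers (e.g. KVM) derive it from their own MMU state.
 */
static void __maybe_unused example_flush_all(u64 eptp)
{
	int ret = hyperv_flush_guest_mapping(eptp);

	if (ret == -ENOTSUPP)
		pr_warn("flush hypercall unavailable\n");
	else if (ret)
		pr_err("address space flush failed: %d\n", ret);
}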

int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 pages)
{
	u64 cur = start_gfn;
	u64 additional_pages;
	int gpa_n = 0;

	do {
		/*
		 * If flush requests exceed max flush count, fall back
		 * to a full, non-ranged TLB flush.
		 */
		if (gpa_n >= HV_MAX_FLUSH_REP_COUNT)
			return -ENOSPC;

		/* Each list entry covers at most HV_MAX_FLUSH_PAGES pages. */
		additional_pages = min_t(u64, pages, HV_MAX_FLUSH_PAGES) - 1;

		flush->gpa_list[gpa_n].page.additional_pages = additional_pages;
		flush->gpa_list[gpa_n].page.largepage = false;
		flush->gpa_list[gpa_n].page.basepfn = cur;

		pages -= additional_pages + 1;
		cur += additional_pages + 1;
		gpa_n++;
	} while (pages > 0);

	return gpa_n;
}
EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list);
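
/*
 * Illustrative sketch, not part of the original file: a minimal
 * fill_flush_list_func callback that describes a single contiguous GFN
 * range. struct example_range and the function itself are hypothetical
 * names introduced only for this example.
 */
struct example_range {
	u64 start_gfn;
	u64 pages;
};

static int example_fill_flush_list(struct hv_guest_mapping_flush_list *flush,
				   void *data)
{
	struct example_range *range = data;

	/* Returns the number of list entries written, or -ENOSPC. */
	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
						    range->pages);
}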

int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_flush_list_func, void *data)
{
	struct hv_guest_mapping_flush_list **flush_pcpu;
	struct hv_guest_mapping_flush_list *flush;
	u64 status;
	unsigned long flags;
	int ret = -ENOTSUPP;
	int gpa_n = 0;

	if (!hv_hypercall_pg || !fill_flush_list_func)
		goto fault;

	local_irq_save(flags);

	flush_pcpu = (struct hv_guest_mapping_flush_list **)
		this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;
	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto fault;
	}

	flush->address_space = as;
	flush->flags = 0;

	/* The caller-supplied callback fills in the GPA range list. */
	gpa_n = fill_flush_list_func(flush, data);
	if (gpa_n < 0) {
		local_irq_restore(flags);
		goto fault;
	}

	status = hv_do_rep_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST,
				     gpa_n, 0, flush, NULL);

	local_irq_restore(flags);

	if (hv_result_success(status))
		ret = 0;
	else
		ret = hv_result(status);

fault:
	trace_hyperv_nested_flush_guest_mapping_range(as, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping_range);
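
/*
 * Illustrative sketch, not part of the original file: wiring the
 * hypothetical callback above into a ranged flush, and falling back to
 * a full address-space flush when the ranged path is unavailable or
 * fails (e.g. the fill callback returned -ENOSPC).
 */
static int __maybe_unused example_flush_range(u64 as, u64 start_gfn, u64 pages)
{
	struct example_range range = {
		.start_gfn = start_gfn,
		.pages = pages,
	};
	int ret;

	ret = hyperv_flush_guest_mapping_range(as, example_fill_flush_list,
					       &range);
	if (ret)
		/* Ranged flush did not succeed: flush everything. */
		ret = hyperv_flush_guest_mapping(as);

	return ret;
}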