// SPDX-License-Identifier: GPL-2.0-only
/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay <[email protected]>
 *   Avi Kivity <[email protected]>
 *   Marcelo Tosatti <[email protected]>
 *   Paolo Bonzini <[email protected]>
 *   Xiao Guangrong <[email protected]>
 */

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"

#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)
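
/*
 * Note on the 0x200 case below: the variable-range MTRRs live at MSR 0x200
 * (IA32_MTRR_PHYSBASE0) and up, as base/mask pairs, so with KVM_NR_VAR_MTRR
 * currently 8 the range covers MSRs 0x200-0x20f, i.e.
 * IA32_MTRR_PHYSBASE0/PHYSMASK0 ... IA32_MTRR_PHYSBASE7/PHYSMASK7.
 */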
static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	}
	return false;
}
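
/*
 * Valid MTRR memory type encodings (Intel SDM): 0 = UC, 1 = WC, 4 = WT,
 * 5 = WP, 6 = WB; values 2, 3 and 7 are reserved.  The 0x73 bitmap below is
 * simply bits 0, 1, 4, 5 and 6 set.
 */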
static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}
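
/*
 * Reserved-bit layout assumed by kvm_mtrr_valid() below (per the SDM):
 * IA32_MTRR_PHYSBASEn keeps the type in bits 7:0 and reserves bits 11:8,
 * while IA32_MTRR_PHYSMASKn reserves bits 10:0 and uses bit 11 as the
 * valid (V) bit; bits above the guest's MAXPHYADDR are reserved in both.
 */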
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		return kvm_pat_valid(data);
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else
		/* MTRR mask */
		mask |= 0x7ff;

	return (data & mask) == 0;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);

static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}

static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
	/*
	 * Intel SDM 11.11.2.2: all MTRRs are disabled when the
	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC memory type
	 * is applied to all of physical memory.
	 *
	 * However, virtual machines can be run with CPUID such that
	 * there are no MTRRs.  In that case, the firmware will never
	 * enable MTRRs and it is obviously undesirable to run the
	 * guest entirely with UC memory, so we use WB instead.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
		return MTRR_TYPE_UNCACHABLE;
	else
		return MTRR_TYPE_WRBACK;
}

/*
 * Three terms are used in the following code:
 * - segment: one of the address segments covered by the fixed MTRRs.
 * - unit: one MSR entry within a segment.
 * - range: a block of memory covered by a single cache type.
 */
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};

static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
	{
		.start = 0x0,
		.end = 0x80000,
		.range_shift = 16, /* 64K */
		.range_start = 0,
	},

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed mtrr.
	 */
	{
		.start = 0x80000,
		.end = 0xc0000,
		.range_shift = 14, /* 16K */
		.range_start = 8,
	},

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed mtrr.
	 */
	{
		.start = 0xc0000,
		.end = 0x100000,
		.range_shift = 12, /* 4K */
		.range_start = 24,
	}
};
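
/*
 * Concretely (illustrative summary of the table above): segment 0 is one
 * 64K-granular unit covering 0x00000-0x7ffff (fixed_ranges[0..7]), segment 1
 * is two 16K-granular units covering 0x80000-0xbffff (fixed_ranges[8..23]),
 * and segment 2 is eight 4K-granular units covering 0xc0000-0xfffff
 * (fixed_ranges[24..87]), for 88 fixed ranges in total.
 */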

/*
 * The size of a unit is covered by one MSR: each MSR entry holds 8 ranges,
 * so the unit size is always 8 * 2^range_shift.
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
{
	return 8 << fixed_seg_table[seg].range_shift;
}

static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
	switch (msr) {
	case MSR_MTRRfix64K_00000:
		*seg = 0;
		*unit = 0;
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix16K_80000,
			MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix4K_C0000,
			MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
		break;
	default:
		return false;
	}

	return true;
}

static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);
}

static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
		> mtrr_seg->end);

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
}

static int fixed_mtrr_seg_end_range_index(int seg)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int n;

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;
}

static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return false;

	fixed_mtrr_seg_unit_range(seg, unit, start, end);
	return true;
}

static int fixed_msr_to_range_index(u32 msr)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return -1;

	return fixed_mtrr_seg_unit_range_index(seg, unit);
}

static int fixed_mtrr_addr_to_seg(u64 addr)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
			return seg;
	}

	return -1;
}

static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int index;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
	return index;
}

static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
}
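
/*
 * Worked example for var_mtrr_range() below (illustrative values, assuming a
 * guest MAXPHYADDR of 36 bits): a guest write of PHYSBASE = 0x80000006
 * (base 2 GiB, type WB) and PHYSMASK = 0xfc0000800 (1 GiB mask, V bit set)
 * is stored with the reserved GPA bits stuffed into ->mask, so here
 * *start = 0x80000000, ~mask yields 0x3fffffff and *end = 0xc0000000,
 * i.e. the half-open range [2 GiB, 3 GiB).
 */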
static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
{
	u64 mask;

	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;

	/* This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
	 */
	*end = (*start | ~mask) + 1;
}
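
/*
 * React to a guest MTRR change by zapping the affected GFN range so that
 * SPTEs are rebuilt with the new memory type.  This is only needed when TDP
 * is enabled and the VM has noncoherent DMA; in all other cases the write is
 * accepted but nothing has to be invalidated.
 */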
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	gfn_t start, end;
	int index;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	    !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
		return;

	/* fixed MTRRs. */
	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
			return;
	} else if (msr == MSR_MTRRdefType) {
		start = 0x0;
		end = ~0ULL;
	} else {
		/* variable range MTRRs. */
		index = (msr - 0x200) / 2;
		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
	}

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}

static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
{
	/* Bit 11 of the mask MSR is the valid (V) bit. */
	return (range->mask & (1 << 11)) != 0;
}

static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct kvm_mtrr_range *tmp, *cur;
	int index, is_mtrr_mask;

	index = (msr - 0x200) / 2;
	is_mtrr_mask = msr - 0x200 - 2 * index;
	cur = &mtrr_state->var_ranges[index];

	/* remove the entry if it's in the list. */
	if (var_mtrr_range_is_valid(cur))
		list_del(&mtrr_state->var_ranges[index].node);

	/*
	 * Set all illegal GPA bits in the mask, since those bits must
	 * implicitly be 0.  The bits are then cleared when reading them.
	 */
	if (!is_mtrr_mask)
		cur->base = data;
	else
		cur->mask = data | kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	/* add it to the list if it's enabled. */
	if (var_mtrr_range_is_valid(cur)) {
		list_for_each_entry(tmp, &mtrr_state->head, node)
			if (cur->base >= tmp->base)
				break;
		list_add_tail(&cur->node, &tmp->node);
	}
}

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int index;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else
		set_var_mtrr_msr(vcpu, msr, data);

	update_mtrr(vcpu, msr);
	return 0;
}

int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	int index;

	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR = 0
		 * WC = 1
		 * FIX = 1
		 * VCNT = KVM_NR_VAR_MTRR
		 */
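		/*
		 * In MSR_MTRRcap, VCNT lives in bits 7:0, FIX is bit 8 and
		 * WC is bit 10, so 0x500 | KVM_NR_VAR_MTRR encodes exactly
		 * the capabilities listed above.
		 */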
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

	if (!msr_mtrr_valid(msr))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int is_mtrr_mask;

		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
		if (!is_mtrr_mask)
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
		else
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;

		*pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu);
	}

	return 0;
}

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
}

struct mtrr_iter {
	/* input fields. */
	struct kvm_mtrr *mtrr_state;
	u64 start;
	u64 end;

	/* output fields. */
	int mem_type;
	/* mtrr is completely disabled? */
	bool mtrr_disabled;
	/* [start, end) is not fully covered by the MTRRs? */
	bool partial_map;

	/* private fields. */
	union {
		/* used for fixed MTRRs. */
		struct {
			int index;
			int seg;
		};
		/* used for var MTRRs. */
		struct {
			struct kvm_mtrr_range *range;
			/* max address that has been covered by var MTRRs. */
			u64 start_max;
		};
	};

	bool fixed;
};

static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
{
	int seg, index;

	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
		return false;

	seg = fixed_mtrr_addr_to_seg(iter->start);
	if (seg < 0)
		return false;

	iter->fixed = true;
	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
	iter->index = index;
	iter->seg = seg;
	return true;
}

static bool match_var_range(struct mtrr_iter *iter,
			    struct kvm_mtrr_range *range)
{
	u64 start, end;

	var_mtrr_range(range, &start, &end);
	if (!(start >= iter->end || end <= iter->start)) {
		iter->range = range;

		/*
		 * This function is called while walking kvm_mtrr.head, so
		 * range has the minimum base address that overlaps
		 * [iter->start_max, iter->end).
		 */
		iter->partial_map |= iter->start_max < start;

		/* update the max address that has been covered. */
		iter->start_max = max(iter->start_max, end);
		return true;
	}

	return false;
}

static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
		if (match_var_range(iter, iter->range))
			return;

	iter->range = NULL;
	iter->partial_map |= iter->start_max < iter->end;
}

static void mtrr_lookup_var_start(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	iter->fixed = false;
	iter->start_max = iter->start;
	iter->range = NULL;
	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);

	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
{
	/* terminate the lookup. */
	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
		iter->fixed = false;
		iter->range = NULL;
		return;
	}

	iter->index++;

	/* all fixed MTRRs have been looked up. */
	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
		return mtrr_lookup_var_start(iter);

	/* switch to the next segment. */
	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
		iter->seg++;
}

static void mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_start(struct mtrr_iter *iter)
{
	if (!mtrr_is_enabled(iter->mtrr_state)) {
		iter->mtrr_disabled = true;
		return;
	}

	if (!mtrr_lookup_fixed_start(iter))
		mtrr_lookup_var_start(iter);
}

static void mtrr_lookup_init(struct mtrr_iter *iter,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
	iter->mtrr_state = mtrr_state;
	iter->start = start;
	iter->end = end;
	iter->mtrr_disabled = false;
	iter->partial_map = false;
	iter->fixed = false;
	iter->range = NULL;

	mtrr_lookup_start(iter);
}

static bool mtrr_lookup_okay(struct mtrr_iter *iter)
{
	if (iter->fixed) {
		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
		return true;
	}

	if (iter->range) {
		iter->mem_type = iter->range->base & 0xff;
		return true;
	}

	return false;
}

static void mtrr_lookup_next(struct mtrr_iter *iter)
{
	if (iter->fixed)
		mtrr_lookup_fixed_next(iter);
	else
		mtrr_lookup_var_next(iter);
}

#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))
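
/*
 * mtrr_for_each_mem_type() yields the memory type of each fixed range and
 * then of each enabled variable range that overlaps [_gpa_start_, _gpa_end_).
 * After the loop, callers are expected to check iter.mtrr_disabled (MTRRs off
 * entirely) and iter.partial_map (part of the GPA range not covered), as
 * kvm_mtrr_get_guest_memory_type() does below.
 */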

u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	end = start + PAGE_SIZE;

	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		int curr_type = iter.mem_type;

		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */

		if (type == -1) {
			type = curr_type;
			continue;
		}

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
			continue;

		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		    ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;
			continue;
		}

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	}

	if (iter.mtrr_disabled)
		return mtrr_disabled_type(vcpu);

	/* not contained in any MTRRs. */
	if (type == -1)
		return mtrr_default_type(mtrr_state);

	/*
	 * We only checked one page, so it is impossible for it to be
	 * partially covered by MTRRs.
	 */
	WARN_ON(iter.partial_map);

	return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
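
/*
 * Return true if every page in [gfn, gfn + page_num) resolves to the same
 * guest memory type, which the MMU can use (for example) to decide whether a
 * large mapping is safe with respect to the guest MTRRs.  If the range is
 * only partially covered, the matched type must equal the default type that
 * applies to the uncovered remainder.
 */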
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;

	start = gfn_to_gpa(gfn);
	end = gfn_to_gpa(gfn + page_num);
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		if (type == -1) {
			type = iter.mem_type;
			continue;
		}

		if (type != iter.mem_type)
			return false;
	}

	if (iter.mtrr_disabled)
		return true;

	if (!iter.partial_map)
		return true;

	if (type == -1)
		return true;

	return type == mtrr_default_type(mtrr_state);
}