paging_tmpl.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <[email protected]>
 *   Avi Kivity   <[email protected]>
 */

/*
 * The MMU needs to be able to access/walk 32-bit and 64-bit guest page tables,
 * as well as guest EPT tables, so the code in this file is compiled thrice,
 * once per guest PTE type. The per-type defines are #undef'd at the end.
 */
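
/*
 * Illustrative sketch (editor's note, not part of the upstream file): mmu.c
 * instantiates this template once per guest PTE type, roughly as follows.
 * See mmu.c for the authoritative include sequence and the actual value
 * chosen for PTTYPE_EPT.
 *
 *	#define PTTYPE_EPT 18
 *	#define PTTYPE PTTYPE_EPT
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */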
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_LEVEL_BITS 9
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_LEVEL_BITS 10
	#define PT_MAX_FULL_LEVELS 2
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true

	#define PT32_DIR_PSE36_SIZE 4
	#define PT32_DIR_PSE36_SHIFT 13
	#define PT32_DIR_PSE36_MASK \
		(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
#elif PTTYPE == PTTYPE_EPT
	#define pt_element_t u64
	#define guest_walker guest_walkerEPT
	#define FNAME(name) ept_##name
	#define PT_LEVEL_BITS 9
	#define PT_GUEST_DIRTY_SHIFT 9
	#define PT_GUEST_ACCESSED_SHIFT 8
	#define PT_HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->cpu_role.base.ad_disabled)
	#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
	#error Invalid PTTYPE value
#endif

/* Common logic, but per-type values. These also need to be undefined. */
#define PT_BASE_ADDR_MASK	((pt_element_t)(((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
#define PT_LVL_ADDR_MASK(lvl)	__PT_LVL_ADDR_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
#define PT_LVL_OFFSET_MASK(lvl)	__PT_LVL_OFFSET_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
#define PT_INDEX(addr, lvl)	__PT_INDEX(addr, lvl, PT_LEVEL_BITS)

#define PT_GUEST_DIRTY_MASK	(1 << PT_GUEST_DIRTY_SHIFT)
#define PT_GUEST_ACCESSED_MASK	(1 << PT_GUEST_ACCESSED_SHIFT)

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	unsigned max_level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
	bool pte_writable[PT_MAX_FULL_LEVELS];
	unsigned int pt_access[PT_MAX_FULL_LEVELS];
	unsigned int pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};

#if PTTYPE == 32
static inline gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
#endif

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
					     unsigned gpte)
{
	unsigned mask;

	/* dirty bit is not supported, so no need to track it */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return;

	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

	mask = (unsigned)~ACC_WRITE_MASK;
	/* Allow write access to dirty gptes */
	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
	*access &= mask;
}

static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
	return pte & PT_PRESENT_MASK;
#else
	return pte & 7;
#endif
}

static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
{
#if PTTYPE != PTTYPE_EPT
	return false;
#else
	return __is_bad_mt_xwr(rsvd_check, gpte);
#endif
}

static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
{
	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
	       FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
}

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *sp, u64 *spte,
					 u64 gpte)
{
	if (!FNAME(is_present_gpte)(gpte))
		goto no_present;

	/* Prefetch only accessed entries (unless A/D bits are disabled). */
	if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
	    !(gpte & PT_GUEST_ACCESSED_MASK))
		goto no_present;

	if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}

/*
 * For PTTYPE_EPT, a page table can be executable but not readable
 * on supported processors. Therefore, set_spte does not automatically
 * set bit 0 if execute only is supported. Here, we repurpose ACC_USER_MASK
 * to signify readability since it isn't used in the EPT case
 */
static inline unsigned FNAME(gpte_access)(u64 gpte)
{
	unsigned access;
#if PTTYPE == PTTYPE_EPT
	access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
		((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
		((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
#else
	BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
	BUILD_BUG_ON(ACC_EXEC_MASK != 1);
	access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
	/* Combine NX with P (which is set here) to get ACC_EXEC_MASK. */
	access ^= (gpte >> PT64_NX_SHIFT);
#endif

	return access;
}
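
/*
 * Worked example for FNAME(gpte_access) above (editor's note, not in the
 * upstream file): in the non-EPT branch, PT64_NX_SHIFT is 63, so the shift
 * drops the NX bit into bit 0 before the XOR. For a present gpte with NX
 * clear, bit 0 of the result is P ^ NX = 1 ^ 0 = 1 == ACC_EXEC_MASK, i.e.
 * executable; with NX set, bit 0 becomes 1 ^ 1 = 0 and ACC_EXEC_MASK is
 * cleared.
 */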
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     gpa_t addr, int write_fault)
{
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	int ret;

	/* dirty/accessed bits are not supported, so no need to update them */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return 0;

	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_GUEST_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_ACCESSED_MASK;
		}
		if (level == walker->level && write_fault &&
		    !(pte & PT_GUEST_DIRTY_MASK)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
			if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))
				return -EINVAL;
#endif
			pte |= PT_GUEST_DIRTY_MASK;
		}
		if (pte == orig_pte)
			continue;

		/*
		 * If the slot is read-only, simply do not process the accessed
		 * and dirty bits. This is the correct thing to do if the slot
		 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
		 * are only supported if the accessed and dirty bits are already
		 * set in the ROM (so that MMIO writes are never needed).
		 *
		 * Note that NPT does not allow this at all and faults, since
		 * it always wants nested page table entries for the guest
		 * page tables to be writable. And EPT works but will simply
		 * overwrite the read-only memory to set the accessed and dirty
		 * bits.
		 */
		if (unlikely(!walker->pte_writable[level - 1]))
			continue;

		ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);
		if (ret)
			return ret;

		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
		walker->ptes[level - 1] = pte;
	}
	return 0;
}

static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned pkeys = 0;
#if PTTYPE == 64
	pte_t pte = {.pte = gpte};

	pkeys = pte_flags_pkey(pte_flags(pte));
#endif
	return pkeys;
}

static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
				       unsigned int level, unsigned int gpte)
{
	/*
	 * For EPT and PAE paging (both variants), bit 7 is either reserved at
	 * all levels or indicates a huge page (ignoring CR3/EPTP). In either
	 * case, bit 7 being set terminates the walk.
	 */
#if PTTYPE == 32
	/*
	 * 32-bit paging requires special handling because bit 7 is ignored if
	 * CR4.PSE=0, not reserved. Clear bit 7 in the gpte if the level is
	 * greater than the last level for which bit 7 is the PAGE_SIZE bit.
	 *
	 * The RHS has bit 7 set iff level < (2 + PSE). If it is clear, bit 7
	 * is not reserved and does not indicate a large page at this level,
	 * so clear PT_PAGE_SIZE_MASK in gpte if that is the case.
	 */
	gpte &= level - (PT32_ROOT_LEVEL + mmu->cpu_role.ext.cr4_pse);
#endif
	/*
	 * PG_LEVEL_4K always terminates. The RHS has bit 7 set
	 * iff level <= PG_LEVEL_4K, which for our purpose means
	 * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
	 */
	gpte |= level - PG_LEVEL_4K - 1;

	return gpte & PT_PAGE_SIZE_MASK;
}
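
/*
 * Worked example for FNAME(is_last_gpte) above (editor's note, not in the
 * upstream file), assuming PTTYPE == 32, PT32_ROOT_LEVEL == 2 and
 * PG_LEVEL_4K == 1:
 *   - level 1: "level - PG_LEVEL_4K - 1" is -1, so the OR sets every bit,
 *     including bit 7, and the walk always terminates at a 4K gpte.
 *   - level 2, CR4.PSE=1: "level - (2 + 1)" is -1, the AND preserves gpte,
 *     and bit 7 (PS) alone decides whether this is a huge page.
 *   - level 2, CR4.PSE=0: "level - (2 + 0)" is 0, the AND clears gpte, so
 *     bit 7 is ignored and the walk continues to the next level.
 */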
/*
 * Fetch a guest pte for a guest virtual address, or for an L2's GPA.
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gpa_t addr, u64 access)
{
	int ret;
	pt_element_t pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	u64 pt_access, pte_access;
	unsigned index, accessed_dirty, pte_pkey;
	u64 nested_access;
	gpa_t pte_gpa;
	bool have_ad;
	int offset;
	u64 walk_nx_mask = 0;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault  = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;
	gpa_t real_gpa;
	gfn_t gfn;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	walker->level = mmu->cpu_role.base.level;
	pte           = kvm_mmu_get_guest_pgd(vcpu, mmu);
	have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);

#if PTTYPE == 64
	walk_nx_mask = 1ULL << PT64_NX_SHIFT;
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!FNAME(is_present_gpte)(pte))
			goto error;
		--walker->level;
	}
#endif
	walker->max_level = walker->level;
	ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));

	/*
	 * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
	 * by the MOV to CR instruction are treated as reads and do not cause the
	 * processor to set the dirty flag in any EPT paging-structure entry.
	 */
	nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;

	pte_access = ~0;
	++walker->level;

	do {
		unsigned long host_addr;

		pt_access = pte_access;
		--walker->level;

		index = PT_INDEX(addr, walker->level);
		table_gfn = gpte_to_gfn(pte);
		offset    = index * sizeof(pt_element_t);
		pte_gpa   = gfn_to_gpa(table_gfn) + offset;

		BUG_ON(walker->level < 1);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(table_gfn),
					     nested_access, &walker->fault);

		/*
		 * FIXME: This can happen if emulation (for an INS/OUTS
		 * instruction) triggers a nested page fault. The exit
		 * qualification / exit info field will incorrectly have
		 * "guest page access" as the nested page fault's cause,
		 * instead of "guest page structure access". To fix this,
		 * the x86_exception struct should be augmented with enough
		 * information to fix the exit_qualification or exit_info_1
		 * fields.
		 */
		if (unlikely(real_gpa == INVALID_GPA))
			return 0;

		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
					&walker->pte_writable[walker->level - 1]);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__get_user(pte, ptep_user)))
			goto error;
		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		/*
		 * Inverting the NX bit lets us AND it like other
		 * permission bits.
		 */
		pte_access = pt_access & (pte ^ walk_nx_mask);

		if (unlikely(!FNAME(is_present_gpte)(pte)))
			goto error;

		if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
			errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		walker->ptes[walker->level - 1] = pte;

		/* Convert to ACC_*_MASK flags for struct guest_walker. */
		walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
	} while (!FNAME(is_last_gpte)(mmu, walker->level, pte));

	pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
	accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;

	/* Convert to ACC_*_MASK flags for struct guest_walker. */
	walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
	if (unlikely(errcode))
		goto error;

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

#if PTTYPE == 32
	if (walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);
#endif

	real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
	if (real_gpa == INVALID_GPA)
		return 0;

	walker->gfn = real_gpa >> PAGE_SHIFT;

	if (!write_fault)
		FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
	else
		/*
		 * On a write fault, fold the dirty bit into accessed_dirty.
		 * For modes without A/D bits support accessed_dirty will be
		 * always clear.
		 */
		accessed_dirty &= pte >>
			(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
							addr, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;
	}

	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, walker->pte_access,
		 walker->pt_access[walker->level - 1]);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
	/*
	 * Use PFERR_RSVD_MASK in error_code to tell if an EPT
	 * misconfiguration needs to be injected. The detection is
	 * done by is_rsvd_bits_set() above.
	 *
	 * We set up the value of exit_qualification to inject:
	 * [2:0] - Derive from the access bits. The exit_qualification might be
	 *         out of date if it is serving an EPT misconfiguration.
	 * [5:3] - Calculated by the page walk of the guest EPT page tables
	 * [7:8] - Derived from [7:8] of real exit_qualification
	 *
	 * The other bits are set to 0.
	 */
	if (!(errcode & PFERR_RSVD_MASK)) {
		vcpu->arch.exit_qualification &= (EPT_VIOLATION_GVA_IS_VALID |
						  EPT_VIOLATION_GVA_TRANSLATED);
		if (write_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
		if (user_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
		if (fetch_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;

		/*
		 * Note, pte_access holds the raw RWX bits from the EPTE, not
		 * ACC_*_MASK flags!
		 */
		vcpu->arch.exit_qualification |= (pte_access & VMX_EPT_RWX_MASK) <<
						 EPT_VIOLATION_RWX_SHIFT;
	}
#endif
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
	walker->fault.async_page_fault = false;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gpa_t addr, u64 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
					access);
}

static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
	struct kvm_memory_slot *slot;
	unsigned pte_access;
	gfn_t gfn;
	kvm_pfn_t pfn;

	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return false;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & FNAME(gpte_access)(gpte);
	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (!slot)
		return false;

	pfn = gfn_to_pfn_memslot_atomic(slot, gfn);
	if (is_error_pfn(pfn))
		return false;

	mmu_set_spte(vcpu, slot, spte, pte_access, gfn, pfn, NULL);
	kvm_release_pfn_clean(pfn);
	return true;
}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PG_LEVEL_4K) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
				&curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}

static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = sptep_to_sp(sptep);

	if (sp->role.level > PG_LEVEL_4K)
		return;

	/*
	 * If addresses are being invalidated, skip prefetching to avoid
	 * accidentally prefetching those addresses.
	 */
	if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
			break;
	}
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation, return 1 to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
			struct guest_walker *gw)
{
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned int direct_access, access;
	int top_level, ret;
	gfn_t base_gfn = fault->gfn;

	WARN_ON_ONCE(gw->gfn != base_gfn);
	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu->cpu_role.base.level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there. Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, fault->addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);

		table_gfn = gw->table_gfn[it.level - 2];
		access = gw->pt_access[it.level - 2];
		sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn,
					  false, access);

		if (sp != ERR_PTR(-EEXIST)) {
			/*
			 * We must synchronize the pagetable before linking it
			 * because the guest doesn't need to flush tlb when
			 * the gpte is changed from non-present to present.
			 * Otherwise, the guest may use the wrong mapping.
			 *
			 * For PG_LEVEL_4K, kvm_mmu_get_page() has already
			 * synchronized it transiently via kvm_sync_page().
			 *
			 * For higher level pagetable, we synchronize it via
			 * the slower mmu_sync_children(). If it needs to
			 * break, some progress has been made; return
			 * RET_PF_RETRY and retry on the next #PF.
			 * KVM_REQ_MMU_SYNC is not necessary but it
			 * expedites the process.
			 */
			if (sp->unsync_children &&
			    mmu_sync_children(vcpu, sp, false))
				return RET_PF_RETRY;
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp != ERR_PTR(-EEXIST))
			link_shadow_page(vcpu, it.sptep, sp);
	}

	kvm_mmu_hugepage_adjust(vcpu, fault);

	trace_kvm_mmu_spte_requested(fault);

	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
		clear_sp_write_flooding_count(it.sptep);

		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
		if (fault->nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(fault, *it.sptep, it.level);

		base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == fault->goal_level)
			break;

		validate_direct_spte(vcpu, it.sptep, direct_access);

		sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn,
					  true, direct_access);
		if (sp == ERR_PTR(-EEXIST))
			continue;

		link_shadow_page(vcpu, it.sptep, sp);
		if (fault->huge_page_disallowed &&
		    fault->req_level >= it.level)
			account_huge_nx_page(vcpu->kvm, sp);
	}

	if (WARN_ON_ONCE(it.level != fault->goal_level))
		return -EFAULT;

	ret = mmu_set_spte(vcpu, fault->slot, it.sptep, gw->pte_access,
			   base_gfn, fault->pfn, fault);
	if (ret == RET_PF_SPURIOUS)
		return ret;

	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
	return ret;

out_gpte_changed:
	return RET_PF_RETRY;
}
/*
 * Check whether the mapped gfn can write its own page table in the current
 * mapping.
 *
 * This is a helper for FNAME(page_fault). When the guest uses a large page
 * to map a writable gfn that is itself in use as a page table, force KVM to
 * map it with a small page: a new shadow page will be created anyway when
 * KVM establishes the shadow page table, which stops KVM from using a large
 * page. Doing this early avoids unnecessary #PFs and emulation.
 *
 * @write_fault_to_shadow_pgtable will return true if the fault gfn is
 * currently used as its page table.
 *
 * Note: the PDPT page table is not checked for PAE 32-bit guests. That is ok
 * since the PDPT is always shadowed, which means a large page can never be
 * used to map the gfn that holds the PDPT.
 */
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
			      struct guest_walker *walker, bool user_fault,
			      bool *write_fault_to_shadow_pgtable)
{
	int level;
	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
	bool self_changed = false;

	if (!(walker->pte_access & ACC_WRITE_MASK ||
	      (!is_cr0_wp(vcpu->arch.mmu) && !user_fault)))
		return false;

	for (level = walker->level; level <= walker->max_level; level++) {
		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

		self_changed |= !(gfn & mask);
		*write_fault_to_shadow_pgtable |= !gfn;
	}

	return self_changed;
}
/*
 * Page fault handler. There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct guest_walker walker;
	int r;
	unsigned long mmu_seq;
	bool is_self_change_mapping;

	pgprintk("%s: addr %lx err %x\n", __func__, fault->addr, fault->error_code);
	WARN_ON_ONCE(fault->is_tdp);

	/*
	 * Look up the guest pte for the faulting address.
	 * If PFEC.RSVD is set, this is a shadow page fault.
	 * The bit needs to be cleared before walking guest page tables.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, fault->addr,
			     fault->error_code & ~PFERR_RSVD_MASK);

	/*
	 * The page is not mapped by the guest. Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!fault->prefetch)
			kvm_inject_emulated_page_fault(vcpu, &walker.fault);

		return RET_PF_RETRY;
	}

	fault->gfn = walker.gfn;
	fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);

	if (page_fault_handle_page_track(vcpu, fault)) {
		shadow_page_table_clear_flood(vcpu, fault->addr);
		return RET_PF_EMULATE;
	}

	r = mmu_topup_memory_caches(vcpu, true);
	if (r)
		return r;

	vcpu->arch.write_fault_to_shadow_pgtable = false;

	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
	      &walker, fault->user, &vcpu->arch.write_fault_to_shadow_pgtable);

	if (is_self_change_mapping)
		fault->max_level = PG_LEVEL_4K;
	else
		fault->max_level = walker.level;

	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
	smp_rmb();

	r = kvm_faultin_pfn(vcpu, fault);
	if (r != RET_PF_CONTINUE)
		return r;

	r = handle_abnormal_pfn(vcpu, fault, walker.pte_access);
	if (r != RET_PF_CONTINUE)
		return r;

	/*
	 * Do not change pte_access if the pfn is a mmio page, otherwise
	 * we will cache the incorrect access into mmio spte.
	 */
	if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) &&
	    !is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) {
		walker.pte_access |= ACC_WRITE_MASK;
		walker.pte_access &= ~ACC_USER_MASK;

		/*
		 * If we converted a user page to a kernel page,
		 * so that the kernel can write to it when cr0.wp=0,
		 * then we should prevent the kernel from executing it
		 * if SMEP is enabled.
		 */
		if (is_cr4_smep(vcpu->arch.mmu))
			walker.pte_access &= ~ACC_EXEC_MASK;
	}

	r = RET_PF_RETRY;
	write_lock(&vcpu->kvm->mmu_lock);

	if (is_page_fault_stale(vcpu, fault, mmu_seq))
		goto out_unlock;

	r = make_mmu_pages_available(vcpu);
	if (r)
		goto out_unlock;
	r = FNAME(fetch)(vcpu, fault, &walker);

out_unlock:
	write_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(fault->pfn);
	return r;
}
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != PG_LEVEL_4K);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << SPTE_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	u64 old_spte;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check return value here, rmap_can_add() can
	 * help us to skip pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu, true);

	if (!VALID_PAGE(root_hpa)) {
		WARN_ON(1);
		return;
	}

	write_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = sptep_to_sp(sptep);
		old_spte = *sptep;
		if (is_last_spte(old_spte, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += spte_index(sptep) * sizeof(pt_element_t);

			mmu_page_zap_pte(vcpu->kvm, sp, sptep, NULL);
			if (is_shadow_present_pte(old_spte))
				kvm_flush_remote_tlbs_with_address(vcpu->kvm,
					sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
						       sizeof(pt_element_t)))
				break;

			FNAME(prefetch_gpte)(vcpu, sp, sptep, gpte, false);
		}

		if (!sp->unsync_children)
			break;
	}
	write_unlock(&vcpu->kvm->mmu_lock);
}

/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       gpa_t addr, u64 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = INVALID_GPA;
	int r;

#ifndef CONFIG_X86_64
	/* A 64-bit GVA should be impossible on 32-bit KVM. */
	WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu);
#endif

	r = FNAME(walk_addr_generic)(&walker, vcpu, mmu, addr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= addr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
/*
 * Using the information in sp->shadowed_translation (kvm_mmu_page_get_gfn()) is
 * safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Returns
 * < 0: the sp should be zapped
 *   0: the sp is synced and no tlb flushing is required
 * > 0: the sp is synced and tlb flushing is required
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
	int i;
	bool host_writable;
	gpa_t first_pte_gpa;
	bool flush = false;

	/*
	 * Ignore various flags when verifying that it's safe to sync a shadow
	 * page using the current MMU context.
	 *
	 * - level: not part of the overall MMU role and will never match as the MMU's
	 *          level tracks the root level
	 * - access: updated based on the new guest PTE
	 * - quadrant: not part of the overall MMU role (similar to level)
	 */
	const union kvm_mmu_page_role sync_role_ign = {
		.level = 0xf,
		.access = 0x7,
		.quadrant = 0x3,
		.passthrough = 0x1,
	};

	/*
	 * Direct pages can never be unsync, and KVM should never attempt to
	 * sync a shadow page for a different MMU context, e.g. if the role
	 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
	 * reserved bits checks will be wrong, etc...
	 */
	if (WARN_ON_ONCE(sp->role.direct ||
			 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
		return -1;

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
		u64 *sptep, spte;
		struct kvm_memory_slot *slot;
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
					       sizeof(pt_element_t)))
			return -1;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			flush = true;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(gpte);
		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
			continue;

		/*
		 * Drop the SPTE if the new protections would result in a RWX=0
		 * SPTE or if the gfn is changing. The RWX=0 case only affects
		 * EPT with execute-only support, i.e. EPT without an effective
		 * "present" bit, as all other paging modes will create a
		 * read-only SPTE if pte_access is zero.
		 */
		if ((!pte_access && !shadow_present_mask) ||
		    gfn != kvm_mmu_page_get_gfn(sp, i)) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			flush = true;
			continue;
		}

		/* Update the shadowed access bits in case they changed. */
		kvm_mmu_page_set_access(sp, i, pte_access);

		sptep = &sp->spt[i];
		spte = *sptep;
		host_writable = spte & shadow_host_writable_mask;
		slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
		make_spte(vcpu, sp, slot, pte_access, gfn,
			  spte_to_pfn(spte), spte, true, false,
			  host_writable, &spte);

		flush |= mmu_spte_update(sptep, spte);
	}

	/*
	 * Note, any flush is purely for KVM's correctness, e.g. when dropping
	 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
	 * unmap or dirty logging event doesn't fail to flush. The guest is
	 * responsible for flushing the TLB to ensure any changes in protection
	 * bits are recognized, i.e. until the guest flushes or page faults on
	 * a relevant address, KVM is architecturally allowed to let vCPUs use
	 * cached translations with the old protection bits.
	 */
	return flush;
}
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
#undef PT_HAVE_ACCESSED_DIRTY