kvm_host.h

  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. #ifndef __KVM_HOST_H
  3. #define __KVM_HOST_H
  4. #include <linux/types.h>
  5. #include <linux/hardirq.h>
  6. #include <linux/list.h>
  7. #include <linux/mutex.h>
  8. #include <linux/spinlock.h>
  9. #include <linux/signal.h>
  10. #include <linux/sched.h>
  11. #include <linux/sched/stat.h>
  12. #include <linux/bug.h>
  13. #include <linux/minmax.h>
  14. #include <linux/mm.h>
  15. #include <linux/mmu_notifier.h>
  16. #include <linux/preempt.h>
  17. #include <linux/msi.h>
  18. #include <linux/slab.h>
  19. #include <linux/vmalloc.h>
  20. #include <linux/rcupdate.h>
  21. #include <linux/ratelimit.h>
  22. #include <linux/err.h>
  23. #include <linux/irqflags.h>
  24. #include <linux/context_tracking.h>
  25. #include <linux/irqbypass.h>
  26. #include <linux/rcuwait.h>
  27. #include <linux/refcount.h>
  28. #include <linux/nospec.h>
  29. #include <linux/notifier.h>
  30. #include <linux/ftrace.h>
  31. #include <linux/hashtable.h>
  32. #include <linux/instrumentation.h>
  33. #include <linux/interval_tree.h>
  34. #include <linux/rbtree.h>
  35. #include <linux/xarray.h>
  36. #include <asm/signal.h>
  37. #include <linux/kvm.h>
  38. #include <linux/kvm_para.h>
  39. #include <linux/kvm_types.h>
  40. #include <asm/kvm_host.h>
  41. #include <linux/kvm_dirty_ring.h>
  42. #ifndef KVM_MAX_VCPU_IDS
  43. #define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
  44. #endif
  45. /*
  46. * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by KVM;
  47. * the other bits are visible to userspace and are defined in
  48. * include/uapi/linux/kvm.h.
  49. */
  50. #define KVM_MEMSLOT_INVALID (1UL << 16)
  51. /*
  52. * Bit 63 of the memslot generation number is an "update in-progress flag",
  53. * e.g. is temporarily set for the duration of install_new_memslots().
  54. * This flag effectively creates a unique generation number that is used to
  55. * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
  56. * i.e. may (or may not) have come from the previous memslots generation.
  57. *
  58. * This is necessary because the actual memslots update is not atomic with
  59. * respect to the generation number update. Updating the generation number
  60. * first would allow a vCPU to cache a spte from the old memslots using the
  61. * new generation number, and updating the generation number after switching
  62. * to the new memslots would allow cache hits using the old generation number
  63. * to reference the defunct memslots.
  64. *
  65. * This mechanism is used to prevent getting hits in KVM's caches while a
  66. * memslot update is in-progress, and to prevent cache hits *after* updating
  67. * the actual generation number against accesses that were inserted into the
  68. * cache *before* the memslots were updated.
  69. */
  70. #define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63)
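/*
 * Illustrative sketch (editor's addition, not upstream code): a cache
 * consumer typically records slots->generation when it fills the cache and
 * treats a mismatch, or a generation with the update-in-progress flag set,
 * as stale:
 *
 *	static bool cache_entry_is_stale(struct kvm *kvm, u64 cached_gen)
 *	{
 *		u64 gen = kvm_memslots(kvm)->generation;
 *
 *		return gen != cached_gen ||
 *		       (gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
 *	}
 *
 * cache_entry_is_stale() is a hypothetical helper name used only for
 * illustration.
 */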
  71. /* Two fragments for cross MMIO pages. */
  72. #define KVM_MAX_MMIO_FRAGMENTS 2
  73. #ifndef KVM_ADDRESS_SPACE_NUM
  74. #define KVM_ADDRESS_SPACE_NUM 1
  75. #endif
  76. /*
  77. * For a normal pfn, the highest 12 bits should be zero, so we can
  78. * use bits 62 ~ 52 to indicate an error pfn and bit 63 to indicate
  79. * a noslot pfn.
  80. */
  81. #define KVM_PFN_ERR_MASK (0x7ffULL << 52)
  82. #define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
  83. #define KVM_PFN_NOSLOT (0x1ULL << 63)
  84. #define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
  85. #define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
  86. #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
  87. /*
  88. * Error pfns indicate that the gfn is in a slot but the host failed to
  89. * translate it to a pfn.
  90. */
  91. static inline bool is_error_pfn(kvm_pfn_t pfn)
  92. {
  93. return !!(pfn & KVM_PFN_ERR_MASK);
  94. }
  95. /*
  96. * error_noslot pfns indicate that the gfn cannot be
  97. * translated to a pfn: either it is not in any slot, or
  98. * the translation to a pfn failed.
  99. */
  100. static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
  101. {
  102. return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
  103. }
  104. /* noslot pfn indicates that the gfn is not in slot. */
  105. static inline bool is_noslot_pfn(kvm_pfn_t pfn)
  106. {
  107. return pfn == KVM_PFN_NOSLOT;
  108. }
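/*
 * Illustrative usage (editor's sketch, not upstream code): a caller can
 * distinguish "no slot" from a host translation failure before touching
 * the pfn returned by gfn_to_pfn(), which is declared further below:
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))
 *		return -ENOENT;		// gfn is not backed by any memslot
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;		// in a slot, but translation failed
 */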
  109. /*
  110. * Architectures whose KVM_HVA_ERR_BAD is not PAGE_OFFSET (e.g. s390)
  111. * provide their own defines and kvm_is_error_hva().
  112. */
  113. #ifndef KVM_HVA_ERR_BAD
  114. #define KVM_HVA_ERR_BAD (PAGE_OFFSET)
  115. #define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)
  116. static inline bool kvm_is_error_hva(unsigned long addr)
  117. {
  118. return addr >= PAGE_OFFSET;
  119. }
  120. #endif
  121. #define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
  122. static inline bool is_error_page(struct page *page)
  123. {
  124. return IS_ERR(page);
  125. }
  126. #define KVM_REQUEST_MASK GENMASK(7,0)
  127. #define KVM_REQUEST_NO_WAKEUP BIT(8)
  128. #define KVM_REQUEST_WAIT BIT(9)
  129. #define KVM_REQUEST_NO_ACTION BIT(10)
  130. /*
  131. * Architecture-independent vcpu->requests bit members.
  132. * Bits 3-7 are reserved for more arch-independent bits.
  133. */
  134. #define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
  135. #define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
  136. #define KVM_REQ_UNBLOCK 2
  137. #define KVM_REQUEST_ARCH_BASE 8
  138. /*
  139. * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
  140. * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
  141. * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
  142. * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous
  143. * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
  144. * guarantee the vCPU received an IPI and has actually exited guest mode.
  145. */
  146. #define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
  147. #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
  148. BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
  149. (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
  150. })
  151. #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
  152. bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
  153. unsigned long *vcpu_bitmap);
  154. bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
  155. bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
  156. struct kvm_vcpu *except);
  157. bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
  158. unsigned long *vcpu_bitmap);
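/*
 * Illustrative sketch (editor's addition): an architecture defines its own
 * request bits relative to KVM_REQUEST_ARCH_BASE and broadcasts them with
 * the helpers above, e.g.
 *
 *	#define KVM_REQ_EXAMPLE		KVM_ARCH_REQ(0)
 *	#define KVM_REQ_EXAMPLE_SYNC	KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT)
 *
 *	kvm_make_all_cpus_request(kvm, KVM_REQ_EXAMPLE);
 *
 * KVM_REQ_EXAMPLE and KVM_REQ_EXAMPLE_SYNC are hypothetical names used only
 * for illustration.
 */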
  159. #define KVM_USERSPACE_IRQ_SOURCE_ID 0
  160. #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
  161. extern struct mutex kvm_lock;
  162. extern struct list_head vm_list;
  163. struct kvm_io_range {
  164. gpa_t addr;
  165. int len;
  166. struct kvm_io_device *dev;
  167. };
  168. #define NR_IOBUS_DEVS 1000
  169. struct kvm_io_bus {
  170. int dev_count;
  171. int ioeventfd_count;
  172. struct kvm_io_range range[];
  173. };
  174. enum kvm_bus {
  175. KVM_MMIO_BUS,
  176. KVM_PIO_BUS,
  177. KVM_VIRTIO_CCW_NOTIFY_BUS,
  178. KVM_FAST_MMIO_BUS,
  179. KVM_NR_BUSES
  180. };
  181. int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
  182. int len, const void *val);
  183. int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
  184. gpa_t addr, int len, const void *val, long cookie);
  185. int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
  186. int len, void *val);
  187. int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
  188. int len, struct kvm_io_device *dev);
  189. int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
  190. struct kvm_io_device *dev);
  191. struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
  192. gpa_t addr);
  193. #ifdef CONFIG_KVM_ASYNC_PF
  194. struct kvm_async_pf {
  195. struct work_struct work;
  196. struct list_head link;
  197. struct list_head queue;
  198. struct kvm_vcpu *vcpu;
  199. struct mm_struct *mm;
  200. gpa_t cr2_or_gpa;
  201. unsigned long addr;
  202. struct kvm_arch_async_pf arch;
  203. bool wakeup_all;
  204. bool notpresent_injected;
  205. };
  206. void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
  207. void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
  208. bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
  209. unsigned long hva, struct kvm_arch_async_pf *arch);
  210. int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
  211. #endif
  212. #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
  213. struct kvm_gfn_range {
  214. struct kvm_memory_slot *slot;
  215. gfn_t start;
  216. gfn_t end;
  217. pte_t pte;
  218. bool may_block;
  219. };
  220. bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
  221. bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
  222. bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
  223. bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
  224. #endif
  225. enum {
  226. OUTSIDE_GUEST_MODE,
  227. IN_GUEST_MODE,
  228. EXITING_GUEST_MODE,
  229. READING_SHADOW_PAGE_TABLES,
  230. };
  231. #define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA)
  232. struct kvm_host_map {
  233. /*
  234. * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
  235. * a 'struct page' for it). When using the mem= kernel parameter, some
  236. * memory can be used as guest memory without being managed by the
  237. * host kernel.
  238. * If 'pfn' is not managed by the host kernel, this field is
  239. * initialized to KVM_UNMAPPED_PAGE.
  240. */
  241. struct page *page;
  242. void *hva;
  243. kvm_pfn_t pfn;
  244. kvm_pfn_t gfn;
  245. };
  246. /*
  247. * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
  248. * directly to check for that.
  249. */
  250. static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
  251. {
  252. return !!map->hva;
  253. }
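/*
 * Illustrative usage (editor's sketch): map a guest page, check the mapping
 * with kvm_vcpu_mapped() rather than peeking at the struct, and unmap it
 * when done (kvm_vcpu_map()/kvm_vcpu_unmap() are declared further below):
 *
 *	struct kvm_host_map map;
 *
 *	if (kvm_vcpu_map(vcpu, gpa, &map))
 *		return -EFAULT;
 *	if (kvm_vcpu_mapped(&map))
 *		memcpy(map.hva, data, len);
 *	kvm_vcpu_unmap(vcpu, &map, true);	// 'true' marks the page dirty
 */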
  254. static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
  255. {
  256. return single_task_running() && !need_resched() && ktime_before(cur, stop);
  257. }
  258. /*
  259. * Sometimes a large or cross-page mmio needs to be broken up into separate
  260. * exits for userspace servicing.
  261. */
  262. struct kvm_mmio_fragment {
  263. gpa_t gpa;
  264. void *data;
  265. unsigned len;
  266. };
  267. struct kvm_vcpu {
  268. struct kvm *kvm;
  269. #ifdef CONFIG_PREEMPT_NOTIFIERS
  270. struct preempt_notifier preempt_notifier;
  271. #endif
  272. int cpu;
  273. int vcpu_id; /* id given by userspace at creation */
  274. int vcpu_idx; /* index in kvm->vcpus array */
  275. int ____srcu_idx; /* Don't use this directly. You've been warned. */
  276. #ifdef CONFIG_PROVE_RCU
  277. int srcu_depth;
  278. #endif
  279. int mode;
  280. u64 requests;
  281. unsigned long guest_debug;
  282. struct mutex mutex;
  283. struct kvm_run *run;
  284. #ifndef __KVM_HAVE_ARCH_WQP
  285. struct rcuwait wait;
  286. #endif
  287. struct pid __rcu *pid;
  288. int sigset_active;
  289. sigset_t sigset;
  290. unsigned int halt_poll_ns;
  291. bool valid_wakeup;
  292. #ifdef CONFIG_HAS_IOMEM
  293. int mmio_needed;
  294. int mmio_read_completed;
  295. int mmio_is_write;
  296. int mmio_cur_fragment;
  297. int mmio_nr_fragments;
  298. struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
  299. #endif
  300. #ifdef CONFIG_KVM_ASYNC_PF
  301. struct {
  302. u32 queued;
  303. struct list_head queue;
  304. struct list_head done;
  305. spinlock_t lock;
  306. } async_pf;
  307. #endif
  308. #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
  309. /*
  310. * CPU relax intercept or pause loop exit optimization.
  311. * in_spin_loop: set when a vcpu does a pause loop exit
  312. * or has a cpu relax intercepted.
  313. * dy_eligible: indicates whether vcpu is eligible for directed yield.
  314. */
  315. struct {
  316. bool in_spin_loop;
  317. bool dy_eligible;
  318. } spin_loop;
  319. #endif
  320. bool preempted;
  321. bool ready;
  322. struct kvm_vcpu_arch arch;
  323. struct kvm_vcpu_stat stat;
  324. char stats_id[KVM_STATS_NAME_SIZE];
  325. struct kvm_dirty_ring dirty_ring;
  326. /*
  327. * The most recently used memslot by this vCPU and the slots generation
  328. * for which it is valid.
  329. * No wraparound protection is needed since generations won't overflow in
  330. * thousands of years, even assuming 1M memslot operations per second.
  331. */
  332. struct kvm_memory_slot *last_used_slot;
  333. u64 last_used_slot_gen;
  334. };
  335. /*
  336. * Start accounting time towards a guest.
  337. * Must be called before entering guest context.
  338. */
  339. static __always_inline void guest_timing_enter_irqoff(void)
  340. {
  341. /*
  342. * This is running in ioctl context so it's safe to assume that it's the
  343. * stime pending cputime to flush.
  344. */
  345. instrumentation_begin();
  346. vtime_account_guest_enter();
  347. instrumentation_end();
  348. }
  349. /*
  350. * Enter guest context and enter an RCU extended quiescent state.
  351. *
  352. * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
  353. * unsafe to use any code which may directly or indirectly use RCU, tracing
  354. * (including IRQ flag tracing), or lockdep. All code in this period must be
  355. * non-instrumentable.
  356. */
  357. static __always_inline void guest_context_enter_irqoff(void)
  358. {
  359. /*
  360. * KVM does not hold any references to RCU-protected data when it
  361. * switches the CPU into guest mode. In fact, switching to guest mode
  362. * is very similar to exiting to userspace from RCU's point of view. In
  363. * addition, the CPU may stay in guest mode for quite a long time (up
  364. * to one time slice). Let's treat guest mode as a quiescent state,
  365. * just like we do with user-mode execution.
  366. */
  367. if (!context_tracking_guest_enter()) {
  368. instrumentation_begin();
  369. rcu_virt_note_context_switch(smp_processor_id());
  370. instrumentation_end();
  371. }
  372. }
  373. /*
  374. * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
  375. * guest_state_enter_irqoff().
  376. */
  377. static __always_inline void guest_enter_irqoff(void)
  378. {
  379. guest_timing_enter_irqoff();
  380. guest_context_enter_irqoff();
  381. }
  382. /**
  383. * guest_state_enter_irqoff - Fixup state when entering a guest
  384. *
  385. * Entry to a guest will enable interrupts, but interrupts are still
  386. * disabled in the kernel when this is invoked. Also tell RCU about it.
  387. *
  388. * 1) Trace interrupts on state
  389. * 2) Invoke context tracking if enabled to adjust RCU state
  390. * 3) Tell lockdep that interrupts are enabled
  391. *
  392. * Invoked from architecture specific code before entering a guest.
  393. * Must be called with interrupts disabled and the caller must be
  394. * non-instrumentable.
  395. * The caller has to invoke guest_timing_enter_irqoff() before this.
  396. *
  397. * Note: this is analogous to exit_to_user_mode().
  398. */
  399. static __always_inline void guest_state_enter_irqoff(void)
  400. {
  401. instrumentation_begin();
  402. trace_hardirqs_on_prepare();
  403. lockdep_hardirqs_on_prepare();
  404. instrumentation_end();
  405. guest_context_enter_irqoff();
  406. lockdep_hardirqs_on(CALLER_ADDR0);
  407. }
  408. /*
  409. * Exit guest context and exit an RCU extended quiescent state.
  410. *
  411. * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
  412. * unsafe to use any code which may directly or indirectly use RCU, tracing
  413. * (including IRQ flag tracing), or lockdep. All code in this period must be
  414. * non-instrumentable.
  415. */
  416. static __always_inline void guest_context_exit_irqoff(void)
  417. {
  418. context_tracking_guest_exit();
  419. }
  420. /*
  421. * Stop accounting time towards a guest.
  422. * Must be called after exiting guest context.
  423. */
  424. static __always_inline void guest_timing_exit_irqoff(void)
  425. {
  426. instrumentation_begin();
  427. /* Flush the guest cputime we spent on the guest */
  428. vtime_account_guest_exit();
  429. instrumentation_end();
  430. }
  431. /*
  432. * Deprecated. Architectures should move to guest_state_exit_irqoff() and
  433. * guest_timing_exit_irqoff().
  434. */
  435. static __always_inline void guest_exit_irqoff(void)
  436. {
  437. guest_context_exit_irqoff();
  438. guest_timing_exit_irqoff();
  439. }
  440. static inline void guest_exit(void)
  441. {
  442. unsigned long flags;
  443. local_irq_save(flags);
  444. guest_exit_irqoff();
  445. local_irq_restore(flags);
  446. }
  447. /**
  448. * guest_state_exit_irqoff - Establish state when returning from guest mode
  449. *
  450. * Entry from a guest disables interrupts, but guest mode is traced as
  451. * interrupts enabled. Also, with NO_HZ_FULL, RCU might be idle.
  452. *
  453. * 1) Tell lockdep that interrupts are disabled
  454. * 2) Invoke context tracking if enabled to reactivate RCU
  455. * 3) Trace interrupts off state
  456. *
  457. * Invoked from architecture specific code after exiting a guest.
  458. * Must be invoked with interrupts disabled and the caller must be
  459. * non-instrumentable.
  460. * The caller has to invoke guest_timing_exit_irqoff() after this.
  461. *
  462. * Note: this is analogous to enter_from_user_mode().
  463. */
  464. static __always_inline void guest_state_exit_irqoff(void)
  465. {
  466. lockdep_hardirqs_off(CALLER_ADDR0);
  467. guest_context_exit_irqoff();
  468. instrumentation_begin();
  469. trace_hardirqs_off_finish();
  470. instrumentation_end();
  471. }
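/*
 * Illustrative ordering sketch (editor's addition): a typical arch vcpu run
 * path pairs the timing and state helpers around the actual guest entry,
 * with interrupts disabled; arch_enter_guest() below is a hypothetical
 * placeholder for the architecture-specific entry code:
 *
 *	local_irq_disable();
 *	guest_timing_enter_irqoff();
 *	guest_state_enter_irqoff();
 *
 *	arch_enter_guest(vcpu);
 *
 *	guest_state_exit_irqoff();
 *	guest_timing_exit_irqoff();
 *	local_irq_enable();
 */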
  472. static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
  473. {
  474. /*
  475. * The memory barrier ensures a previous write to vcpu->requests cannot
  476. * be reordered with the read of vcpu->mode. It pairs with the general
  477. * memory barrier following the write of vcpu->mode in VCPU RUN.
  478. */
  479. smp_mb__before_atomic();
  480. return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
  481. }
  482. /*
  483. * Some of the bitops functions do not support overly long bitmaps;
  484. * this number must be chosen so that those limits are not exceeded.
  485. */
  486. #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
  487. /*
  488. * Since at idle each memslot belongs to two memslot sets it has to contain
  489. * two embedded nodes for each data structure that it forms a part of.
  490. *
  491. * Two memslot sets (one active and one inactive) are necessary so the VM
  492. * continues to run on one memslot set while the other is being modified.
  493. *
  494. * These two memslot sets normally point to the same set of memslots.
  495. * They can, however, be desynchronized when performing a memslot management
  496. * operation by replacing the memslot to be modified by its copy.
  497. * After the operation is complete, both memslot sets once again point to
  498. * the same, common set of memslot data.
  499. *
  500. * The memslots themselves are independent of each other so they can be
  501. * individually added or deleted.
  502. */
  503. struct kvm_memory_slot {
  504. struct hlist_node id_node[2];
  505. struct interval_tree_node hva_node[2];
  506. struct rb_node gfn_node[2];
  507. gfn_t base_gfn;
  508. unsigned long npages;
  509. unsigned long *dirty_bitmap;
  510. struct kvm_arch_memory_slot arch;
  511. unsigned long userspace_addr;
  512. u32 flags;
  513. short id;
  514. u16 as_id;
  515. };
  516. static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
  517. {
  518. return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
  519. }
  520. static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
  521. {
  522. return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
  523. }
  524. static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
  525. {
  526. unsigned long len = kvm_dirty_bitmap_bytes(memslot);
  527. return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
  528. }
  529. #ifndef KVM_DIRTY_LOG_MANUAL_CAPS
  530. #define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
  531. #endif
  532. struct kvm_s390_adapter_int {
  533. u64 ind_addr;
  534. u64 summary_addr;
  535. u64 ind_offset;
  536. u32 summary_offset;
  537. u32 adapter_id;
  538. };
  539. struct kvm_hv_sint {
  540. u32 vcpu;
  541. u32 sint;
  542. };
  543. struct kvm_xen_evtchn {
  544. u32 port;
  545. u32 vcpu_id;
  546. int vcpu_idx;
  547. u32 priority;
  548. };
  549. struct kvm_kernel_irq_routing_entry {
  550. u32 gsi;
  551. u32 type;
  552. int (*set)(struct kvm_kernel_irq_routing_entry *e,
  553. struct kvm *kvm, int irq_source_id, int level,
  554. bool line_status);
  555. union {
  556. struct {
  557. unsigned irqchip;
  558. unsigned pin;
  559. } irqchip;
  560. struct {
  561. u32 address_lo;
  562. u32 address_hi;
  563. u32 data;
  564. u32 flags;
  565. u32 devid;
  566. } msi;
  567. struct kvm_s390_adapter_int adapter;
  568. struct kvm_hv_sint hv_sint;
  569. struct kvm_xen_evtchn xen_evtchn;
  570. };
  571. struct hlist_node link;
  572. };
  573. #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
  574. struct kvm_irq_routing_table {
  575. int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
  576. u32 nr_rt_entries;
  577. /*
  578. * Array indexed by gsi. Each entry contains the list of irq chips
  579. * the gsi is connected to.
  580. */
  581. struct hlist_head map[];
  582. };
  583. #endif
  584. #ifndef KVM_INTERNAL_MEM_SLOTS
  585. #define KVM_INTERNAL_MEM_SLOTS 0
  586. #endif
  587. #define KVM_MEM_SLOTS_NUM SHRT_MAX
  588. #define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
  589. #ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
  590. static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
  591. {
  592. return 0;
  593. }
  594. #endif
  595. struct kvm_memslots {
  596. u64 generation;
  597. atomic_long_t last_used_slot;
  598. struct rb_root_cached hva_tree;
  599. struct rb_root gfn_tree;
  600. /*
  601. * The mapping table from slot id to memslot.
  602. *
  603. * 7-bit bucket count matches the size of the old id to index array for
  604. * 512 slots, while giving good performance with this slot count.
  605. * Higher bucket counts bring only small performance improvements but
  606. * always result in higher memory usage (even for lower memslot counts).
  607. */
  608. DECLARE_HASHTABLE(id_hash, 7);
  609. int node_idx;
  610. };
  611. struct kvm {
  612. #ifdef KVM_HAVE_MMU_RWLOCK
  613. rwlock_t mmu_lock;
  614. #else
  615. spinlock_t mmu_lock;
  616. #endif /* KVM_HAVE_MMU_RWLOCK */
  617. struct mutex slots_lock;
  618. /*
  619. * Protects the arch-specific fields of the struct kvm_memory_slot
  620. * objects in use by the VM. To be used under the slots_lock (above) or in a
  621. * kvm->srcu critical section where acquiring the slots_lock would
  622. * lead to deadlock with the synchronize_srcu in
  623. * install_new_memslots.
  624. */
  625. struct mutex slots_arch_lock;
  626. struct mm_struct *mm; /* userspace tied to this vm */
  627. unsigned long nr_memslot_pages;
  628. /* The two memslot sets - active and inactive (per address space) */
  629. struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
  630. /* The current active memslot set for each address space */
  631. struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
  632. struct xarray vcpu_array;
  633. /* Used to wait for completion of MMU notifiers. */
  634. spinlock_t mn_invalidate_lock;
  635. unsigned long mn_active_invalidate_count;
  636. struct rcuwait mn_memslots_update_rcuwait;
  637. /* For management / invalidation of gfn_to_pfn_caches */
  638. spinlock_t gpc_lock;
  639. struct list_head gpc_list;
  640. /*
  641. * created_vcpus is protected by kvm->lock, and is incremented
  642. * at the beginning of KVM_CREATE_VCPU. online_vcpus is only
  643. * incremented after storing the kvm_vcpu pointer in vcpus,
  644. * and is accessed atomically.
  645. */
  646. atomic_t online_vcpus;
  647. int max_vcpus;
  648. int created_vcpus;
  649. int last_boosted_vcpu;
  650. struct list_head vm_list;
  651. struct mutex lock;
  652. struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
  653. #ifdef CONFIG_HAVE_KVM_EVENTFD
  654. struct {
  655. spinlock_t lock;
  656. struct list_head items;
  657. struct list_head resampler_list;
  658. struct mutex resampler_lock;
  659. } irqfds;
  660. struct list_head ioeventfds;
  661. #endif
  662. struct kvm_vm_stat stat;
  663. struct kvm_arch arch;
  664. refcount_t users_count;
  665. #ifdef CONFIG_KVM_MMIO
  666. struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
  667. spinlock_t ring_lock;
  668. struct list_head coalesced_zones;
  669. #endif
  670. struct mutex irq_lock;
  671. #ifdef CONFIG_HAVE_KVM_IRQCHIP
  672. /*
  673. * Update side is protected by irq_lock.
  674. */
  675. struct kvm_irq_routing_table __rcu *irq_routing;
  676. #endif
  677. #ifdef CONFIG_HAVE_KVM_IRQFD
  678. struct hlist_head irq_ack_notifier_list;
  679. #endif
  680. #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
  681. struct mmu_notifier mmu_notifier;
  682. unsigned long mmu_invalidate_seq;
  683. long mmu_invalidate_in_progress;
  684. unsigned long mmu_invalidate_range_start;
  685. unsigned long mmu_invalidate_range_end;
  686. #endif
  687. struct list_head devices;
  688. u64 manual_dirty_log_protect;
  689. struct dentry *debugfs_dentry;
  690. struct kvm_stat_data **debugfs_stat_data;
  691. struct srcu_struct srcu;
  692. struct srcu_struct irq_srcu;
  693. pid_t userspace_pid;
  694. bool override_halt_poll_ns;
  695. unsigned int max_halt_poll_ns;
  696. u32 dirty_ring_size;
  697. bool vm_bugged;
  698. bool vm_dead;
  699. #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
  700. struct notifier_block pm_notifier;
  701. #endif
  702. char stats_id[KVM_STATS_NAME_SIZE];
  703. };
  704. #define kvm_err(fmt, ...) \
  705. pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
  706. #define kvm_info(fmt, ...) \
  707. pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
  708. #define kvm_debug(fmt, ...) \
  709. pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
  710. #define kvm_debug_ratelimited(fmt, ...) \
  711. pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
  712. ## __VA_ARGS__)
  713. #define kvm_pr_unimpl(fmt, ...) \
  714. pr_err_ratelimited("kvm [%i]: " fmt, \
  715. task_tgid_nr(current), ## __VA_ARGS__)
  716. /* The guest did something we don't support. */
  717. #define vcpu_unimpl(vcpu, fmt, ...) \
  718. kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
  719. (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)
  720. #define vcpu_debug(vcpu, fmt, ...) \
  721. kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
  722. #define vcpu_debug_ratelimited(vcpu, fmt, ...) \
  723. kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
  724. ## __VA_ARGS__)
  725. #define vcpu_err(vcpu, fmt, ...) \
  726. kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
  727. static inline void kvm_vm_dead(struct kvm *kvm)
  728. {
  729. kvm->vm_dead = true;
  730. kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
  731. }
  732. static inline void kvm_vm_bugged(struct kvm *kvm)
  733. {
  734. kvm->vm_bugged = true;
  735. kvm_vm_dead(kvm);
  736. }
  737. #define KVM_BUG(cond, kvm, fmt...) \
  738. ({ \
  739. int __ret = (cond); \
  740. \
  741. if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
  742. kvm_vm_bugged(kvm); \
  743. unlikely(__ret); \
  744. })
  745. #define KVM_BUG_ON(cond, kvm) \
  746. ({ \
  747. int __ret = (cond); \
  748. \
  749. if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
  750. kvm_vm_bugged(kvm); \
  751. unlikely(__ret); \
  752. })
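/*
 * Illustrative usage (editor's sketch): assert a KVM invariant and mark the
 * whole VM as bugged (and dead) if it does not hold:
 *
 *	if (KVM_BUG_ON(vcpu->mode == IN_GUEST_MODE, vcpu->kvm))
 *		return -EIO;
 *
 * The specific condition above is only an example.
 */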
  753. static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
  754. {
  755. #ifdef CONFIG_PROVE_RCU
  756. WARN_ONCE(vcpu->srcu_depth++,
  757. "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
  758. #endif
  759. vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  760. }
  761. static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
  762. {
  763. srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
  764. #ifdef CONFIG_PROVE_RCU
  765. WARN_ONCE(--vcpu->srcu_depth,
  766. "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
  767. #endif
  768. }
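/*
 * Illustrative usage (editor's sketch): take the vCPU's SRCU read lock
 * around memslot lookups instead of open-coding srcu_read_lock(&kvm->srcu):
 *
 *	kvm_vcpu_srcu_read_lock(vcpu);
 *	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 *	// ... use the memslot ...
 *	kvm_vcpu_srcu_read_unlock(vcpu);
 */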
  769. static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
  770. {
  771. return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
  772. }
  773. static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
  774. {
  775. return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
  776. lockdep_is_held(&kvm->slots_lock) ||
  777. !refcount_read(&kvm->users_count));
  778. }
  779. static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
  780. {
  781. int num_vcpus = atomic_read(&kvm->online_vcpus);
  782. i = array_index_nospec(i, num_vcpus);
  783. /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
  784. smp_rmb();
  785. return xa_load(&kvm->vcpu_array, i);
  786. }
  787. #define kvm_for_each_vcpu(idx, vcpup, kvm) \
  788. xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
  789. (atomic_read(&kvm->online_vcpus) - 1))
  790. static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
  791. {
  792. struct kvm_vcpu *vcpu = NULL;
  793. unsigned long i;
  794. if (id < 0)
  795. return NULL;
  796. if (id < KVM_MAX_VCPUS)
  797. vcpu = kvm_get_vcpu(kvm, id);
  798. if (vcpu && vcpu->vcpu_id == id)
  799. return vcpu;
  800. kvm_for_each_vcpu(i, vcpu, kvm)
  801. if (vcpu->vcpu_id == id)
  802. return vcpu;
  803. return NULL;
  804. }
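/*
 * Illustrative usage (editor's sketch): iterate over all online vCPUs, e.g.
 * to kick every vCPU out of guest mode:
 *
 *	unsigned long i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */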
  805. void kvm_destroy_vcpus(struct kvm *kvm);
  806. void vcpu_load(struct kvm_vcpu *vcpu);
  807. void vcpu_put(struct kvm_vcpu *vcpu);
  808. #ifdef __KVM_HAVE_IOAPIC
  809. void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
  810. void kvm_arch_post_irq_routing_update(struct kvm *kvm);
  811. #else
  812. static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
  813. {
  814. }
  815. static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
  816. {
  817. }
  818. #endif
  819. #ifdef CONFIG_HAVE_KVM_IRQFD
  820. int kvm_irqfd_init(void);
  821. void kvm_irqfd_exit(void);
  822. #else
  823. static inline int kvm_irqfd_init(void)
  824. {
  825. return 0;
  826. }
  827. static inline void kvm_irqfd_exit(void)
  828. {
  829. }
  830. #endif
  831. int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
  832. struct module *module);
  833. void kvm_exit(void);
  834. void kvm_get_kvm(struct kvm *kvm);
  835. bool kvm_get_kvm_safe(struct kvm *kvm);
  836. void kvm_put_kvm(struct kvm *kvm);
  837. bool file_is_kvm(struct file *file);
  838. void kvm_put_kvm_no_destroy(struct kvm *kvm);
  839. static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
  840. {
  841. as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
  842. return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
  843. lockdep_is_held(&kvm->slots_lock) ||
  844. !refcount_read(&kvm->users_count));
  845. }
  846. static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
  847. {
  848. return __kvm_memslots(kvm, 0);
  849. }
  850. static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
  851. {
  852. int as_id = kvm_arch_vcpu_memslots_id(vcpu);
  853. return __kvm_memslots(vcpu->kvm, as_id);
  854. }
  855. static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
  856. {
  857. return RB_EMPTY_ROOT(&slots->gfn_tree);
  858. }
  859. #define kvm_for_each_memslot(memslot, bkt, slots) \
  860. hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
  861. if (WARN_ON_ONCE(!memslot->npages)) { \
  862. } else
  863. static inline
  864. struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
  865. {
  866. struct kvm_memory_slot *slot;
  867. int idx = slots->node_idx;
  868. hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
  869. if (slot->id == id)
  870. return slot;
  871. }
  872. return NULL;
  873. }
  874. /* Iterator used for walking memslots that overlap a gfn range. */
  875. struct kvm_memslot_iter {
  876. struct kvm_memslots *slots;
  877. struct rb_node *node;
  878. struct kvm_memory_slot *slot;
  879. };
  880. static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
  881. {
  882. iter->node = rb_next(iter->node);
  883. if (!iter->node)
  884. return;
  885. iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
  886. }
  887. static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
  888. struct kvm_memslots *slots,
  889. gfn_t start)
  890. {
  891. int idx = slots->node_idx;
  892. struct rb_node *tmp;
  893. struct kvm_memory_slot *slot;
  894. iter->slots = slots;
  895. /*
  896. * Find the so-called "upper bound" of a key - the first node that has
  897. * its key strictly greater than the searched one (the start gfn in our case).
  898. */
  899. iter->node = NULL;
  900. for (tmp = slots->gfn_tree.rb_node; tmp; ) {
  901. slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
  902. if (start < slot->base_gfn) {
  903. iter->node = tmp;
  904. tmp = tmp->rb_left;
  905. } else {
  906. tmp = tmp->rb_right;
  907. }
  908. }
  909. /*
  910. * Find the slot with the lowest gfn that can possibly intersect with
  911. * the range, so we'll ideally have slot start <= range start
  912. */
  913. if (iter->node) {
  914. /*
  915. * A NULL previous node means that the very first slot
  916. * already has a higher start gfn.
  917. * In this case slot start > range start.
  918. */
  919. tmp = rb_prev(iter->node);
  920. if (tmp)
  921. iter->node = tmp;
  922. } else {
  923. /* a NULL node below means no slots */
  924. iter->node = rb_last(&slots->gfn_tree);
  925. }
  926. if (iter->node) {
  927. iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
  928. /*
  929. * It is possible in the slot start < range start case that the
  930. * found slot ends before or at range start (slot end <= range start)
  931. * and so it does not overlap the requested range.
  932. *
  933. * In such non-overlapping case the next slot (if it exists) will
  934. * already have slot start > range start, otherwise the logic above
  935. * would have found it instead of the current slot.
  936. */
  937. if (iter->slot->base_gfn + iter->slot->npages <= start)
  938. kvm_memslot_iter_next(iter);
  939. }
  940. }
  941. static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
  942. {
  943. if (!iter->node)
  944. return false;
  945. /*
  946. * If this slot starts at or beyond the end of the range, so does
  947. * every following one.
  948. */
  949. return iter->slot->base_gfn < end;
  950. }
  951. /* Iterate over each memslot at least partially intersecting [start, end) range */
  952. #define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
  953. for (kvm_memslot_iter_start(iter, slots, start); \
  954. kvm_memslot_iter_is_valid(iter, end); \
  955. kvm_memslot_iter_next(iter))
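/*
 * Illustrative usage (editor's sketch): walk every memslot that overlaps a
 * gfn range, e.g. when invalidating mappings for [start, end):
 *
 *	struct kvm_memslot_iter iter;
 *
 *	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
 *		struct kvm_memory_slot *slot = iter.slot;
 *
 *		// ... operate on the overlapping part of 'slot' ...
 *	}
 */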
  956. /*
  957. * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
  958. * - create a new memory slot
  959. * - delete an existing memory slot
  960. * - modify an existing memory slot
  961. * -- move it in the guest physical memory space
  962. * -- just change its flags
  963. *
  964. * Since flags can be changed by some of these operations, the following
  965. * differentiation is the best we can do for __kvm_set_memory_region():
  966. */
  967. enum kvm_mr_change {
  968. KVM_MR_CREATE,
  969. KVM_MR_DELETE,
  970. KVM_MR_MOVE,
  971. KVM_MR_FLAGS_ONLY,
  972. };
  973. int kvm_set_memory_region(struct kvm *kvm,
  974. const struct kvm_userspace_memory_region *mem);
  975. int __kvm_set_memory_region(struct kvm *kvm,
  976. const struct kvm_userspace_memory_region *mem);
  977. void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
  978. void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
  979. int kvm_arch_prepare_memory_region(struct kvm *kvm,
  980. const struct kvm_memory_slot *old,
  981. struct kvm_memory_slot *new,
  982. enum kvm_mr_change change);
  983. void kvm_arch_commit_memory_region(struct kvm *kvm,
  984. struct kvm_memory_slot *old,
  985. const struct kvm_memory_slot *new,
  986. enum kvm_mr_change change);
  987. /* flush all memory translations */
  988. void kvm_arch_flush_shadow_all(struct kvm *kvm);
  989. /* flush memory translations pointing to 'slot' */
  990. void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
  991. struct kvm_memory_slot *slot);
  992. int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
  993. struct page **pages, int nr_pages);
  994. struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
  995. unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
  996. unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
  997. unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
  998. unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
  999. bool *writable);
  1000. void kvm_release_page_clean(struct page *page);
  1001. void kvm_release_page_dirty(struct page *page);
  1002. kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
  1003. kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
  1004. bool *writable);
  1005. kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
  1006. kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
  1007. kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
  1008. bool atomic, bool *async, bool write_fault,
  1009. bool *writable, hva_t *hva);
  1010. void kvm_release_pfn_clean(kvm_pfn_t pfn);
  1011. void kvm_release_pfn_dirty(kvm_pfn_t pfn);
  1012. void kvm_set_pfn_dirty(kvm_pfn_t pfn);
  1013. void kvm_set_pfn_accessed(kvm_pfn_t pfn);
  1014. void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
  1015. int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
  1016. int len);
  1017. int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
  1018. int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1019. void *data, unsigned long len);
  1020. int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1021. void *data, unsigned int offset,
  1022. unsigned long len);
  1023. int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
  1024. int offset, int len);
  1025. int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
  1026. unsigned long len);
  1027. int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1028. void *data, unsigned long len);
  1029. int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1030. void *data, unsigned int offset,
  1031. unsigned long len);
  1032. int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1033. gpa_t gpa, unsigned long len);
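/*
 * Illustrative usage (editor's sketch): a gfn_to_hva_cache amortizes the
 * gfn->hva translation across repeated accesses to the same guest buffer:
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val = 0;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *		return -EFAULT;
 *	if (kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val)))
 *		return -EFAULT;
 */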
  1034. #define __kvm_get_guest(kvm, gfn, offset, v) \
  1035. ({ \
  1036. unsigned long __addr = gfn_to_hva(kvm, gfn); \
  1037. typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
  1038. int __ret = -EFAULT; \
  1039. \
  1040. if (!kvm_is_error_hva(__addr)) \
  1041. __ret = get_user(v, __uaddr); \
  1042. __ret; \
  1043. })
  1044. #define kvm_get_guest(kvm, gpa, v) \
  1045. ({ \
  1046. gpa_t __gpa = gpa; \
  1047. struct kvm *__kvm = kvm; \
  1048. \
  1049. __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
  1050. offset_in_page(__gpa), v); \
  1051. })
  1052. #define __kvm_put_guest(kvm, gfn, offset, v) \
  1053. ({ \
  1054. unsigned long __addr = gfn_to_hva(kvm, gfn); \
  1055. typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
  1056. int __ret = -EFAULT; \
  1057. \
  1058. if (!kvm_is_error_hva(__addr)) \
  1059. __ret = put_user(v, __uaddr); \
  1060. if (!__ret) \
  1061. mark_page_dirty(kvm, gfn); \
  1062. __ret; \
  1063. })
  1064. #define kvm_put_guest(kvm, gpa, v) \
  1065. ({ \
  1066. gpa_t __gpa = gpa; \
  1067. struct kvm *__kvm = kvm; \
  1068. \
  1069. __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
  1070. offset_in_page(__gpa), v); \
  1071. })
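/*
 * Illustrative usage (editor's sketch): kvm_get_guest()/kvm_put_guest() read
 * and write a single value in guest memory; the type of 'v' selects the
 * access size, and kvm_put_guest() also marks the page dirty on success:
 *
 *	u32 v;
 *
 *	if (kvm_get_guest(kvm, gpa, v))
 *		return -EFAULT;
 *	if (kvm_put_guest(kvm, gpa, v + 1))
 *		return -EFAULT;
 */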
  1072. int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
  1073. struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
  1074. bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
  1075. bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
  1076. unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
  1077. void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
  1078. void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
  1079. struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
  1080. struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
  1081. kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
  1082. kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
  1083. int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
  1084. void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
  1085. unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
  1086. unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
  1087. int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
  1088. int len);
  1089. int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
  1090. unsigned long len);
  1091. int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
  1092. unsigned long len);
  1093. int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
  1094. int offset, int len);
  1095. int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
  1096. unsigned long len);
  1097. void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  1098. /**
  1099. * kvm_gpc_init - initialize gfn_to_pfn_cache.
  1100. *
  1101. * @gpc: struct gfn_to_pfn_cache object.
  1102. *
  1103. * This sets up a gfn_to_pfn_cache by initializing locks. Note, the cache must
  1104. * be zero-allocated (or zeroed by the caller before init).
  1105. */
  1106. void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
  1107. /**
  1108. * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
  1109. * physical address.
  1110. *
  1111. * @kvm: pointer to kvm instance.
  1112. * @gpc: struct gfn_to_pfn_cache object.
  1113. * @vcpu: vCPU to be used for marking pages dirty and to be woken on
  1114. * invalidation.
  1115. * @usage: indicates if the resulting host physical PFN is used while
  1116. * the @vcpu is IN_GUEST_MODE (in which case invalidation of
  1117. * the cache from MMU notifiers---but not for KVM memslot
  1118. * changes!---will also force @vcpu to exit the guest and
  1119. * refresh the cache); and/or if the PFN is used directly
  1120. * by KVM (and thus needs a kernel virtual mapping).
  1121. * @gpa: guest physical address to map.
  1122. * @len: sanity check; the range being accessed must fit within a single page.
  1123. *
  1124. * @return: 0 for success.
  1125. * -EINVAL for a mapping which would cross a page boundary.
  1126. * -EFAULT for an untranslatable guest physical address.
  1127. *
  1128. * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
  1129. * invalidations to be processed. Callers are required to use
  1130. * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
  1131. * accessing the target page.
  1132. */
  1133. int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  1134. struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
  1135. gpa_t gpa, unsigned long len);
  1136. /**
  1137. * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
  1138. *
  1139. * @kvm: pointer to kvm instance.
  1140. * @gpc: struct gfn_to_pfn_cache object.
  1141. * @gpa: current guest physical address to map.
  1142. * @len: sanity check; the range being accessed must fit within a single page.
  1143. *
  1144. * @return: %true if the cache is still valid and the address matches.
  1145. * %false if the cache is not valid.
  1146. *
  1147. * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
  1148. * while calling this function, and then continue to hold the lock until the
  1149. * access is complete.
  1150. *
  1151. * Callers in IN_GUEST_MODE may do so without locking, although they should
  1152. * still hold a read lock on kvm->srcu for the memslot checks.
  1153. */
  1154. bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  1155. gpa_t gpa, unsigned long len);
  1156. /**
  1157. * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache.
  1158. *
  1159. * @kvm: pointer to kvm instance.
  1160. * @gpc: struct gfn_to_pfn_cache object.
  1161. * @gpa: updated guest physical address to map.
  1162. * @len: sanity check; the range being accessed must fit within a single page.
  1163. *
  1164. * @return: 0 for success.
  1165. * -EINVAL for a mapping which would cross a page boundary.
  1166. * -EFAULT for an untranslatable guest physical address.
  1167. *
  1168. * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
  1169. * return from this function does not mean the page can be immediately
  1170. * accessed because it may have raced with an invalidation. Callers must
  1171. * still lock and check the cache status, as this function does not return
  1172. * with the lock still held to permit access.
  1173. */
  1174. int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  1175. gpa_t gpa, unsigned long len);
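/*
 * Illustrative access pattern (editor's sketch): check the cache under its
 * read lock and fall back to a refresh when it is stale. The gpc->lock,
 * gpc->gpa and gpc->khva fields are assumed from the gfn_to_pfn_cache
 * definition in kvm_types.h:
 *
 *	read_lock(&gpc->lock);
 *	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, len)) {
 *		read_unlock(&gpc->lock);
 *
 *		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, len))
 *			return -EFAULT;
 *
 *		read_lock(&gpc->lock);
 *	}
 *
 *	// ... access the mapping via gpc->khva ...
 *
 *	read_unlock(&gpc->lock);
 */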
  1176. /**
  1177. * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
  1178. *
  1179. * @kvm: pointer to kvm instance.
  1180. * @gpc: struct gfn_to_pfn_cache object.
  1181. *
  1182. * This unmaps the referenced page. The cache is left in the invalid state
  1183. * but at least the mapping from GPA to userspace HVA will remain cached
  1184. * and can be reused on a subsequent refresh.
  1185. */
  1186. void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
  1187. /**
  1188. * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
  1189. *
  1190. * @kvm: pointer to kvm instance.
  1191. * @gpc: struct gfn_to_pfn_cache object.
  1192. *
  1193. * This removes a cache from the @kvm's list to be processed on MMU notifier
  1194. * invocation.
  1195. */
  1196. void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
  1197. void kvm_sigset_activate(struct kvm_vcpu *vcpu);
  1198. void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
  1199. void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
  1200. bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
  1201. void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
  1202. void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
  1203. bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
  1204. void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
  1205. int kvm_vcpu_yield_to(struct kvm_vcpu *target);
  1206. void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
  1207. void kvm_flush_remote_tlbs(struct kvm *kvm);
  1208. #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
  1209. int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
  1210. int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
  1211. int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
  1212. void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
  1213. void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
  1214. #endif
  1215. void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
  1216. unsigned long end);
  1217. void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
  1218. unsigned long end);
  1219. long kvm_arch_dev_ioctl(struct file *filp,
  1220. unsigned int ioctl, unsigned long arg);
  1221. long kvm_arch_vcpu_ioctl(struct file *filp,
  1222. unsigned int ioctl, unsigned long arg);
  1223. vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
  1224. int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
  1225. void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
  1226. struct kvm_memory_slot *slot,
  1227. gfn_t gfn_offset,
  1228. unsigned long mask);
  1229. void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
  1230. #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
  1231. void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
  1232. const struct kvm_memory_slot *memslot);
  1233. #else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
  1234. int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
  1235. int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
  1236. int *is_dirty, struct kvm_memory_slot **memslot);
  1237. #endif
  1238. int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
  1239. bool line_status);
  1240. int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
  1241. struct kvm_enable_cap *cap);
  1242. long kvm_arch_vm_ioctl(struct file *filp,
  1243. unsigned int ioctl, unsigned long arg);
  1244. long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
  1245. unsigned long arg);
  1246. int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
  1247. int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
  1248. int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
  1249. struct kvm_translation *tr);
  1250. int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
  1251. int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
  1252. int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
  1253. struct kvm_sregs *sregs);
  1254. int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
  1255. struct kvm_sregs *sregs);
  1256. int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
  1257. struct kvm_mp_state *mp_state);
  1258. int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
  1259. struct kvm_mp_state *mp_state);
  1260. int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
  1261. struct kvm_guest_debug *dbg);
  1262. int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
  1263. int kvm_arch_init(void *opaque);
  1264. void kvm_arch_exit(void);
  1265. void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
  1266. void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
  1267. void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
  1268. int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
  1269. int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
  1270. void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
  1271. void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
  1272. #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
  1273. int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
  1274. #endif
  1275. #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
  1276. void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
  1277. #else
  1278. static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
  1279. #endif
  1280. int kvm_arch_hardware_enable(void);
  1281. void kvm_arch_hardware_disable(void);
  1282. int kvm_arch_hardware_setup(void *opaque);
  1283. void kvm_arch_hardware_unsetup(void);
  1284. int kvm_arch_check_processor_compat(void *opaque);
  1285. int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
  1286. bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
  1287. int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
  1288. bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
  1289. bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
  1290. int kvm_arch_post_init_vm(struct kvm *kvm);
  1291. void kvm_arch_pre_destroy_vm(struct kvm *kvm);
  1292. int kvm_arch_create_vm_debugfs(struct kvm *kvm);
  1293. #ifndef __KVM_HAVE_ARCH_VM_ALLOC
  1294. /*
  1295. * All architectures that want to use vzalloc currently also
  1296. * need their own kvm_arch_alloc_vm implementation.
  1297. */
  1298. static inline struct kvm *kvm_arch_alloc_vm(void)
  1299. {
  1300. return kzalloc(sizeof(struct kvm), GFP_KERNEL);
  1301. }
  1302. #endif
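/*
 * Illustrative sketch only: per the comment above, an architecture that wants
 * a vmalloc-backed struct kvm would define __KVM_HAVE_ARCH_VM_ALLOC in its
 * arch header and provide its own allocator there, roughly as below. The GFP
 * flags shown are an assumption, not a requirement of this header.
 */
#if 0	/* example of an arch override, normally in asm/kvm_host.h */
#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return __vmalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}
#endif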
  1303. static inline void __kvm_arch_free_vm(struct kvm *kvm)
  1304. {
  1305. kvfree(kvm);
  1306. }
  1307. #ifndef __KVM_HAVE_ARCH_VM_FREE
  1308. static inline void kvm_arch_free_vm(struct kvm *kvm)
  1309. {
  1310. __kvm_arch_free_vm(kvm);
  1311. }
  1312. #endif
  1313. #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
  1314. static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
  1315. {
  1316. return -ENOTSUPP;
  1317. }
  1318. #endif
  1319. #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
  1320. void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
  1321. void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
  1322. bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
  1323. #else
  1324. static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
  1325. {
  1326. }
  1327. static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
  1328. {
  1329. }
  1330. static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
  1331. {
  1332. return false;
  1333. }
  1334. #endif
  1335. #ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
  1336. void kvm_arch_start_assignment(struct kvm *kvm);
  1337. void kvm_arch_end_assignment(struct kvm *kvm);
  1338. bool kvm_arch_has_assigned_device(struct kvm *kvm);
  1339. #else
  1340. static inline void kvm_arch_start_assignment(struct kvm *kvm)
  1341. {
  1342. }
  1343. static inline void kvm_arch_end_assignment(struct kvm *kvm)
  1344. {
  1345. }
  1346. static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
  1347. {
  1348. return false;
  1349. }
  1350. #endif
  1351. static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
  1352. {
  1353. #ifdef __KVM_HAVE_ARCH_WQP
  1354. return vcpu->arch.waitp;
  1355. #else
  1356. return &vcpu->wait;
  1357. #endif
  1358. }
  1359. /*
  1360. * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns
  1361. * true if the vCPU was blocking and was awakened, false otherwise.
  1362. */
  1363. static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
  1364. {
  1365. return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
  1366. }
  1367. static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
  1368. {
  1369. return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
  1370. }
  1371. #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
  1372. /*
  1373. * returns true if the virtual interrupt controller is initialized and
1374. * ready to accept virtual IRQs. On some architectures the virtual interrupt
  1375. * controller is dynamically instantiated and this is not always true.
  1376. */
  1377. bool kvm_arch_intc_initialized(struct kvm *kvm);
  1378. #else
  1379. static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
  1380. {
  1381. return true;
  1382. }
  1383. #endif
  1384. #ifdef CONFIG_GUEST_PERF_EVENTS
  1385. unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
  1386. void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
  1387. void kvm_unregister_perf_callbacks(void);
  1388. #else
  1389. static inline void kvm_register_perf_callbacks(void *ign) {}
  1390. static inline void kvm_unregister_perf_callbacks(void) {}
  1391. #endif /* CONFIG_GUEST_PERF_EVENTS */
  1392. int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
  1393. void kvm_arch_destroy_vm(struct kvm *kvm);
  1394. void kvm_arch_sync_events(struct kvm *kvm);
  1395. int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
  1396. struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn);
  1397. bool kvm_is_zone_device_page(struct page *page);
  1398. struct kvm_irq_ack_notifier {
  1399. struct hlist_node link;
  1400. unsigned gsi;
  1401. void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
  1402. };
  1403. int kvm_irq_map_gsi(struct kvm *kvm,
  1404. struct kvm_kernel_irq_routing_entry *entries, int gsi);
  1405. int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
  1406. int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
  1407. bool line_status);
  1408. int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
  1409. int irq_source_id, int level, bool line_status);
  1410. int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
  1411. struct kvm *kvm, int irq_source_id,
  1412. int level, bool line_status);
  1413. bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
  1414. void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
  1415. void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
  1416. void kvm_register_irq_ack_notifier(struct kvm *kvm,
  1417. struct kvm_irq_ack_notifier *kian);
  1418. void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
  1419. struct kvm_irq_ack_notifier *kian);
  1420. int kvm_request_irq_source_id(struct kvm *kvm);
  1421. void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
  1422. bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
  1423. /*
  1424. * Returns a pointer to the memslot if it contains gfn.
  1425. * Otherwise returns NULL.
  1426. */
  1427. static inline struct kvm_memory_slot *
  1428. try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
  1429. {
  1430. if (!slot)
  1431. return NULL;
  1432. if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
  1433. return slot;
  1434. else
  1435. return NULL;
  1436. }
  1437. /*
  1438. * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
  1439. *
1440. * With "approx" set, the memslot is returned even when the address falls
1441. * in a hole; in that case one of the memslots bordering the hole is
1442. * returned.
  1443. */
  1444. static inline struct kvm_memory_slot *
  1445. search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
  1446. {
  1447. struct kvm_memory_slot *slot;
  1448. struct rb_node *node;
  1449. int idx = slots->node_idx;
  1450. slot = NULL;
  1451. for (node = slots->gfn_tree.rb_node; node; ) {
  1452. slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
  1453. if (gfn >= slot->base_gfn) {
  1454. if (gfn < slot->base_gfn + slot->npages)
  1455. return slot;
  1456. node = node->rb_right;
  1457. } else
  1458. node = node->rb_left;
  1459. }
  1460. return approx ? slot : NULL;
  1461. }
  1462. static inline struct kvm_memory_slot *
  1463. ____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
  1464. {
  1465. struct kvm_memory_slot *slot;
  1466. slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
  1467. slot = try_get_memslot(slot, gfn);
  1468. if (slot)
  1469. return slot;
  1470. slot = search_memslots(slots, gfn, approx);
  1471. if (slot) {
  1472. atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
  1473. return slot;
  1474. }
  1475. return NULL;
  1476. }
  1477. /*
  1478. * __gfn_to_memslot() and its descendants are here to allow arch code to inline
  1479. * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
  1480. * because that would bloat other code too much.
  1481. */
  1482. static inline struct kvm_memory_slot *
  1483. __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
  1484. {
  1485. return ____gfn_to_memslot(slots, gfn, false);
  1486. }
  1487. static inline unsigned long
  1488. __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
  1489. {
  1490. /*
  1491. * The index was checked originally in search_memslots. To avoid
  1492. * that a malicious guest builds a Spectre gadget out of e.g. page
  1493. * table walks, do not let the processor speculate loads outside
  1494. * the guest's registered memslots.
  1495. */
  1496. unsigned long offset = gfn - slot->base_gfn;
  1497. offset = array_index_nospec(offset, slot->npages);
  1498. return slot->userspace_addr + offset * PAGE_SIZE;
  1499. }
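/*
 * Illustrative sketch (hypothetical helper): the hot-path lookup the comment
 * above describes -- resolve a gfn to its memslot, then to the userspace HVA.
 * It assumes the caller holds kvm->srcu so the memslots stay stable, and uses
 * the kvm_memslots() accessor and KVM_HVA_ERR_BAD defined earlier in this
 * header.
 */
static inline unsigned long example_gfn_to_hva_fast(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);

	if (!slot)
		return KVM_HVA_ERR_BAD;	/* gfn is not backed by any memslot */

	/* Bounds were checked by the lookup; Spectre hardening is in the helper. */
	return __gfn_to_hva_memslot(slot, gfn);
}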
  1500. static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
  1501. {
  1502. return gfn_to_memslot(kvm, gfn)->id;
  1503. }
  1504. static inline gfn_t
  1505. hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
  1506. {
  1507. gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
  1508. return slot->base_gfn + gfn_offset;
  1509. }
  1510. static inline gpa_t gfn_to_gpa(gfn_t gfn)
  1511. {
  1512. return (gpa_t)gfn << PAGE_SHIFT;
  1513. }
  1514. static inline gfn_t gpa_to_gfn(gpa_t gpa)
  1515. {
  1516. return (gfn_t)(gpa >> PAGE_SHIFT);
  1517. }
  1518. static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
  1519. {
  1520. return (hpa_t)pfn << PAGE_SHIFT;
  1521. }
  1522. static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
  1523. {
  1524. unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
  1525. return kvm_is_error_hva(hva);
  1526. }
  1527. enum kvm_stat_kind {
  1528. KVM_STAT_VM,
  1529. KVM_STAT_VCPU,
  1530. };
  1531. struct kvm_stat_data {
  1532. struct kvm *kvm;
  1533. const struct _kvm_stats_desc *desc;
  1534. enum kvm_stat_kind kind;
  1535. };
  1536. struct _kvm_stats_desc {
  1537. struct kvm_stats_desc desc;
  1538. char name[KVM_STATS_NAME_SIZE];
  1539. };
  1540. #define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
  1541. .flags = type | unit | base | \
  1542. BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
  1543. BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
  1544. BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
  1545. .exponent = exp, \
  1546. .size = sz, \
  1547. .bucket_size = bsz
  1548. #define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
  1549. { \
  1550. { \
  1551. STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
  1552. .offset = offsetof(struct kvm_vm_stat, generic.stat) \
  1553. }, \
  1554. .name = #stat, \
  1555. }
  1556. #define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
  1557. { \
  1558. { \
  1559. STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
  1560. .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
  1561. }, \
  1562. .name = #stat, \
  1563. }
  1564. #define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
  1565. { \
  1566. { \
  1567. STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
  1568. .offset = offsetof(struct kvm_vm_stat, stat) \
  1569. }, \
  1570. .name = #stat, \
  1571. }
  1572. #define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
  1573. { \
  1574. { \
  1575. STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
  1576. .offset = offsetof(struct kvm_vcpu_stat, stat) \
  1577. }, \
  1578. .name = #stat, \
  1579. }
  1580. /* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
  1581. #define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
  1582. SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
  1583. #define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
  1584. STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \
  1585. unit, base, exponent, 1, 0)
  1586. #define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
  1587. STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \
  1588. unit, base, exponent, 1, 0)
  1589. #define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
  1590. STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \
  1591. unit, base, exponent, 1, 0)
  1592. #define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
  1593. STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \
  1594. unit, base, exponent, sz, bsz)
  1595. #define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \
  1596. STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \
  1597. unit, base, exponent, sz, 0)
  1598. /* Cumulative counter, read/write */
  1599. #define STATS_DESC_COUNTER(SCOPE, name) \
  1600. STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
  1601. KVM_STATS_BASE_POW10, 0)
  1602. /* Instantaneous counter, read only */
  1603. #define STATS_DESC_ICOUNTER(SCOPE, name) \
  1604. STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
  1605. KVM_STATS_BASE_POW10, 0)
  1606. /* Peak counter, read/write */
  1607. #define STATS_DESC_PCOUNTER(SCOPE, name) \
  1608. STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
  1609. KVM_STATS_BASE_POW10, 0)
  1610. /* Instantaneous boolean value, read only */
  1611. #define STATS_DESC_IBOOLEAN(SCOPE, name) \
  1612. STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
  1613. KVM_STATS_BASE_POW10, 0)
  1614. /* Peak (sticky) boolean value, read/write */
  1615. #define STATS_DESC_PBOOLEAN(SCOPE, name) \
  1616. STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
  1617. KVM_STATS_BASE_POW10, 0)
  1618. /* Cumulative time in nanosecond */
  1619. #define STATS_DESC_TIME_NSEC(SCOPE, name) \
  1620. STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
  1621. KVM_STATS_BASE_POW10, -9)
  1622. /* Linear histogram for time in nanosecond */
  1623. #define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \
  1624. STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
  1625. KVM_STATS_BASE_POW10, -9, sz, bsz)
  1626. /* Logarithmic histogram for time in nanosecond */
  1627. #define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \
  1628. STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
  1629. KVM_STATS_BASE_POW10, -9, sz)
  1630. #define KVM_GENERIC_VM_STATS() \
  1631. STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \
  1632. STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)
  1633. #define KVM_GENERIC_VCPU_STATS() \
  1634. STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
  1635. STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
  1636. STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
  1637. STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
  1638. STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
  1639. STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \
  1640. STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \
  1641. STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
  1642. HALT_POLL_HIST_COUNT), \
  1643. STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
  1644. HALT_POLL_HIST_COUNT), \
  1645. STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
  1646. HALT_POLL_HIST_COUNT), \
  1647. STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
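/*
 * Illustrative sketch: how an architecture's KVM code typically builds its
 * stats descriptor table from the macros above. The VCPU-scope stat names
 * (exits, irq_injections, exit_latency_hist) are placeholders; the real
 * tables live in arch code and define kvm_vcpu_stats_desc[], declared below.
 */
#if 0	/* example only, normally in arch/.../kvm */
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exits),
	STATS_DESC_COUNTER(VCPU, irq_injections),
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU, exit_latency_hist, 32)
};
#endif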
  1648. extern struct dentry *kvm_debugfs_dir;
  1649. ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
  1650. const struct _kvm_stats_desc *desc,
  1651. void *stats, size_t size_stats,
  1652. char __user *user_buffer, size_t size, loff_t *offset);
  1653. /**
  1654. * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
  1655. * statistics data.
  1656. *
  1657. * @data: start address of the stats data
1658. * @size: the number of buckets in the stats data
  1659. * @value: the new value used to update the linear histogram's bucket
  1660. * @bucket_size: the size (width) of a bucket
  1661. */
  1662. static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
  1663. u64 value, size_t bucket_size)
  1664. {
  1665. size_t index = div64_u64(value, bucket_size);
  1666. index = min(index, size - 1);
  1667. ++data[index];
  1668. }
  1669. /**
  1670. * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
  1671. * statistics data.
  1672. *
  1673. * @data: start address of the stats data
1674. * @size: the number of buckets in the stats data
  1675. * @value: the new value used to update the logarithmic histogram's bucket
  1676. */
  1677. static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
  1678. {
  1679. size_t index = fls64(value);
  1680. index = min(index, size - 1);
  1681. ++data[index];
  1682. }
  1683. #define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \
  1684. kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
  1685. #define KVM_STATS_LOG_HIST_UPDATE(array, value) \
  1686. kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
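/*
 * Illustrative sketch: recording a sample with the histogram helpers above.
 * For a linear histogram with bucket_size = 1000 (ns), a 2500ns sample lands
 * in bucket 2 (2500 / 1000); for a log histogram, fls64(2500) = 12, so it
 * lands in bucket 12 (clamped to the last bucket if the array is smaller).
 * The generic halt-poll histogram fields used here are the ones declared via
 * KVM_GENERIC_VCPU_STATS() above.
 */
static inline void example_record_halt_poll(struct kvm_vcpu *vcpu, u64 poll_ns,
					    bool success)
{
	if (success)
		KVM_STATS_LOG_HIST_UPDATE(
			vcpu->stat.generic.halt_poll_success_hist, poll_ns);
	else
		KVM_STATS_LOG_HIST_UPDATE(
			vcpu->stat.generic.halt_poll_fail_hist, poll_ns);
}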
  1687. extern const struct kvm_stats_header kvm_vm_stats_header;
  1688. extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
  1689. extern const struct kvm_stats_header kvm_vcpu_stats_header;
  1690. extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
  1691. #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
  1692. static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
  1693. {
  1694. if (unlikely(kvm->mmu_invalidate_in_progress))
  1695. return 1;
  1696. /*
  1697. * Ensure the read of mmu_invalidate_in_progress happens before
  1698. * the read of mmu_invalidate_seq. This interacts with the
  1699. * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
  1700. * that the caller either sees the old (non-zero) value of
  1701. * mmu_invalidate_in_progress or the new (incremented) value of
  1702. * mmu_invalidate_seq.
  1703. *
  1704. * PowerPC Book3s HV KVM calls this under a per-page lock rather
  1705. * than under kvm->mmu_lock, for scalability, so can't rely on
  1706. * kvm->mmu_lock to keep things ordered.
  1707. */
  1708. smp_rmb();
  1709. if (kvm->mmu_invalidate_seq != mmu_seq)
  1710. return 1;
  1711. return 0;
  1712. }
  1713. static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
  1714. unsigned long mmu_seq,
  1715. unsigned long hva)
  1716. {
  1717. lockdep_assert_held(&kvm->mmu_lock);
  1718. /*
  1719. * If mmu_invalidate_in_progress is non-zero, then the range maintained
  1720. * by kvm_mmu_notifier_invalidate_range_start contains all addresses
  1721. * that might be being invalidated. Note that it may include some false
1722. * positives, due to shortcuts when handling concurrent invalidations.
  1723. */
  1724. if (unlikely(kvm->mmu_invalidate_in_progress) &&
  1725. hva >= kvm->mmu_invalidate_range_start &&
  1726. hva < kvm->mmu_invalidate_range_end)
  1727. return 1;
  1728. if (kvm->mmu_invalidate_seq != mmu_seq)
  1729. return 1;
  1730. return 0;
  1731. }
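/*
 * Illustrative sketch of the fault-path pattern these helpers support:
 * snapshot mmu_invalidate_seq before resolving the pfn (which may sleep),
 * then re-check under mmu_lock before installing the mapping. The
 * resolve_pfn()/install_spte() helpers are hypothetical, and the example
 * assumes an architecture where mmu_lock is an rwlock taken for write here;
 * the smp_rmb() pairs with the invalidate-side smp_wmb() described above.
 */
#if 0	/* example only; resolve_pfn()/install_spte() are hypothetical */
static int example_map_gfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long mmu_seq = kvm->mmu_invalidate_seq;
	kvm_pfn_t pfn;

	smp_rmb();

	pfn = resolve_pfn(kvm, gfn);		/* may sleep */

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		write_unlock(&kvm->mmu_lock);
		return -EAGAIN;			/* raced with invalidation; retry */
	}
	install_spte(kvm, gfn, pfn);
	write_unlock(&kvm->mmu_lock);
	return 0;
}
#endif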
  1732. #endif
  1733. #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
  1734. #define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
  1735. bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
  1736. int kvm_set_irq_routing(struct kvm *kvm,
  1737. const struct kvm_irq_routing_entry *entries,
  1738. unsigned nr,
  1739. unsigned flags);
  1740. int kvm_set_routing_entry(struct kvm *kvm,
  1741. struct kvm_kernel_irq_routing_entry *e,
  1742. const struct kvm_irq_routing_entry *ue);
  1743. void kvm_free_irq_routing(struct kvm *kvm);
  1744. #else
  1745. static inline void kvm_free_irq_routing(struct kvm *kvm) {}
  1746. #endif
  1747. int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
  1748. #ifdef CONFIG_HAVE_KVM_EVENTFD
  1749. void kvm_eventfd_init(struct kvm *kvm);
  1750. int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
  1751. #ifdef CONFIG_HAVE_KVM_IRQFD
  1752. int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
  1753. void kvm_irqfd_release(struct kvm *kvm);
  1754. void kvm_irq_routing_update(struct kvm *);
  1755. #else
  1756. static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
  1757. {
  1758. return -EINVAL;
  1759. }
  1760. static inline void kvm_irqfd_release(struct kvm *kvm) {}
  1761. #endif
  1762. #else
  1763. static inline void kvm_eventfd_init(struct kvm *kvm) {}
  1764. static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
  1765. {
  1766. return -EINVAL;
  1767. }
  1768. static inline void kvm_irqfd_release(struct kvm *kvm) {}
  1769. #ifdef CONFIG_HAVE_KVM_IRQCHIP
  1770. static inline void kvm_irq_routing_update(struct kvm *kvm)
  1771. {
  1772. }
  1773. #endif
  1774. static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
  1775. {
  1776. return -ENOSYS;
  1777. }
  1778. #endif /* CONFIG_HAVE_KVM_EVENTFD */
  1779. void kvm_arch_irq_routing_update(struct kvm *kvm);
  1780. static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
  1781. {
  1782. /*
  1783. * Ensure the rest of the request is published to kvm_check_request's
  1784. * caller. Paired with the smp_mb__after_atomic in kvm_check_request.
  1785. */
  1786. smp_wmb();
  1787. set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
  1788. }
  1789. static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
  1790. {
  1791. /*
1792. * Requests that don't require vCPU action should never be logged in
  1793. * vcpu->requests. The vCPU won't clear the request, so it will stay
  1794. * logged indefinitely and prevent the vCPU from entering the guest.
  1795. */
  1796. BUILD_BUG_ON(!__builtin_constant_p(req) ||
  1797. (req & KVM_REQUEST_NO_ACTION));
  1798. __kvm_make_request(req, vcpu);
  1799. }
  1800. static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
  1801. {
  1802. return READ_ONCE(vcpu->requests);
  1803. }
  1804. static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
  1805. {
  1806. return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
  1807. }
  1808. static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
  1809. {
  1810. clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
  1811. }
  1812. static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
  1813. {
  1814. if (kvm_test_request(req, vcpu)) {
  1815. kvm_clear_request(req, vcpu);
  1816. /*
  1817. * Ensure the rest of the request is visible to kvm_check_request's
  1818. * caller. Paired with the smp_wmb in kvm_make_request.
  1819. */
  1820. smp_mb__after_atomic();
  1821. return true;
  1822. } else {
  1823. return false;
  1824. }
  1825. }
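/*
 * Illustrative sketch of the producer/consumer pattern the barrier comments
 * above describe. KVM_REQ_EXAMPLE_UPDATE, vcpu->arch.example_data and
 * consume() are hypothetical; the point is the ordering: publish the payload,
 * then kvm_make_request() (smp_wmb), and on the vCPU side kvm_check_request()
 * (smp_mb__after_atomic) before reading the payload.
 */
#if 0	/* example only; real request bits are defined per architecture */
static void example_post_update(struct kvm_vcpu *vcpu, u64 data)
{
	WRITE_ONCE(vcpu->arch.example_data, data);	/* payload first */
	kvm_make_request(KVM_REQ_EXAMPLE_UPDATE, vcpu);	/* then the request */
	kvm_vcpu_kick(vcpu);				/* force an exit if running */
}

static void example_handle_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_EXAMPLE_UPDATE, vcpu))
		consume(READ_ONCE(vcpu->arch.example_data));
}
#endif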
  1826. extern bool kvm_rebooting;
  1827. extern unsigned int halt_poll_ns;
  1828. extern unsigned int halt_poll_ns_grow;
  1829. extern unsigned int halt_poll_ns_grow_start;
  1830. extern unsigned int halt_poll_ns_shrink;
  1831. struct kvm_device {
  1832. const struct kvm_device_ops *ops;
  1833. struct kvm *kvm;
  1834. void *private;
  1835. struct list_head vm_node;
  1836. };
  1837. /* create, destroy, and name are mandatory */
  1838. struct kvm_device_ops {
  1839. const char *name;
  1840. /*
  1841. * create is called holding kvm->lock and any operations not suitable
  1842. * to do while holding the lock should be deferred to init (see
  1843. * below).
  1844. */
  1845. int (*create)(struct kvm_device *dev, u32 type);
  1846. /*
  1847. * init is called after create if create is successful and is called
  1848. * outside of holding kvm->lock.
  1849. */
  1850. void (*init)(struct kvm_device *dev);
  1851. /*
  1852. * Destroy is responsible for freeing dev.
  1853. *
  1854. * Destroy may be called before or after destructors are called
  1855. * on emulated I/O regions, depending on whether a reference is
  1856. * held by a vcpu or other kvm component that gets destroyed
  1857. * after the emulated I/O.
  1858. */
  1859. void (*destroy)(struct kvm_device *dev);
  1860. /*
  1861. * Release is an alternative method to free the device. It is
  1862. * called when the device file descriptor is closed. Once
  1863. * release is called, the destroy method will not be called
  1864. * anymore as the device is removed from the device list of
  1865. * the VM. kvm->lock is held.
  1866. */
  1867. void (*release)(struct kvm_device *dev);
  1868. int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
  1869. int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
  1870. int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
  1871. long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
  1872. unsigned long arg);
  1873. int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
  1874. };
  1875. void kvm_device_get(struct kvm_device *dev);
  1876. void kvm_device_put(struct kvm_device *dev);
  1877. struct kvm_device *kvm_device_from_filp(struct file *filp);
  1878. int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
  1879. void kvm_unregister_device_ops(u32 type);
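/*
 * Illustrative sketch of a minimal device implementation following the rules
 * documented in kvm_device_ops above: create() runs under kvm->lock and stays
 * cheap, work that must run unlocked is deferred to init(), and destroy() is
 * responsible for freeing @dev. The ops name, device type and private state
 * are hypothetical.
 */
#if 0	/* example only */
static int example_dev_create(struct kvm_device *dev, u32 type)
{
	/* kvm->lock is held here; avoid anything that needs it dropped. */
	dev->private = NULL;
	return 0;
}

static void example_dev_init(struct kvm_device *dev)
{
	/* Runs after a successful create(), without kvm->lock held. */
}

static void example_dev_destroy(struct kvm_device *dev)
{
	kfree(dev->private);
	kfree(dev);		/* destroy() must free dev itself */
}

static const struct kvm_device_ops example_dev_ops = {
	.name = "example",
	.create = example_dev_create,
	.init = example_dev_init,
	.destroy = example_dev_destroy,
};

/* Registered once, e.g. from arch or module init code: */
/* kvm_register_device_ops(&example_dev_ops, KVM_DEV_TYPE_EXAMPLE); */
#endif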
  1880. extern struct kvm_device_ops kvm_mpic_ops;
  1881. extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
  1882. extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
  1883. #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
  1884. static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
  1885. {
  1886. vcpu->spin_loop.in_spin_loop = val;
  1887. }
  1888. static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
  1889. {
  1890. vcpu->spin_loop.dy_eligible = val;
  1891. }
  1892. #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
  1893. static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
  1894. {
  1895. }
  1896. static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
  1897. {
  1898. }
  1899. #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
  1900. static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
  1901. {
  1902. return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
  1903. !(memslot->flags & KVM_MEMSLOT_INVALID));
  1904. }
  1905. struct kvm_vcpu *kvm_get_running_vcpu(void);
  1906. struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
  1907. #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
  1908. bool kvm_arch_has_irq_bypass(void);
  1909. int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
  1910. struct irq_bypass_producer *);
  1911. void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
  1912. struct irq_bypass_producer *);
  1913. void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
  1914. void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
  1915. int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
  1916. uint32_t guest_irq, bool set);
  1917. bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
  1918. struct kvm_kernel_irq_routing_entry *);
  1919. #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
  1920. #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
1921. /* If we wake up during the poll time, was it a successful poll? */
  1922. static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
  1923. {
  1924. return vcpu->valid_wakeup;
  1925. }
  1926. #else
  1927. static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
  1928. {
  1929. return true;
  1930. }
  1931. #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
  1932. #ifdef CONFIG_HAVE_KVM_NO_POLL
  1933. /* Callback that tells if we must not poll */
  1934. bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
  1935. #else
  1936. static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
  1937. {
  1938. return false;
  1939. }
  1940. #endif /* CONFIG_HAVE_KVM_NO_POLL */
  1941. #ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
  1942. long kvm_arch_vcpu_async_ioctl(struct file *filp,
  1943. unsigned int ioctl, unsigned long arg);
  1944. #else
  1945. static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
  1946. unsigned int ioctl,
  1947. unsigned long arg)
  1948. {
  1949. return -ENOIOCTLCMD;
  1950. }
  1951. #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
  1952. void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
  1953. unsigned long start, unsigned long end);
  1954. void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
  1955. #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
  1956. int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
  1957. #else
  1958. static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
  1959. {
  1960. return 0;
  1961. }
  1962. #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
  1963. typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
  1964. int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
  1965. uintptr_t data, const char *name,
  1966. struct task_struct **thread_ptr);
  1967. #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
  1968. static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
  1969. {
  1970. vcpu->run->exit_reason = KVM_EXIT_INTR;
  1971. vcpu->stat.signal_exits++;
  1972. }
  1973. #endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
  1974. /*
  1975. * If more than one page is being (un)accounted, @virt must be the address of
1976. * the first page of a block of pages that were allocated together (i.e.
  1977. * accounted together).
  1978. *
  1979. * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
  1980. * is thread-safe.
  1981. */
  1982. static inline void kvm_account_pgtable_pages(void *virt, int nr)
  1983. {
  1984. mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
  1985. }
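/*
 * Illustrative sketch of pairing the accounting helper above with a
 * page-table page allocation. The get_zeroed_page()/free_page() usage and
 * the single-page granularity are assumptions about the caller, not
 * requirements of kvm_account_pgtable_pages().
 */
static inline void *example_alloc_pt_page(void)
{
	void *pt = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);

	if (pt)
		kvm_account_pgtable_pages(pt, 1);	/* account +1 page */
	return pt;
}

static inline void example_free_pt_page(void *pt)
{
	kvm_account_pgtable_pages(pt, -1);		/* unaccount before freeing */
	free_page((unsigned long)pt);
}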
  1986. /*
1987. * This defines how many reserved entries we want to keep before we
1988. * kick the vcpu out to userspace to avoid the dirty ring becoming full.
1989. * This value can be tuned higher if e.g. PML is enabled on the host.
  1990. */
  1991. #define KVM_DIRTY_RING_RSVD_ENTRIES 64
  1992. /* Max number of entries allowed for each kvm dirty ring */
  1993. #define KVM_DIRTY_RING_MAX_ENTRIES 65536
  1994. #endif