paravirt.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */
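/*
 * Illustrative sketch, not part of this header: each wrapper below
 * dispatches through the corresponding pv_ops slot, e.g.
 * __flush_tlb_local() ends up calling pv_ops.mmu.flush_tlb_user.
 * A guest environment can install its own handler at boot, roughly
 * (the function name here is hypothetical):
 *
 *	static void my_flush_tlb_user(void);
 *	pv_ops.mmu.flush_tlb_user = my_flush_tlb_user;
 *
 * On native hardware the default handlers remain, or the ALT_NOT()
 * alternatives below patch in the native instruction directly.
 */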
#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/static_call_types.h>
#include <asm/frame.h>

u64 dummy_steal_clock(int cpu);
u64 dummy_sched_clock(void);

DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);

void paravirt_set_sched_clock(u64 (*func)(void));

static inline u64 paravirt_sched_clock(void)
{
	return static_call(pv_sched_clock)();
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
	return static_call(pv_steal_clock)(cpu);
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init paravirt_set_cap(void);
#endif

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	PVOP_VCALL0(cpu.io_delay);
#ifdef REALLY_SLOW_IO
	PVOP_VCALL0(cpu.io_delay);
	PVOP_VCALL0(cpu.io_delay);
	PVOP_VCALL0(cpu.io_delay);
#endif
}

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_multi(const struct cpumask *cpumask,
			    const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_multi(const struct cpumask *cpumask,
				     const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

static inline void notify_page_enc_status_changed(unsigned long pfn,
						  int npages, bool enc)
{
	PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}
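/*
 * Illustrative use, not part of this header: query CPUID leaf 0 through
 * the paravirtualized CPUID; the local variable names are only for the
 * example.
 *
 *	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 *	// eax now holds the highest basic leaf, ebx/edx/ecx the vendor ID
 */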
/*
 * These special macros can be used to get or set a debugging register
 */
static __always_inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)

static __always_inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
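/*
 * Illustrative use, not part of this header: read DR7 into a local
 * variable and write it back unchanged; "dr7" is just an example name.
 *
 *	unsigned long dr7;
 *
 *	get_debugreg(dr7, 7);
 *	set_debugreg(dr7, 7);
 */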
static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static __always_inline unsigned long read_cr2(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
				"mov %%cr2, %%rax;",
				ALT_NOT(X86_FEATURE_XENPV));
}

static __always_inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
			      "mov %%cr3, %%rax;", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void write_cr3(unsigned long x)
{
	PVOP_ALT_VCALL1(mmu.write_cr3, x,
			"mov %%rdi, %%cr3", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
	PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV));
}

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;
	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
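/*
 * Illustrative use, not part of this header: read an MSR with fault
 * handling; MSR_IA32_TSC and the local name are chosen only for the
 * example.
 *
 *	unsigned long long tsc;
 *
 *	if (rdmsrl_safe(MSR_IA32_TSC, &tsc))
 *		pr_warn("TSC MSR not readable\n");
 */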
static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}

static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}

static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}

static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}

static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())

static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
	PVOP_VCALL0(cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}

static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}

static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val,
					  "mov %%rdi, %%rax",
					  ALT_NOT(X86_FEATURE_XENPV)) };
}

static inline pteval_t pte_val(pte_t pte)
{
	return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline pgd_t __pgd(pgdval_t val)
{
	return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val,
					  "mov %%rdi, %%rax",
					  ALT_NOT(X86_FEATURE_XENPV)) };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
}

static inline pmd_t __pmd(pmdval_t val)
{
	return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val,
					  "mov %%rdi, %%rax",
					  ALT_NOT(X86_FEATURE_XENPV)) };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
}

static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val,
			       "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, native_make_pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
					"mov %%rdi, %%rax",
					ALT_NOT(X86_FEATURE_XENPV));

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, native_make_pgd(0));			\
} while (0)
#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */
static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, native_make_p4d(0));
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte(ptep, native_make_pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, native_make_pmd(0));
}

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							 u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock,
			  "movb $0, (%%" _ASM_ARG1 ");",
			  ALT_NOT(X86_FEATURE_PVUNLOCK));
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
				"xor %%" _ASM_AX ", %%" _ASM_AX ";",
				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		\
	"push %rcx;"			\
	"push %rdx;"			\
	"push %rsi;"			\
	"push %rdi;"			\
	"push %r8;"			\
	"push %r9;"			\
	"push %r10;"			\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS	\
	"pop %r11;"			\
	"pop %r10;"			\
	"pop %r9;"			\
	"pop %r8;"			\
	"pop %rdi;"			\
	"pop %rsi;"			\
	"pop %rdx;"			\
	"pop %rcx;"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define __PV_CALLEE_SAVE_REGS_THUNK(func, section)			\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection " section ", \"ax\";"				\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    ASM_ENDBR							\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    ASM_RET							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
	    ".popsection")

#define PV_CALLEE_SAVE_REGS_THUNK(func)			\
	__PV_CALLEE_SAVE_REGS_THUNK(func, ".text")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
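/*
 * Illustrative use, not part of this header; the function name is
 * hypothetical. A C helper is wrapped in a register-saving thunk and
 * then installed as a callee-save pv_ops hook:
 *
 *	__visible bool my_vcpu_is_preempted(long cpu)
 *	{
 *		return false;
 *	}
 *	PV_CALLEE_SAVE_REGS_THUNK(my_vcpu_is_preempted);
 *
 *	// later, e.g. during guest setup:
 *	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(my_vcpu_is_preempted);
 */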
#ifdef CONFIG_PARAVIRT_XXL
static __always_inline unsigned long arch_local_save_flags(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
				ALT_NOT(X86_FEATURE_XENPV));
}

static __always_inline void arch_local_irq_disable(void)
{
	PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT(X86_FEATURE_XENPV));
}

static __always_inline void arch_local_irq_enable(void)
{
	PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT(X86_FEATURE_XENPV));
}

static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
#endif

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)		\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	 _ASM_ALIGN;				\
	.popsection

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL

#define PARA_PATCH(off)		((off) / 8)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)

#ifdef CONFIG_DEBUG_ENTRY
.macro PARA_IRQ_save_fl
	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),
		  ANNOTATE_RETPOLINE_SAFE;
		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);)
.endm

#define SAVE_FLAGS	ALTERNATIVE "PARA_IRQ_save_fl;", "pushf; pop %rax;", \
				    ALT_NOT(X86_FEATURE_XENPV)
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline void paravirt_set_cap(void)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */