/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#ifndef __GZVM_DRV_H__
#define __GZVM_DRV_H__

#include <linux/eventfd.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/gzvm.h>
#include <linux/srcu.h>
#include <linux/rbtree.h>
/*
 * For the normal physical address, the highest 12 bits should be zero, so we
 * can mask bit 62 ~ bit 52 to indicate the error physical address
 */
#define GZVM_PA_ERR_BAD (0x7ffULL << 52)

/* Size of the per-vcpu region userspace mmaps from the vcpu fd (one page) */
#define GZVM_VCPU_MMAP_SIZE PAGE_SIZE

/* Sentinel vm_id marking an invalid / unassigned VM */
#define INVALID_VM_ID 0xffff

/*
 * These are the definitions of APIs between GenieZone hypervisor and driver,
 * there's no need to be visible to uapi. Furthermore, we need GenieZone
 * specific error code in order to map to Linux errno
 * (translated by gzvm_err_to_errno()).
 */
#define NO_ERROR (0)
#define ERR_NO_MEMORY (-5)
#define ERR_INVALID_ARGS (-8)
#define ERR_NOT_SUPPORTED (-24)
#define ERR_NOT_IMPLEMENTED (-27)
#define ERR_FAULT (-40)

/*
 * IRQ source id used by resampling irqfds when acking level-triggered
 * interrupts (NOTE(review): inferred from the name — confirm against the
 * irqfd implementation)
 */
#define GZVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1

/*
 * The following data structures are for data transferring between driver and
 * hypervisor, and they're aligned with hypervisor definitions
 */
#define GZVM_MAX_VCPUS 8
#define GZVM_MAX_MEM_REGION 10

/*
 * The vcpu run area spans two pages — presumably one for gzvm_vcpu_run and
 * one for gzvm_vcpu_hwstate (see struct gzvm_vcpu); confirm in vcpu setup
 */
#define GZVM_VCPU_RUN_MAP_SIZE (PAGE_SIZE * 2)

/* Allocation granularity for block-based demand paging */
#define GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE (2 * 1024 * 1024) /* 2MB */
/**
 * struct mem_region_addr_range - Identical to ffa memory constituent
 * @address: the base IPA of the constituent memory region, aligned to 4 kiB
 * @pg_cnt: the number of 4 kiB pages in the constituent memory region
 * @reserved: padding to keep the layout 8-byte aligned; presumably must be
 *            zero per the FF-A layout — confirm against the hypervisor ABI
 */
struct mem_region_addr_range {
	__u64 address;
	__u32 pg_cnt;
	__u32 reserved;
};
/**
 * struct gzvm_memory_region_ranges - Guest memory region described as a set
 *                                    of constituent address ranges
 * @slot: memory slot id this region belongs to
 * @constituent_cnt: number of entries in @constituents
 * @total_pages: total number of 4 kiB pages covered by all constituents
 * @gpa: guest physical address the region is mapped at
 * @constituents: flexible array of the address ranges making up the region
 */
struct gzvm_memory_region_ranges {
	__u32 slot;
	__u32 constituent_cnt;
	__u64 total_pages;
	__u64 gpa;
	struct mem_region_addr_range constituents[];
};
/**
 * struct gzvm_memslot - VM's memory slot descriptor
 * @base_gfn: begin of guest page frame
 * @npages: number of pages this slot covers
 * @userspace_addr: corresponding userspace va
 * @vma: vma related to this userspace addr
 * @flags: slot flags as supplied by userspace at region setup — TODO confirm
 * @slot_id: id of this slot; indexes gzvm.memslot[], so it is bounded by
 *           GZVM_MAX_MEM_REGION (assumption — verify at the setter)
 */
struct gzvm_memslot {
	u64 base_gfn;
	unsigned long npages;
	unsigned long userspace_addr;
	struct vm_area_struct *vma;
	u32 flags;
	u32 slot_id;
};
/**
 * struct gzvm_vcpu - Driver-side state of one virtual CPU
 * @gzvm: the VM this vcpu belongs to
 * @vcpuid: index of this vcpu within the VM (< GZVM_MAX_VCPUS)
 * @lock: lock of vcpu
 * @run: run-state area shared with userspace/hypervisor — presumably the
 *       region mmapped via GZVM_VCPU_MMAP_SIZE; confirm at vcpu creation
 * @hwstate: per-vcpu hardware state exchanged with the hypervisor
 */
struct gzvm_vcpu {
	struct gzvm *gzvm;
	int vcpuid;
	/* lock of vcpu */
	struct mutex lock;
	struct gzvm_vcpu_run *run;
	struct gzvm_vcpu_hwstate *hwstate;
};
/**
 * struct gzvm_pinned_page - Record of one host page pinned for the guest,
 *                           kept in the VM's pinned_pages rb-tree
 * @node: link into gzvm.pinned_pages
 * @page: the pinned host page
 * @ipa: guest intermediate physical address backed by @page — presumably the
 *       rb-tree key; confirm against the insert/lookup code
 */
struct gzvm_pinned_page {
	struct rb_node node;
	struct page *page;
	u64 ipa;
};
/**
 * struct gzvm - Driver-side state of one GenieZone virtual machine
 * @vcpus: the vcpus of this VM, indexed by vcpuid
 * @mm: userspace tied to this vm
 * @memslot: memory slots describing guest memory regions
 * @lock: lock for list_add
 * @irqfds: irqfd bookkeeping (eventfd-driven interrupt injection)
 * @ioevents: registered ioeventfds (see gzvm_init_ioeventfd())
 * @vm_list: link in the driver's global VM list (see gzvm_destroy_all_vms())
 * @vm_id: VM identifier assigned by the hypervisor; INVALID_VM_ID when unset
 * @irq_ack_notifier_list: notifiers called on irq ack (gzvm_notify_acked_irq)
 * @irq_srcu: SRCU protecting readers of @irq_ack_notifier_list — presumably;
 *            confirm against the notifier traversal code
 * @irq_lock: lock for irq injection
 * @demand_page_gran: demand page granularity (see description below)
 * @demand_page_buffer: the mailbox for transferring large portion pages
 * @demand_paging_lock: serializes use of @demand_page_buffer
 * @pinned_pages: rb-tree of struct gzvm_pinned_page, recording pinned pages
 */
struct gzvm {
	struct gzvm_vcpu *vcpus[GZVM_MAX_VCPUS];
	/* userspace tied to this vm */
	struct mm_struct *mm;
	struct gzvm_memslot memslot[GZVM_MAX_MEM_REGION];
	/* lock for list_add */
	struct mutex lock;

	struct {
		/* lock for irqfds list operation */
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		/* lock for irqfds resampler */
		struct mutex resampler_lock;
	} irqfds;

	struct list_head ioevents;
	struct list_head vm_list;
	u16 vm_id;

	struct hlist_head irq_ack_notifier_list;
	struct srcu_struct irq_srcu;
	/* lock for irq injection */
	struct mutex irq_lock;

	/*
	 * demand page granularity: how much memory we allocate for VM in a
	 * single page fault
	 */
	u32 demand_page_gran;
	/* the mailbox for transferring large portion pages */
	u64 *demand_page_buffer;
	/*
	 * lock for preventing multiple cpu using the same demand page mailbox
	 * at the same time
	 */
	struct mutex demand_paging_lock;

	/* Use rb-tree to record pin/unpin page */
	struct rb_root pinned_pages;
};
/* Core driver entry points (arch-independent) */
long gzvm_dev_ioctl_check_extension(struct gzvm *gzvm, unsigned long args);
int gzvm_dev_ioctl_create_vm(unsigned long vm_type);
/* Map a GenieZone ERR_* code (defined above) to a negative Linux errno */
int gzvm_err_to_errno(unsigned long err);
void gzvm_destroy_all_vms(void);
void gzvm_destroy_vcpus(struct gzvm *gzvm);
/* arch-dependent functions */
int gzvm_arch_probe(void);
int gzvm_arch_set_memregion(u16 vm_id, size_t buf_size,
			    phys_addr_t region);
int gzvm_arch_check_extension(struct gzvm *gzvm, __u64 cap, void __user *argp);
int gzvm_arch_create_vm(unsigned long vm_type);
int gzvm_arch_destroy_vm(u16 vm_id);
int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
			u64 nr_pages);
int gzvm_arch_map_guest_block(u16 vm_id, int memslot_id, u64 gfn, u64 nr_pages);
int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
				  struct gzvm_enable_cap *cap,
				  void __user *argp);
u64 gzvm_hva_to_pa_arch(u64 hva);

/* vcpu lifecycle and register access */
int gzvm_vm_ioctl_create_vcpu(struct gzvm *gzvm, u32 cpuid);
int gzvm_arch_vcpu_update_one_reg(struct gzvm_vcpu *vcpu, __u64 reg_id,
				  bool is_write, __u64 *data);
int gzvm_arch_create_vcpu(u16 vm_id, int vcpuid, void *run);
int gzvm_arch_vcpu_run(struct gzvm_vcpu *vcpu, __u64 *exit_reason);
int gzvm_arch_destroy_vcpu(u16 vm_id, int vcpuid);
int gzvm_arch_inform_exit(u16 vm_id);

/* guest address translation and demand paging */
u64 gzvm_gfn_to_hva_memslot(struct gzvm_memslot *memslot, u64 gfn);
u64 hva_to_pa_fast(u64 hva);
u64 hva_to_pa_slow(u64 hva);
int gzvm_gfn_to_pfn_memslot(struct gzvm_memslot *memslot, u64 gfn, u64 *pfn);
/* Returns the memslot index covering @gpa (negative errno-style on miss —
 * assumption; confirm at callers) */
int gzvm_find_memslot(struct gzvm *vm, u64 gpa);
int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu);
bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu);
int gzvm_handle_relinquish(struct gzvm_vcpu *vcpu, phys_addr_t ipa);
bool gzvm_handle_guest_hvc(struct gzvm_vcpu *vcpu);
int gzvm_arch_create_device(u16 vm_id, struct gzvm_create_device *gzvm_dev);

/* interrupt injection, irqfd and ioeventfd plumbing */
int gzvm_arch_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,
			 u32 irq, bool level);
void gzvm_notify_acked_irq(struct gzvm *gzvm, unsigned int gsi);
int gzvm_irqfd(struct gzvm *gzvm, struct gzvm_irqfd *args);
int gzvm_drv_irqfd_init(void);
void gzvm_drv_irqfd_exit(void);
int gzvm_vm_irqfd_init(struct gzvm *gzvm);
void gzvm_vm_irqfd_release(struct gzvm *gzvm);

int gzvm_arch_memregion_purpose(struct gzvm *gzvm,
				struct gzvm_userspace_memory_region *mem);
int gzvm_arch_set_dtb_config(struct gzvm *gzvm, struct gzvm_dtb_config *args);

int gzvm_init_ioeventfd(struct gzvm *gzvm);
int gzvm_ioeventfd(struct gzvm *gzvm, struct gzvm_ioeventfd *args);
bool gzvm_ioevent_write(struct gzvm_vcpu *vcpu, __u64 addr, int len,
			const void *val);

/* kernel helpers re-declared here — presumably for build environments where
 * the corresponding headers do not export them; confirm why these are needed */
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr);
void add_wait_queue_priority(struct wait_queue_head *wq_head,
			     struct wait_queue_entry *wq_entry);

#endif /* __GZVM_DRV_H__ */