kvm_dirty_ring.h

#ifndef KVM_DIRTY_RING_H
#define KVM_DIRTY_RING_H

#include <linux/kvm.h>

/**
 * kvm_dirty_ring: KVM internal dirty ring structure
 *
 * @dirty_index: free running counter that points to the next slot in
 *               dirty_ring->dirty_gfns, where a new dirty page should go
 * @reset_index: free running counter that points to the next dirty page
 *               in dirty_ring->dirty_gfns for which dirty trap needs to
 *               be reenabled
 * @size:        size of the compact list, dirty_ring->dirty_gfns
 * @soft_limit:  when the number of dirty pages in the list reaches this
 *               limit, vcpu that owns this ring should exit to userspace
 *               to allow userspace to harvest all the dirty pages
 * @dirty_gfns:  the array to keep the dirty gfns
 * @index:       index of this dirty ring
 */
struct kvm_dirty_ring {
        u32 dirty_index;
        u32 reset_index;
        u32 size;
        u32 soft_limit;
        struct kvm_dirty_gfn *dirty_gfns;
        int index;
};
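
/*
 * Illustrative sketch, not part of the original header: because
 * dirty_index and reset_index are free-running u32 counters, the number
 * of entries still waiting to be harvested is simply their difference,
 * and "soft full" means that difference has reached soft_limit.  The
 * *_example helpers below are hypothetical names used only to show the
 * arithmetic.
 */
static inline u32 kvm_dirty_ring_used_example(struct kvm_dirty_ring *ring)
{
        /* unsigned wrap-around keeps this correct if the counters overflow */
        return ring->dirty_index - ring->reset_index;
}

static inline bool kvm_dirty_ring_soft_full_example(struct kvm_dirty_ring *ring)
{
        /* once this is true, the owning vcpu should exit to userspace */
        return kvm_dirty_ring_used_example(ring) >= ring->soft_limit;
}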
#ifndef CONFIG_HAVE_KVM_DIRTY_RING
/*
 * If CONFIG_HAVE_KVM_DIRTY_RING is not defined, kvm_dirty_ring.o should
 * not be included as well, so define these nop functions for the arch.
 */
static inline u32 kvm_dirty_ring_get_rsvd_entries(void)
{
        return 0;
}

static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
                                       int index, u32 size)
{
        return 0;
}

static inline int kvm_dirty_ring_reset(struct kvm *kvm,
                                       struct kvm_dirty_ring *ring)
{
        return 0;
}

static inline void kvm_dirty_ring_push(struct kvm_dirty_ring *ring,
                                       u32 slot, u64 offset)
{
}

static inline struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring,
                                                   u32 offset)
{
        return NULL;
}

static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
}

static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
        return true;
}

#else /* CONFIG_HAVE_KVM_DIRTY_RING */

u32 kvm_dirty_ring_get_rsvd_entries(void);
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);

/*
 * called with kvm->slots_lock held, returns the number of
 * processed pages.
 */
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);

/*
 * Push a dirty gfn onto the ring.  The caller is expected to check
 * kvm_dirty_ring_soft_full() and let the vcpu exit to userspace before
 * the ring can fill up, so the push itself does not return a status.
 */
void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset);

/* for use in vm_operations_struct */
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);

void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring);

#endif /* CONFIG_HAVE_KVM_DIRTY_RING */

#endif /* KVM_DIRTY_RING_H */
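
/*
 * Illustrative vcpu-side flow, a sketch rather than code taken from the
 * kernel: when a guest page is dirtied, its (slot, offset) pair is
 * pushed onto the vcpu's ring, and the caller then checks whether the
 * soft limit has been reached so the vcpu can exit to userspace.
 * Userspace harvests the entries through the mmap()ed pages exposed by
 * kvm_dirty_ring_get_page(), after which kvm_dirty_ring_reset() (called
 * with kvm->slots_lock held) re-enables the dirty traps.  The function
 * name below is hypothetical.
 */
static inline bool mark_page_dirty_in_ring_example(struct kvm_dirty_ring *ring,
                                                   u32 slot, u64 offset)
{
        /* record the dirty gfn in the ring owned by the current vcpu */
        kvm_dirty_ring_push(ring, slot, offset);

        /* true means the vcpu should exit so userspace can harvest */
        return kvm_dirty_ring_soft_full(ring);
}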