/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef MLX5_VFIO_CMD_H
#define MLX5_VFIO_CMD_H

#include <linux/kernel.h>
#include <linux/vfio_pci_core.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
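
/*
 * Context for one in-flight asynchronous VHCA-state command (this and
 * the annotations below are inferred from the declarations and from
 * the matching cmd.c, not taken verbatim from it): @cb_work is the
 * mlx5 async-command completion, @work defers the rest of the save to
 * process context, and @pdn/@mkey are the protection domain and memory
 * key covering the state buffer.
 */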
struct mlx5vf_async_data {
	struct mlx5_async_work cb_work;
	struct work_struct work;
	int status;
	u32 pdn;
	u32 mkey;
	void *out;
};
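
/*
 * Back end of one migration file descriptor. Device state is staged in
 * the scatter-gather @table; @total_length is the valid data size and
 * @allocated_length the capacity reserved so far. @disabled and @is_err
 * gate further I/O once the file is torn down or has failed; @lock
 * presumably serializes readers against the async save completion.
 */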
struct mlx5_vf_migration_file {
	struct file *filp;
	struct mutex lock;
	u8 disabled:1;
	u8 is_err:1;

	struct sg_append_table table;
	size_t total_length;
	size_t allocated_length;

	/* Optimize mlx5vf_get_migration_page() for sequential access */
	struct scatterlist *last_offset_sg;
	unsigned int sg_last_entry;
	unsigned long last_offset;
	struct mlx5vf_pci_core_device *mvdev;
	wait_queue_head_t poll_wait;
	struct mlx5_async_ctx async_ctx;
	struct mlx5vf_async_data async_data;
};
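
/*
 * Completion queue used by the dirty-page tracker: @buf holds the
 * fragmented CQE buffer described by @fbc, @db is the doorbell record,
 * and @mcq is the mlx5_core CQ handle.
 */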
struct mlx5_vhca_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	int cqe_size;
	int nent;
};

struct mlx5_vhca_cq {
	struct mlx5_vhca_cq_buf buf;
	struct mlx5_db db;
	struct mlx5_core_cq mcq;
	size_t ncqe;
};
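
/*
 * Pinned receive buffer for the tracker's RQ: @npages pages, their DMA
 * addresses, and the @mkey exposing them to the device; @next_rq_offset
 * appears to be where the next receive WQE's buffer starts.
 */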
struct mlx5_vhca_recv_buf {
	u32 npages;
	struct page **page_list;
	dma_addr_t *dma_addrs;
	u32 next_rq_offset;
	u32 mkey;
};
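
/*
 * Queue pair used for page tracking. The driver seems to own one such
 * QP (with receive-queue bookkeeping in @rq: producer/consumer counters
 * @pc/@cc over @wqe_cnt entries) and to pair it with a firmware-owned
 * peer; @tracked_page_size and @max_msg_size are presumably the report
 * granularity and message size negotiated with the device.
 */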
struct mlx5_vhca_qp {
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
	struct mlx5_vhca_recv_buf recv_buf;
	u32 tracked_page_size;
	u32 max_msg_size;
	u32 qpn;
	struct {
		unsigned int pc;
		unsigned int cc;
		unsigned int wqe_cnt;
		__be32 *db;
		struct mlx5_frag_buf_ctrl fbc;
	} rq;
};
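
/*
 * One dirty-page tracker instance: @id is the tracker object created in
 * firmware under protection domain @pdn, the @cq plus the @host_qp /
 * @fw_qp pair carry dirty-page reports, and @nb/@status capture async
 * (likely error) events for those queues.
 */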
struct mlx5_vhca_page_tracker {
	u32 id;
	u32 pdn;
	u8 is_err:1;
	struct mlx5_uars_page *uar;
	struct mlx5_vhca_cq cq;
	struct mlx5_vhca_qp *host_qp;
	struct mlx5_vhca_qp *fw_qp;
	struct mlx5_nb nb;
	int status;
};
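
/*
 * Per-VF state of the variant driver. @state_mutex serializes
 * migration-state changes, while @reset_lock protects @deferred_reset
 * so a reset_done that races with a held @state_mutex can be replayed
 * from mlx5vf_state_mutex_unlock(). @resuming_migf/@saving_migf are the
 * currently open migration files and @cb_wq runs the async-save
 * completion work.
 */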
struct mlx5vf_pci_core_device {
	struct vfio_pci_core_device core_device;
	int vf_id;
	u16 vhca_id;
	u8 migrate_cap:1;
	u8 deferred_reset:1;
	u8 mdev_detach:1;
	u8 log_active:1;
	struct completion tracker_comp;
	/* protect migration state */
	struct mutex state_mutex;
	enum vfio_device_mig_state mig_state;
	/* protect the reset_done flow */
	spinlock_t reset_lock;
	struct mlx5_vf_migration_file *resuming_migf;
	struct mlx5_vf_migration_file *saving_migf;
	struct mlx5_vhca_page_tracker tracker;
	struct workqueue_struct *cb_wq;
	struct notifier_block nb;
	struct mlx5_core_dev *mdev;
};
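
/*
 * Command API implemented in cmd.c; roughly (as the signatures
 * suggest): suspend/resume quiesce and restart the VHCA, query reports
 * the required state size, save/load stream device state through a
 * struct mlx5_vf_migration_file, and the page-tracker entry points
 * back the vfio_log_ops used for dirty-page reporting.
 */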
int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
					  size_t *state_size);
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
			       const struct vfio_migration_ops *mig_ops,
			       const struct vfio_log_ops *log_ops);
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf);
int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf);
void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
int mlx5vf_start_page_tracker(struct vfio_device *vdev,
			      struct rb_root_cached *ranges,
			      u32 nnodes, u64 *page_size);
int mlx5vf_stop_page_tracker(struct vfio_device *vdev);
int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
				  unsigned long length,
				  struct iova_bitmap *dirty);
#endif /* MLX5_VFIO_CMD_H */