Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next-queued
Pull in latest drm-next from Dave Airlie to get at all the drm-misc goodies, specifically:

- dma_fence error state handling rework (Chris needs that for error recovery)
- crc support locking changes (Tomeu's i915 crc patches need that).

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
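For context on the first item: the reworked dma_fence API lets a producer attach an error to a fence before signalling it, and waiters read the result back from fence->error afterwards. A minimal sketch of that usage pattern, assuming a fence obtained elsewhere (hypothetical helper, not the actual i915 error-recovery code):

    #include <linux/dma-fence.h>

    /* Sketch only: fail a fence so waiters can observe the error.
     * dma_fence_set_error() must be called before the fence is signalled;
     * consumers read the outcome from fence->error after waiting.
     */
    static void complete_request_with_error(struct dma_fence *fence, int err)
    {
            dma_fence_set_error(fence, err);  /* e.g. -EIO after a GPU reset */
            dma_fence_signal(fence);          /* wakes waiters; error sticks */
    }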
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 	u8 changed = old ^ new;
 	int ret;
 
+	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
 	if (!(changed & PCI_COMMAND_MEMORY))
 		return 0;
 
@@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 		return ret;
 	}
 
-	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
 	return 0;
 }
 
@@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 	if (WARN_ON(bytes > 4))
 		return -EINVAL;
 
-	if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
 		return -EINVAL;
 
 	/* First check if it's PCI_COMMAND */
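The >= to > change above is an off-by-one fix: a write is still legal when it ends exactly at the end of config space, so only offset + bytes strictly greater than the size is out of bounds. A standalone illustration of the boundary, using a stand-in constant for INTEL_GVT_MAX_CFG_SPACE_SZ:

    #include <stdbool.h>
    #include <stdio.h>

    #define CFG_SPACE_SZ 256u  /* stand-in for INTEL_GVT_MAX_CFG_SPACE_SZ */

    /* A write of `bytes` at `offset` is in bounds iff it ends at or
     * before the end of config space: offset + bytes <= CFG_SPACE_SZ. */
    static bool cfg_write_in_bounds(unsigned int offset, unsigned int bytes)
    {
            return offset + bytes <= CFG_SPACE_SZ;
    }

    int main(void)
    {
            /* 4 bytes at offset 252 end exactly at 256: valid, yet the old
             * `offset + bytes >= SZ` test rejected it. */
            printf("252+4 in bounds: %d\n", cfg_write_in_bounds(252, 4)); /* 1 */
            printf("254+4 in bounds: %d\n", cfg_write_in_bounds(254, 4)); /* 0 */
            return 0;
    }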
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1998,6 +1998,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 	INIT_LIST_HEAD(&gtt->oos_page_list_head);
 	INIT_LIST_HEAD(&gtt->post_shadow_list_head);
 
+	intel_vgpu_reset_ggtt(vgpu);
+
 	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
 			NULL, 1, 0);
 	if (IS_ERR(ggtt_mm)) {
@@ -2206,6 +2208,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
+	void *page_addr;
 
 	gvt_dbg_core("init gtt\n");
 
@@ -2218,6 +2221,23 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		return -ENODEV;
 	}
 
+	gvt->gtt.scratch_ggtt_page =
+		alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+	if (!gvt->gtt.scratch_ggtt_page) {
+		gvt_err("fail to allocate scratch ggtt page\n");
+		return -ENOMEM;
+	}
+
+	page_addr = page_address(gvt->gtt.scratch_ggtt_page);
+
+	gvt->gtt.scratch_ggtt_mfn =
+		intel_gvt_hypervisor_virt_to_mfn(page_addr);
+	if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
+		gvt_err("fail to translate scratch ggtt page\n");
+		__free_page(gvt->gtt.scratch_ggtt_page);
+		return -EFAULT;
+	}
+
 	if (enable_out_of_sync) {
 		ret = setup_spt_oos(gvt);
 		if (ret) {
@@ -2239,6 +2259,41 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
+	__free_page(gvt->gtt.scratch_ggtt_page);
+
 	if (enable_out_of_sync)
 		clean_spt_oos(gvt);
 }
+
+/**
+ * intel_vgpu_reset_ggtt - reset the GGTT entry
+ * @vgpu: a vGPU
+ *
+ * This function is called at the vGPU create stage
+ * to reset all the GGTT entries.
+ *
+ */
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	u32 index;
+	u32 offset;
+	u32 num_entries;
+	struct intel_gvt_gtt_entry e;
+
+	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
+	e.type = GTT_TYPE_GGTT_PTE;
+	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+	e.val64 |= _PAGE_PRESENT;
+
+	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+	for (offset = 0; offset < num_entries; offset++)
+		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+	for (offset = 0; offset < num_entries; offset++)
+		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+}
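The scratch page introduced here gives every GGTT entry of a freshly created vGPU a harmless zero-filled backing page, so guest reads through uninitialized entries return zeros instead of stale data left by a previous owner. The loop bounds in intel_vgpu_reset_ggtt() are just the region sizes in pages; a quick sanity check with hypothetical vGPU region sizes:

    #include <stdio.h>

    #define PAGE_SHIFT 12  /* 4 KiB pages, as on x86 */

    int main(void)
    {
            /* Hypothetical vGPU with a 256 MiB aperture and a 512 MiB
             * hidden (high) GM region; each GGTT PTE maps one page. */
            unsigned long long aperture_sz = 256ULL << 20;
            unsigned long long hidden_sz   = 512ULL << 20;

            printf("aperture PTEs: %llu\n", aperture_sz >> PAGE_SHIFT); /* 65536 */
            printf("hidden PTEs:   %llu\n", hidden_sz >> PAGE_SHIFT);   /* 131072 */
            return 0;
    }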
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -81,6 +81,9 @@ struct intel_gvt_gtt {
 	struct list_head oos_page_use_list_head;
 	struct list_head oos_page_free_list_head;
 	struct list_head mm_lru_list_head;
+
+	struct page *scratch_ggtt_page;
+	unsigned long scratch_ggtt_mfn;
 };
 
 enum {
@@ -202,6 +205,7 @@ struct intel_vgpu_gtt {
 
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -175,6 +175,7 @@ struct intel_vgpu {
 		struct notifier_block group_notifier;
 		struct kvm *kvm;
 		struct work_struct release_work;
+		atomic_t released;
 	} vdev;
 #endif
 };
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -114,12 +114,15 @@ out:
 static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
+	kvm_pfn_t pfn;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
-	entry = __gvt_cache_find(vgpu, gfn);
-	mutex_unlock(&vgpu->vdev.cache_lock);
 
-	return entry == NULL ? 0 : entry->pfn;
+	entry = __gvt_cache_find(vgpu, gfn);
+	pfn = (entry == NULL) ? 0 : entry->pfn;
+
+	mutex_unlock(&vgpu->vdev.cache_lock);
+	return pfn;
 }
 
 static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
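The gvt_cache_find() change closes a use-after-free window: the old code dropped cache_lock and then dereferenced entry, which a concurrent gvt_cache_remove() could already have freed. The fix copies the pfn out while the lock is still held. The same copy-under-lock pattern in self-contained userspace C (hypothetical names, pthreads in place of the kernel mutex):

    #include <pthread.h>
    #include <stdio.h>

    struct entry { unsigned long pfn; };

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct entry table[2] = { { 0x1000 }, { 0x2000 } };

    /* stand-in for __gvt_cache_find(): NULL on a miss */
    static struct entry *cache_lookup(unsigned long gfn)
    {
            return gfn < 2 ? &table[gfn] : NULL;
    }

    static unsigned long cache_find(unsigned long gfn)
    {
            struct entry *e;
            unsigned long pfn;

            pthread_mutex_lock(&cache_lock);
            e = cache_lookup(gfn);
            pfn = e ? e->pfn : 0;   /* copy while the lock keeps e alive */
            pthread_mutex_unlock(&cache_lock);

            return pfn;             /* safe: no dereference after unlock */
    }

    int main(void)
    {
            printf("pfn for gfn 1: 0x%lx\n", cache_find(1));
            return 0;
    }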
@@ -166,7 +169,7 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
 
 static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
 {
-	struct device *dev = &vgpu->vdev.mdev->dev;
+	struct device *dev = mdev_dev(vgpu->vdev.mdev);
 	struct gvt_dma *this;
 	unsigned long g1;
 	int rc;
@@ -195,7 +198,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 {
 	struct gvt_dma *dma;
 	struct rb_node *node = NULL;
-	struct device *dev = &vgpu->vdev.mdev->dev;
+	struct device *dev = mdev_dev(vgpu->vdev.mdev);
 	unsigned long gfn;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
@@ -396,7 +399,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	struct device *pdev;
 	void *gvt;
 
-	pdev = mdev->parent->dev;
+	pdev = mdev_parent_dev(mdev);
 	gvt = kdev_to_i915(pdev)->gvt;
 
 	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
@@ -418,7 +421,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	mdev_set_drvdata(mdev, vgpu);
 
 	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
-		     dev_name(&mdev->dev));
+		     dev_name(mdev_dev(mdev)));
 	return 0;
 }
 
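The four hunks above all track the same drm-next API change: struct mdev_device became opaque to vendor drivers, which now reach the underlying struct device through accessors instead of dereferencing struct members. A hedged sketch of the new calls, assuming only that the driver already holds an mdev pointer:

    #include <linux/device.h>
    #include <linux/mdev.h>

    /* Sketch: the accessor replacements used throughout kvmgt.c. */
    static void show_mdev_devices(struct mdev_device *mdev)
    {
            struct device *dev  = mdev_dev(mdev);        /* was &mdev->dev */
            struct device *pdev = mdev_parent_dev(mdev); /* was mdev->parent->dev */

            dev_info(dev, "parent device: %s\n", dev_name(pdev));
    }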
@@ -482,7 +485,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
 
 	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
-	ret = vfio_register_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, &events,
+	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
 				&vgpu->vdev.iommu_notifier);
 	if (ret != 0) {
 		gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
@@ -490,17 +493,26 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 	}
 
 	events = VFIO_GROUP_NOTIFY_SET_KVM;
-	ret = vfio_register_notifier(&mdev->dev, VFIO_GROUP_NOTIFY, &events,
+	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
 				&vgpu->vdev.group_notifier);
 	if (ret != 0) {
 		gvt_err("vfio_register_notifier for group failed: %d\n", ret);
 		goto undo_iommu;
 	}
 
-	return kvmgt_guest_init(mdev);
+	ret = kvmgt_guest_init(mdev);
+	if (ret)
+		goto undo_group;
+
+	atomic_set(&vgpu->vdev.released, 0);
+	return ret;
+
+undo_group:
+	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+					&vgpu->vdev.group_notifier);
 
 undo_iommu:
-	vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
+	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
 					&vgpu->vdev.iommu_notifier);
 out:
 	return ret;
@@ -509,17 +521,26 @@ out:
 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 {
 	struct kvmgt_guest_info *info;
+	int ret;
 
 	if (!handle_valid(vgpu->handle))
 		return;
 
-	vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
+	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+		return;
+
+	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
 					&vgpu->vdev.iommu_notifier);
-	vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
+	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+
+	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
 					&vgpu->vdev.group_notifier);
+	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
 
 	info = (struct kvmgt_guest_info *)vgpu->handle;
 	kvmgt_guest_exit(info);
+
 	vgpu->vdev.kvm = NULL;
 	vgpu->handle = 0;
 }
 
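The release path can be entered more than once — from the mdev release callback and again from the VFIO group notifier when KVM goes away — so the patch adds a released flag and lets only the caller that flips it 0 to 1 run the teardown. atomic_cmpxchg() returns the old value, so a non-zero return means someone else got there first. The same once-only guard in standalone C11:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int released;

    /* Only the first caller performs teardown; later calls return early.
     * Mirrors atomic_cmpxchg(&vgpu->vdev.released, 0, 1) in the patch. */
    static void release_once(void)
    {
            int expected = 0;
            if (!atomic_compare_exchange_strong(&released, &expected, 1))
                    return;  /* someone else already released */

            puts("tearing down");  /* runs exactly once */
    }

    int main(void)
    {
            release_once();
            release_once();  /* no-op */
            return 0;
    }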
@@ -534,6 +555,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
 {
 	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
 					vdev.release_work);
+
 	__intel_vgpu_release(vgpu);
 }
 
@@ -1089,7 +1111,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 	return 0;
 }
 
-static const struct parent_ops intel_vgpu_ops = {
+static const struct mdev_parent_ops intel_vgpu_ops = {
 	.supported_type_groups	= intel_vgpu_type_groups,
 	.create			= intel_vgpu_create,
 	.remove			= intel_vgpu_remove,
@@ -1134,6 +1156,10 @@ static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
 
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EINVAL;
+	}
 
 	spin_lock(&kvm->mmu_lock);
 
@@ -1164,6 +1190,10 @@ static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
 
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EINVAL;
+	}
 
 	spin_lock(&kvm->mmu_lock);
 
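Both hunks guard against gfn_to_memslot() returning NULL for a gfn with no backing memslot; the important detail is that the early return still drops the SRCU read lock taken just above. A hedged sketch of an equivalent shape that funnels every exit through a single unlock, so future early returns cannot leak the read side (hypothetical function name; the tracking call is elided to a comment):

    #include <linux/kvm_host.h>

    static int kvmgt_write_protect_op(struct kvm *kvm, u64 gfn)
    {
            struct kvm_memory_slot *slot;
            int idx, ret = 0;

            idx = srcu_read_lock(&kvm->srcu);
            slot = gfn_to_memslot(kvm, gfn);
            if (!slot) {
                    ret = -EINVAL;
                    goto out;
            }

            spin_lock(&kvm->mmu_lock);
            /* ... kvm_slot_page_track_{add,remove}_page(...) ... */
            spin_unlock(&kvm->mmu_lock);
    out:
            srcu_read_unlock(&kvm->srcu, idx);  /* single exit point */
            return ret;
    }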
@@ -1311,18 +1341,14 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-	struct intel_vgpu *vgpu;
-
 	if (!info) {
 		gvt_err("kvmgt_guest_info invalid\n");
 		return false;
 	}
 
-	vgpu = info->vgpu;
-
 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
 	kvmgt_protect_table_destroy(info);
-	gvt_cache_destroy(vgpu);
+	gvt_cache_destroy(info->vgpu);
 	vfree(info);
 
 	return true;
@@ -1372,7 +1398,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		return pfn;
 
 	pfn = INTEL_GVT_INVALID_ADDR;
-	dev = &info->vgpu->vdev.mdev->dev;
+	dev = mdev_dev(info->vgpu->vdev.mdev);
 	rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
 	if (rc != 1) {
 		gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
 
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
 	int i, ret;
 
 	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
-		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
 			+ i * PAGE_SIZE);
 		if (mfn == INTEL_GVT_INVALID_ADDR) {
 			gvt_err("fail to get MFN from VA\n");
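The missing ->va mattered because vgpu_opregion(vgpu) yields a pointer to the opregion bookkeeping struct, while ->va is the kernel mapping of the opregion pages themselves; the MFN lookup has to walk the latter. A tiny userspace illustration of the distinction, with a hypothetical struct layout:

    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical mirror of the opregion bookkeeping struct */
    struct opregion {
            void *va;   /* mapped opregion memory */
    };

    int main(void)
    {
            struct opregion reg;
            reg.va = malloc(4096);

            /* Buggy form: page arithmetic on the bookkeeping struct itself */
            printf("struct address: %p\n", (void *)&reg);
            /* Fixed form: page arithmetic on the mapped memory */
            printf("mapped address: %p\n", reg.va);

            free(reg.va);
            return 0;
    }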
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2218,15 +2218,6 @@ unlock:
 	mutex_unlock(&obj->mm.lock);
 }
 
-static unsigned int swiotlb_max_size(void)
-{
-#if IS_ENABLED(CONFIG_SWIOTLB)
-	return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
-#else
-	return 0;
-#endif
-}
-
 static void i915_sg_trim(struct sg_table *orig_st)
 {
 	struct sg_table new_st;
@@ -2275,7 +2266,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
 	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-	max_segment = swiotlb_max_size();
+	max_segment = swiotlb_max_segment();
 	if (!max_segment)
 		max_segment = rounddown(UINT_MAX, PAGE_SIZE);
 
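Here the driver-local swiotlb_max_size() helper is dropped in favour of the swiotlb_max_segment() export that came in via drm-next; the fallback when swiotlb is inactive (the helper returns 0) stays the same. What that fallback evaluates to, in standalone C:

    #include <limits.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* rounddown(x, y) in the kernel: x - (x % y) */
    static unsigned int rounddown_u(unsigned int x, unsigned int y)
    {
            return x - (x % y);
    }

    int main(void)
    {
            /* With swiotlb inactive the helper returns 0 and i915 falls
             * back to the largest page-aligned segment size. */
            unsigned int max_segment = 0;
            if (!max_segment)
                    max_segment = rounddown_u(UINT_MAX, PAGE_SIZE);
            printf("max_segment = %u\n", max_segment); /* 4294963200 */
            return 0;
    }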
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -447,7 +447,7 @@ retry:
 			connector->name);
 
 		/* go for command line mode first */
-		modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
+		modes[i] = drm_pick_cmdline_mode(fb_conn);
 
 		/* try for preferred next */
 		if (!modes[i]) {