drm/nouveau: prepare for enabling svm with existing userspace interfaces

For a channel to make use of SVM features, it requires a different GPU MMU
configuration than we would normally use, which is not desirable to switch
to unless a client is actively going to use SVM.

In order to support SVM without more extensive changes to the userspace
interfaces, the SVM_INIT ioctl needs to replace the previous configuration
safely.

The only way we can currently do this safely, accounting for some unlikely
failure conditions, is to allocate the new VMM without destroying the last
one, and to prioritise the SVM-enabled configuration in the code that cares.
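
To illustrate, a minimal sketch of that prioritisation (the structures here
are reduced to the fields that matter, and the helper is hypothetical; the
patch below open-codes the equivalent ternary at each site):

    struct nouveau_cli;

    /* Reduced stand-ins for the real structures: vmm->cli is NULL
     * until the VMM is instantiated, which is what makes svm.cli
     * usable as the "SVM enabled" test. */
    struct nouveau_vmm {
            struct nouveau_cli *cli;   /* NULL => not initialised */
            /* ... VMM object, etc ... */
    };

    struct nouveau_cli {
            struct nouveau_vmm vmm;    /* original VMM, kept alive */
            struct nouveau_vmm svm;    /* set up by the SVM_INIT ioctl */
    };

    /* Hypothetical helper showing the selection rule: */
    static inline struct nouveau_vmm *
    cli_active_vmm(struct nouveau_cli *cli)
    {
            return cli->svm.cli ? &cli->svm : &cli->vmm;
    }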

This will get cleaned up again further down the track.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 9 files changed, 42 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -68,10 +68,11 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
         struct nouveau_bo *nvbo = nouveau_gem_object(gem);
         struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
         struct device *dev = drm->dev->dev;
+        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
         struct nouveau_vma *vma;
         int ret;
 
-        if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
+        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                 return 0;
 
         ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
@@ -82,7 +83,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
         if (ret < 0 && ret != -EACCES)
                 goto out;
 
-        ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
+        ret = nouveau_vma_new(nvbo, vmm, &vma);
         pm_runtime_mark_last_busy(dev);
         pm_runtime_put_autosuspend(dev);
 out:
@@ -142,17 +143,18 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
         struct nouveau_bo *nvbo = nouveau_gem_object(gem);
         struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
         struct device *dev = drm->dev->dev;
+        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
         struct nouveau_vma *vma;
         int ret;
 
-        if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
+        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                 return;
 
         ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
         if (ret)
                 return;
 
-        vma = nouveau_vma_find(nvbo, &cli->vmm);
+        vma = nouveau_vma_find(nvbo, vmm);
         if (vma) {
                 if (--vma->refs == 0) {
                         ret = pm_runtime_get_sync(dev);
@@ -219,6 +221,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 {
         struct nouveau_cli *cli = nouveau_cli(file_priv);
         struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
         struct nouveau_vma *vma;
 
         if (is_power_of_2(nvbo->valid_domains))
@@ -228,8 +231,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
         else
                 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
         rep->offset = nvbo->bo.offset;
-        if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
-                vma = nouveau_vma_find(nvbo, &cli->vmm);
+        if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+                vma = nouveau_vma_find(nvbo, vmm);
                 if (!vma)
                         return -EINVAL;
 
@@ -321,7 +324,8 @@ struct validate_op {
 };
 
 static void
-validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
+validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
+                        struct nouveau_fence *fence,
                         struct drm_nouveau_gem_pushbuf_bo *pbbo)
 {
         struct nouveau_bo *nvbo;
@@ -332,13 +336,11 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
                 b = &pbbo[nvbo->pbbo_index];
 
                 if (likely(fence)) {
-                        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-                        struct nouveau_vma *vma;
-
                         nouveau_bo_fence(nvbo, fence, !!b->write_domains);
 
-                        if (drm->client.vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
-                                vma = (void *)(unsigned long)b->user_priv;
+                        if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+                                struct nouveau_vma *vma =
+                                        (void *)(unsigned long)b->user_priv;
                                 nouveau_fence_unref(&vma->fence);
                                 dma_fence_get(&fence->base);
                                 vma->fence = fence;
@@ -358,10 +360,11 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
 }
 
 static void
-validate_fini(struct validate_op *op, struct nouveau_fence *fence,
+validate_fini(struct validate_op *op, struct nouveau_channel *chan,
+              struct nouveau_fence *fence,
               struct drm_nouveau_gem_pushbuf_bo *pbbo)
 {
-        validate_fini_no_ticket(op, fence, pbbo);
+        validate_fini_no_ticket(op, chan, fence, pbbo);
         ww_acquire_fini(&op->ticket);
 }
 
@@ -416,7 +419,7 @@ retry:
                         list_splice_tail_init(&vram_list, &op->list);
                         list_splice_tail_init(&gart_list, &op->list);
                         list_splice_tail_init(&both_list, &op->list);
-                        validate_fini_no_ticket(op, NULL, NULL);
+                        validate_fini_no_ticket(op, chan, NULL, NULL);
                         if (unlikely(ret == -EDEADLK)) {
                                 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
                                                               &op->ticket);
@@ -430,8 +433,8 @@ retry:
                         }
                 }
 
-                if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
-                        struct nouveau_vmm *vmm = &cli->vmm;
+                if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+                        struct nouveau_vmm *vmm = chan->vmm;
                         struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
                         if (!vma) {
                                 NV_PRINTK(err, cli, "vma not found!\n");
@@ -471,7 +474,7 @@ retry:
         list_splice_tail(&gart_list, &op->list);
         list_splice_tail(&both_list, &op->list);
         if (ret)
-                validate_fini(op, NULL, NULL);
+                validate_fini(op, chan, NULL, NULL);
         return ret;
 
 }
@@ -563,7 +566,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
         if (unlikely(ret < 0)) {
                 if (ret != -ERESTARTSYS)
                         NV_PRINTK(err, cli, "validating bo list\n");
-                validate_fini(op, NULL, NULL);
+                validate_fini(op, chan, NULL, NULL);
                 return ret;
         }
         *apply_relocs = ret;
@@ -842,7 +845,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
         }
 
 out:
-        validate_fini(&op, fence, bo);
+        validate_fini(&op, chan, fence, bo);
         nouveau_fence_unref(&fence);
 
 out_prevalid: