drm/nouveau: port all engines to new engine module format

This is a HUGE commit, but it's not nearly as bad as it looks - any problems
can be isolated to a particular chipset and engine combination.  It was
simply too difficult to port each engine one at a time; the compat layers
are *already* ridiculous.

Most of the changes here are simply to the glue; the process for each of the
engine modules was to start with a standard skeleton and copy+paste the old
code into the appropriate places, fixing up variable names etc. as needed.
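
Roughly, each ported engine ends up shaped like the sketch below (the
identifiers here are illustrative only; the exact hooks vary per engine
and chipset):

    static int
    nvxx_engine_ctor(struct nouveau_object *parent,
                     struct nouveau_object *engine,
                     struct nouveau_oclass *oclass, void *data, u32 size,
                     struct nouveau_object **pobject)
    {
            /* allocate the engine object, then move the old per-chipset
             * setup code in here */
            return 0;
    }

    struct nouveau_oclass
    nvxx_engine_oclass = {
            .handle = NV_ENGINE(GR, 0x50),  /* illustrative class handle */
            .ofuncs = &(struct nouveau_ofuncs) {
                    .ctor = nvxx_engine_ctor,
                    .dtor = _nouveau_engine_dtor,
                    .init = _nouveau_engine_init,
                    .fini = _nouveau_engine_fini,
            },
    };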

v2: Marcin Slusarz <marcin.slusarz@gmail.com>
- fix find/replace bug in license header

v3: Ben Skeggs <bskeggs@redhat.com>
- bump indirect pushbuf size to 8KiB; 4KiB was barely enough for userspace
  and left no space for the kernel's requirements during GEM pushbuf
  submission.
- fix duplicate assignments noticed by clang

v4: Marcin Slusarz <marcin.slusarz@gmail.com>
- add sparse annotations to nv04_fifo_pause/nv04_fifo_start
- use ioread32_native/iowrite32_native for fifo control registers

v5: Ben Skeggs <bskeggs@redhat.com>
- rebase on v3.6-rc4, modified to keep copy engine fix intact
- nv10/fence: unmap fence bo before destroying
- fixed fermi regression when using nvidia gr fuc
- fixed typo in supported dma_mask checking

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Ben Skeggs
2012-07-20 08:17:34 +10:00
Parent: ac1499d957
Commit: ebb945a94b
146 changed files with 14,480 additions and 11,360 deletions

drivers/gpu/drm/nouveau/nouveau_ttm.c

@@ -24,9 +24,13 @@
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include "drmP.h"
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/instmem.h>
 
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
 
 static int
 nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
@@ -60,11 +64,10 @@ static void
 nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
 
 	nouveau_mem_node_cleanup(mem->mm_node);
-	nvfb_vram_put(dev, (struct nouveau_mem **)&mem->mm_node);
+	pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node);
 }
 
 static int
@@ -73,8 +76,8 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_mem *node;
 	u32 size_nc = 0;
@@ -83,9 +86,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;
 
-	ret = nvfb_vram_get(dev, mem->num_pages << PAGE_SHIFT,
-			    mem->page_alignment << PAGE_SHIFT, size_nc,
-			    (nvbo->tile_flags >> 8) & 0x3ff, &node);
+	ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT,
+			   mem->page_alignment << PAGE_SHIFT, size_nc,
+			   (nvbo->tile_flags >> 8) & 0x3ff, &node);
 	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
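
The hunk above is the pattern repeated throughout this series: instead of the
nvfb_vram_get()/nvfb_vram_put() compat wrappers keyed off the drm_device,
callers resolve the fb subdev once and go through its ram hooks.  A minimal
sketch of that shape, assuming a hypothetical helper (example_vram_alloc is
not in the tree):

    /* Allocate VRAM through the fb subdev and release it again.  get()
     * takes sizes in bytes; put() takes a double pointer so it can clear
     * the caller's reference. */
    static int
    example_vram_alloc(struct nouveau_fb *pfb, u64 size, u32 align)
    {
            struct nouveau_mem *node;
            int ret;

            ret = pfb->ram.get(pfb, size, align, 0 /* size_nc */,
                               0 /* memtype */, &node);
            if (ret)
                    return ret;

            /* ... use the allocation ... */

            pfb->ram.put(pfb, &node);
            return 0;
    }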
@@ -158,11 +161,9 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_mem *node;
 
-	if (unlikely((mem->num_pages << PAGE_SHIFT) >=
-		     dev_priv->gart_info.aper_size))
+	if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
		return -ENOMEM;
 
 	node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -188,13 +189,17 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
 	nouveau_gart_manager_debug
 };
 
+#include <core/subdev/vm/nv04.h>
 static int
 nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
-	struct drm_device *dev = dev_priv->dev;
-
-	man->priv = nv04vm_ref(dev);
-	return (man->priv != NULL) ? 0 : -ENODEV;
+	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+	struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
+	struct nv04_vmmgr_priv *priv = (void *)vmm;
+	struct nouveau_vm *vm = NULL;
+
+	nouveau_vm_ref(priv->vm, &vm, NULL);
+	man->priv = vm;
+	return 0;
 }
 
 static int
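
With the change above, the nv04 GART manager takes a proper reference on the
nv04 vmmgr's VM via nouveau_vm_ref() instead of borrowing it through the
nv04vm_ref() compat call, so teardown has to drop that reference again.  A
sketch of the matching fini (the real hook is outside this excerpt):

    /* Drop the VM reference taken in nv04_gart_manager_init();
     * nouveau_vm_ref(NULL, &vm, NULL) unreferences and clears vm. */
    static int
    nv04_gart_manager_fini_sketch(struct ttm_mem_type_manager *man)
    {
            struct nouveau_vm *vm = man->priv;

            nouveau_vm_ref(NULL, &vm, NULL);
            man->priv = NULL;
            return 0;
    }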
@@ -260,13 +265,12 @@ int
 nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *file_priv = filp->private_data;
-	struct drm_nouveau_private *dev_priv =
-		file_priv->minor->dev->dev_private;
+	struct nouveau_drm *drm = nouveau_newpriv(file_priv->minor->dev);
 
 	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);
 
-	return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
+	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
 }
 
 static int
@@ -282,12 +286,12 @@ nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
 }
 
 int
-nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
+nouveau_ttm_global_init(struct nouveau_drm *drm)
 {
 	struct drm_global_reference *global_ref;
 	int ret;
 
-	global_ref = &dev_priv->ttm.mem_global_ref;
+	global_ref = &drm->ttm.mem_global_ref;
 	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
 	global_ref->size = sizeof(struct ttm_mem_global);
 	global_ref->init = &nouveau_ttm_mem_global_init;
@@ -296,12 +300,12 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
 	ret = drm_global_item_ref(global_ref);
 	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
-		dev_priv->ttm.mem_global_ref.release = NULL;
+		drm->ttm.mem_global_ref.release = NULL;
		return ret;
 	}
 
-	dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
-	global_ref = &dev_priv->ttm.bo_global_ref.ref;
+	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
+	global_ref = &drm->ttm.bo_global_ref.ref;
 	global_ref->global_type = DRM_GLOBAL_TTM_BO;
 	global_ref->size = sizeof(struct ttm_bo_global);
 	global_ref->init = &ttm_bo_global_init;
@@ -310,8 +314,8 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
 	ret = drm_global_item_ref(global_ref);
 	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
-		drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
-		dev_priv->ttm.mem_global_ref.release = NULL;
+		drm_global_item_unref(&drm->ttm.mem_global_ref);
+		drm->ttm.mem_global_ref.release = NULL;
		return ret;
 	}
@@ -319,12 +323,105 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
 }
 
 void
-nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
+nouveau_ttm_global_release(struct nouveau_drm *drm)
 {
-	if (dev_priv->ttm.mem_global_ref.release == NULL)
+	if (drm->ttm.mem_global_ref.release == NULL)
		return;
 
-	drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
-	dev_priv->ttm.mem_global_ref.release = NULL;
+	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&drm->ttm.mem_global_ref);
+	drm->ttm.mem_global_ref.release = NULL;
 }
+
+int
+nouveau_ttm_init(struct nouveau_drm *drm)
+{
+	struct drm_device *dev = drm->dev;
+	u32 bits;
+	int ret;
+
+	if (nv_device(drm->device)->card_type >= NV_50) {
+		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
+			bits = 40;
+		else
+			bits = 32;
+	} else {
+		bits = 32;
+	}
+
+	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
+	if (ret)
+		return ret;
+
+	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
+	if (ret)
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+
+	ret = nouveau_ttm_global_init(drm);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&drm->ttm.bdev,
+				 drm->ttm.bo_global_ref.ref.object,
+				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 bits <= 32 ? true : false);
+	if (ret) {
+		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
+		return ret;
+	}
+
+	/* VRAM init */
+	drm->gem.vram_available  = nouveau_fb(drm->device)->ram.size;
+	drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;
+
+	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
+			     drm->gem.vram_available >> PAGE_SHIFT);
+	if (ret) {
+		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
+		return ret;
+	}
+
+	drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+				     pci_resource_len(dev->pdev, 1),
+				     DRM_MTRR_WC);
+
+	/* GART init */
+	if (drm->agp.stat != ENABLED) {
+		drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
+		if (drm->gem.gart_available > 512 * 1024 * 1024)
+			drm->gem.gart_available = 512 * 1024 * 1024;
+	} else {
+		drm->gem.gart_available = drm->agp.size;
+	}
+
+	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
+			     drm->gem.gart_available >> PAGE_SHIFT);
+	if (ret) {
+		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
+		return ret;
+	}
+
+	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
+	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
+	return 0;
+}
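
The MiB figures printed at the end fall out of the arithmetic above: VRAM is
ram.size minus the instmem reservation, and the non-AGP GART is capped at
512 MiB.  A stand-alone check of the unit handling (plain userspace C with
hypothetical numbers):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* hypothetical board: 1 GiB VRAM, 64 KiB reserved by instmem */
            uint64_t vram_available = (1ULL << 30) - (64 << 10);
            uint64_t gart_available = 512 * 1024 * 1024;  /* non-AGP cap */

            printf("VRAM: %u MiB\n", (uint32_t)(vram_available >> 20));
            printf("GART: %u MiB\n", (uint32_t)(gart_available >> 20));
            return 0;  /* VRAM: 1023 MiB, GART: 512 MiB */
    }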
+
+void
+nouveau_ttm_fini(struct nouveau_drm *drm)
+{
+	mutex_lock(&drm->dev->struct_mutex);
+	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
+	mutex_unlock(&drm->dev->struct_mutex);
+
+	ttm_bo_device_release(&drm->ttm.bdev);
+
+	nouveau_ttm_global_release(drm);
+
+	if (drm->ttm.mtrr >= 0) {
+		drm_mtrr_del(drm->ttm.mtrr,
+			     pci_resource_start(drm->dev->pdev, 1),
+			     pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
+		drm->ttm.mtrr = -1;
+	}
+}
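
nouveau_ttm_fini() unwinds nouveau_ttm_init() in reverse, and the mtrr >= 0
guard works because drm_mtrr_add() returns a negative value when no MTRR
could be allocated.  The handle idiom in isolation (the example_* names are
hypothetical):

    /* Store the (possibly negative) MTRR handle at setup, and only
     * delete a valid one at teardown. */
    struct example_state {
            int mtrr;
    };

    static void
    example_mtrr_setup(struct example_state *st, unsigned long base,
                       unsigned long size)
    {
            st->mtrr = drm_mtrr_add(base, size, DRM_MTRR_WC);  /* < 0 on error */
    }

    static void
    example_mtrr_teardown(struct example_state *st, unsigned long base,
                          unsigned long size)
    {
            if (st->mtrr >= 0) {
                    drm_mtrr_del(st->mtrr, base, size, DRM_MTRR_WC);
                    st->mtrr = -1;
            }
    }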