drm/ttm: move fpfn and lpfn into each placement v2
This allows us to specify in a more fine-grained way where to place the buffer object.

v2: rebased on drm-next, add bochs changes as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
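For reference, the per-placement structure this series introduces (the include/drm/ttm/ttm_placement.h change is not shown in this excerpt) looks roughly like the sketch below: the fpfn/lpfn page-frame range moves from struct ttm_placement into each individual entry, so every candidate placement can carry its own address restriction.

/* Sketch of the new per-placement description; not a verbatim copy of the header change. */
struct ttm_place {
	unsigned	fpfn;	/* first allowed page frame number, 0 = no lower bound */
	unsigned	lpfn;	/* last allowed page frame number, 0 = no upper bound */
	uint32_t	flags;	/* TTM_PL_FLAG_* memory type and caching flags */
};

struct ttm_placement {
	unsigned		num_placement;
	const struct ttm_place	*placement;
	unsigned		num_busy_placement;
	const struct ttm_place	*busy_placement;
};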
drivers/gpu/drm/ttm/ttm_bo.c
@@ -53,12 +53,13 @@ static struct attribute ttm_bo_count = {
 	.mode = S_IRUGO
 };
 
-static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+static inline int ttm_mem_type_from_place(const struct ttm_place *place,
+					  uint32_t *mem_type)
 {
 	int i;
 
 	for (i = 0; i <= TTM_PL_PRIV5; i++)
-		if (flags & (1 << i)) {
+		if (place->flags & (1 << i)) {
 			*mem_type = i;
 			return 0;
 		}
@@ -89,12 +90,12 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 	       bo, bo->mem.num_pages, bo->mem.size >> 10,
 	       bo->mem.size >> 20);
 	for (i = 0; i < placement->num_placement; i++) {
-		ret = ttm_mem_type_from_flags(placement->placement[i],
+		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
 		if (ret)
 			return;
 		pr_err("  placement[%d]=0x%08X (%d)\n",
-		       i, placement->placement[i], mem_type);
+		       i, placement->placement[i].flags, mem_type);
 		ttm_mem_type_debug(bo->bdev, mem_type);
 	}
 }
@@ -685,8 +686,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	evict_mem.bus.io_reserved_vm = false;
 	evict_mem.bus.io_reserved_count = 0;
 
-	placement.fpfn = 0;
-	placement.lpfn = 0;
 	placement.num_placement = 0;
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
@@ -774,7 +773,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
-					struct ttm_placement *placement,
+					const struct ttm_place *place,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_gpu)
@@ -784,7 +783,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	int ret;
 
 	do {
-		ret = (*man->func->get_node)(man, bo, placement, 0, mem);
+		ret = (*man->func->get_node)(man, bo, place, mem);
 		if (unlikely(ret != 0))
 			return ret;
 		if (mem->mm_node)
@@ -827,18 +826,18 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
-				 uint32_t proposed_placement,
+				 const struct ttm_place *place,
				 uint32_t *masked_placement)
 {
 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
-	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
+	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
 		return false;
 
-	if ((proposed_placement & man->available_caching) == 0)
+	if ((place->flags & man->available_caching) == 0)
 		return false;
 
-	cur_flags |= (proposed_placement & man->available_caching);
+	cur_flags |= (place->flags & man->available_caching);
 
 	*masked_placement = cur_flags;
 	return true;
@@ -869,15 +868,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 	mem->mm_node = NULL;
 	for (i = 0; i < placement->num_placement; ++i) {
-		ret = ttm_mem_type_from_flags(placement->placement[i],
-						&mem_type);
+		const struct ttm_place *place = &placement->placement[i];
+
+		ret = ttm_mem_type_from_place(place, &mem_type);
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
 
-		type_ok = ttm_bo_mt_compatible(man,
-						mem_type,
-						placement->placement[i],
+		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
						&cur_flags);
 
 		if (!type_ok)
@@ -889,7 +887,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		 * Use the access and other non-mapping-related flag bits from
 		 * the memory placement flags to the current flags
 		 */
-		ttm_flag_masked(&cur_flags, placement->placement[i],
+		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM)
@@ -897,8 +895,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			ret = (*man->func->get_node)(man, bo, placement,
-							cur_flags, mem);
+			ret = (*man->func->get_node)(man, bo, place, mem);
 			if (unlikely(ret))
 				return ret;
 		}
@@ -916,17 +913,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		return -EINVAL;
 
 	for (i = 0; i < placement->num_busy_placement; ++i) {
-		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
-						&mem_type);
+		const struct ttm_place *place = &placement->busy_placement[i];
+
+		ret = ttm_mem_type_from_place(place, &mem_type);
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
 		if (!man->has_type)
 			continue;
-		if (!ttm_bo_mt_compatible(man,
-						mem_type,
-						placement->busy_placement[i],
-						&cur_flags))
+		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
@@ -935,7 +930,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		 * Use the access and other non-mapping-related flag bits from
 		 * the memory placement flags to the current flags
 		 */
-		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM) {
@@ -945,7 +940,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			return 0;
 		}
 
-		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
						interruptible, no_wait_gpu);
 		if (ret == 0 && mem->mm_node) {
 			mem->placement = cur_flags;
@@ -1006,20 +1001,27 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
 {
 	int i;
 
-	if (mem->mm_node && placement->lpfn != 0 &&
-	    (mem->start < placement->fpfn ||
-	     mem->start + mem->num_pages > placement->lpfn))
-		return false;
-
 	for (i = 0; i < placement->num_placement; i++) {
-		*new_flags = placement->placement[i];
+		const struct ttm_place *heap = &placement->placement[i];
+		if (mem->mm_node && heap->lpfn != 0 &&
+		    (mem->start < heap->fpfn ||
+		     mem->start + mem->num_pages > heap->lpfn))
+			continue;
+
+		*new_flags = heap->flags;
 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
 			return true;
 	}
 
 	for (i = 0; i < placement->num_busy_placement; i++) {
-		*new_flags = placement->busy_placement[i];
+		const struct ttm_place *heap = &placement->busy_placement[i];
+		if (mem->mm_node && heap->lpfn != 0 &&
+		    (mem->start < heap->fpfn ||
+		     mem->start + mem->num_pages > heap->lpfn))
+			continue;
+
+		*new_flags = heap->flags;
 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
 			return true;
@@ -1037,11 +1039,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	uint32_t new_flags;
 
 	lockdep_assert_held(&bo->resv->lock.base);
-	/* Check that range is valid */
-	if (placement->lpfn || placement->fpfn)
-		if (placement->fpfn > placement->lpfn ||
-			(placement->lpfn - placement->fpfn) < bo->num_pages)
-			return -EINVAL;
 	/*
 	 * Check whether we need to move buffer.
 	 */
@@ -1070,15 +1067,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_validate);
 
-int ttm_bo_check_placement(struct ttm_buffer_object *bo,
-				struct ttm_placement *placement)
-{
-	BUG_ON((placement->fpfn || placement->lpfn) &&
-	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
-
-	return 0;
-}
-
 int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
@@ -1147,15 +1135,12 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 
-	ret = ttm_bo_check_placement(bo, placement);
-
 	/*
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
 	 */
-	if (likely(!ret) &&
-	    (bo->type == ttm_bo_type_device ||
-	     bo->type == ttm_bo_type_sg))
+	if (bo->type == ttm_bo_type_device ||
+	    bo->type == ttm_bo_type_sg)
 		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);
 
drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -49,8 +49,7 @@ struct ttm_range_manager {
 
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
-			       struct ttm_placement *placement,
-			       uint32_t flags,
+			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
 {
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -60,7 +59,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	unsigned long lpfn;
 	int ret;
 
-	lpfn = placement->lpfn;
+	lpfn = place->lpfn;
 	if (!lpfn)
 		lpfn = man->size;
 
@@ -68,13 +67,13 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	if (!node)
 		return -ENOMEM;
 
-	if (flags & TTM_PL_FLAG_TOPDOWN)
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		aflags = DRM_MM_CREATE_TOP;
 
 	spin_lock(&rman->lock);
 	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
					  mem->page_alignment, 0,
-					  placement->fpfn, lpfn,
+					  place->fpfn, lpfn,
					  DRM_MM_SEARCH_BEST,
					  aflags);
 	spin_unlock(&rman->lock);
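As a usage illustration (hypothetical driver code, not part of this patch): with the range now stored per placement, a driver can for instance limit its VRAM placement to the CPU-visible first 256 MiB while leaving the GTT fallback unrestricted. The names and the 256 MiB figure below are made up for the example; a PAGE_SHIFT-based pfn conversion is assumed.

/*
 * Hypothetical example only: one placement restricted to the first
 * 256 MiB of VRAM, plus an unrestricted GTT fallback (lpfn == 0
 * means "no upper limit").
 */
static const struct ttm_place example_places[] = {
	{
		.fpfn = 0,
		.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT,
		.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM,
	},
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT,
	},
};

static const struct ttm_placement example_placement = {
	.num_placement = ARRAY_SIZE(example_places),
	.placement = example_places,
	.num_busy_placement = ARRAY_SIZE(example_places),
	.busy_placement = example_places,
};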