Merge branch 'drm-next-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-next
Fixes for 4.19:
- Fix UVD 7.2 instance handling
- Fix UVD 7.2 harvesting
- GPU scheduler fix for when a process is killed
- TTM cleanups
- amdgpu CS bo_list fixes
- Powerplay fixes for polaris12 and CZ/ST
- DC fixes for link training with certain HMDs
- DC fix for vega10 blank screen in certain cases

From: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180801222906.1016-1-alexander.deucher@amd.com
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -74,6 +74,7 @@
 #include "amdgpu_gart.h"
 #include "amdgpu_debugfs.h"
 #include "amdgpu_job.h"
+#include "amdgpu_bo_list.h"
 
 /*
  * Modules parameters.
@@ -689,45 +690,6 @@ struct amdgpu_fpriv {
     struct amdgpu_ctx_mgr ctx_mgr;
 };
 
-/*
- * residency list
- */
-struct amdgpu_bo_list_entry {
-    struct amdgpu_bo *robj;
-    struct ttm_validate_buffer tv;
-    struct amdgpu_bo_va *bo_va;
-    uint32_t priority;
-    struct page **user_pages;
-    int user_invalidated;
-};
-
-struct amdgpu_bo_list {
-    struct mutex lock;
-    struct rcu_head rhead;
-    struct kref refcount;
-    struct amdgpu_bo *gds_obj;
-    struct amdgpu_bo *gws_obj;
-    struct amdgpu_bo *oa_obj;
-    unsigned first_userptr;
-    unsigned num_entries;
-    struct amdgpu_bo_list_entry *array;
-};
-
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
-                             struct list_head *validated);
-void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
-int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
-                                      struct drm_amdgpu_bo_list_entry **info_param);
-
-int amdgpu_bo_list_create(struct amdgpu_device *adev,
-                          struct drm_file *filp,
-                          struct drm_amdgpu_bo_list_entry *info,
-                          unsigned num_entries,
-                          struct amdgpu_bo_list **list);
-
 /*
  * GFX stuff
  */
@@ -1748,6 +1710,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
+#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c

@@ -35,83 +35,53 @@
 #define AMDGPU_BO_LIST_MAX_PRIORITY 32u
 #define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
 
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
-                              struct drm_file *filp,
-                              struct amdgpu_bo_list *list,
-                              struct drm_amdgpu_bo_list_entry *info,
-                              unsigned num_entries);
-
-static void amdgpu_bo_list_release_rcu(struct kref *ref)
+static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
 {
-    unsigned i;
-    struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
-                                               refcount);
+    struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
+                                               rhead);
 
-    for (i = 0; i < list->num_entries; ++i)
-        amdgpu_bo_unref(&list->array[i].robj);
-
-    mutex_destroy(&list->lock);
-    kvfree(list->array);
-    kfree_rcu(list, rhead);
+    kvfree(list);
 }
 
-int amdgpu_bo_list_create(struct amdgpu_device *adev,
-                          struct drm_file *filp,
-                          struct drm_amdgpu_bo_list_entry *info,
-                          unsigned num_entries,
-                          struct amdgpu_bo_list **list_out)
+static void amdgpu_bo_list_free(struct kref *ref)
 {
+    struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
+                                               refcount);
+    struct amdgpu_bo_list_entry *e;
+
+    amdgpu_bo_list_for_each_entry(e, list)
+        amdgpu_bo_unref(&e->robj);
+
+    call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
+}
+
+int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
+                          struct drm_amdgpu_bo_list_entry *info,
+                          unsigned num_entries, struct amdgpu_bo_list **result)
+{
+    unsigned last_entry = 0, first_userptr = num_entries;
+    struct amdgpu_bo_list_entry *array;
     struct amdgpu_bo_list *list;
+    uint64_t total_size = 0;
+    size_t size;
+    unsigned i;
     int r;
 
-    list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+    if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry))
+        return -EINVAL;
+
+    size = sizeof(struct amdgpu_bo_list);
+    size += num_entries * sizeof(struct amdgpu_bo_list_entry);
+    list = kvmalloc(size, GFP_KERNEL);
     if (!list)
         return -ENOMEM;
 
-    /* initialize bo list*/
-    mutex_init(&list->lock);
     kref_init(&list->refcount);
-    r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
-    if (r) {
-        kfree(list);
-        return r;
-    }
+    list->gds_obj = adev->gds.gds_gfx_bo;
+    list->gws_obj = adev->gds.gws_gfx_bo;
+    list->oa_obj = adev->gds.oa_gfx_bo;
 
-    *list_out = list;
-    return 0;
-}
-
-static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
-{
-    struct amdgpu_bo_list *list;
-
-    mutex_lock(&fpriv->bo_list_lock);
-    list = idr_remove(&fpriv->bo_list_handles, id);
-    mutex_unlock(&fpriv->bo_list_lock);
-    if (list)
-        kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
-                              struct drm_file *filp,
-                              struct amdgpu_bo_list *list,
-                              struct drm_amdgpu_bo_list_entry *info,
-                              unsigned num_entries)
-{
-    struct amdgpu_bo_list_entry *array;
-    struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
-    struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
-    struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
-
-    unsigned last_entry = 0, first_userptr = num_entries;
-    unsigned i;
-    int r;
-    unsigned long total_size = 0;
-
-    array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
-    if (!array)
-        return -ENOMEM;
+    array = amdgpu_bo_list_array_entry(list, 0);
     memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
 
     for (i = 0; i < num_entries; ++i) {
@@ -148,59 +118,56 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
         entry->tv.shared = !entry->robj->prime_shared_count;
 
         if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
-            gds_obj = entry->robj;
+            list->gds_obj = entry->robj;
         if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
-            gws_obj = entry->robj;
+            list->gws_obj = entry->robj;
         if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
-            oa_obj = entry->robj;
+            list->oa_obj = entry->robj;
 
         total_size += amdgpu_bo_size(entry->robj);
         trace_amdgpu_bo_list_set(list, entry->robj);
     }
 
-    for (i = 0; i < list->num_entries; ++i)
-        amdgpu_bo_unref(&list->array[i].robj);
-
-    kvfree(list->array);
-
-    list->gds_obj = gds_obj;
-    list->gws_obj = gws_obj;
-    list->oa_obj = oa_obj;
     list->first_userptr = first_userptr;
-    list->array = array;
     list->num_entries = num_entries;
 
     trace_amdgpu_cs_bo_status(list->num_entries, total_size);
 
+    *result = list;
     return 0;
 
 error_free:
     while (i--)
         amdgpu_bo_unref(&array[i].robj);
-    kvfree(array);
+    kvfree(list);
    return r;
 }
 
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
+static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
 {
-    struct amdgpu_bo_list *result;
+    struct amdgpu_bo_list *list;
 
+    mutex_lock(&fpriv->bo_list_lock);
+    list = idr_remove(&fpriv->bo_list_handles, id);
+    mutex_unlock(&fpriv->bo_list_lock);
+    if (list)
+        kref_put(&list->refcount, amdgpu_bo_list_free);
+}
+
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+                       struct amdgpu_bo_list **result)
+{
     rcu_read_lock();
-    result = idr_find(&fpriv->bo_list_handles, id);
+    *result = idr_find(&fpriv->bo_list_handles, id);
 
-    if (result) {
-        if (kref_get_unless_zero(&result->refcount)) {
-            rcu_read_unlock();
-            mutex_lock(&result->lock);
-        } else {
-            rcu_read_unlock();
-            result = NULL;
-        }
-    } else {
+    if (*result && kref_get_unless_zero(&(*result)->refcount)) {
         rcu_read_unlock();
+        return 0;
     }
 
-    return result;
+    rcu_read_unlock();
+    return -ENOENT;
 }
 
 void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
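The rewritten amdgpu_bo_list_get() above no longer hands back a mutex-locked list; it only takes a counted reference, under rcu_read_lock(), via kref_get_unless_zero(). What makes this safe is the pairing with the free path: the final kref_put() defers the actual kvfree() through call_rcu(), so a concurrent reader either wins the race (refcount still nonzero, reference taken) or loses it cleanly (increment refused, -ENOENT) without ever touching freed memory. A minimal userspace sketch of that only-if-still-live increment, using C11 atomics rather than the kernel's kref:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
    atomic_uint refcount;
};

/* Take a reference only if the object is still live (refcount > 0).
 * A reader that loses the race against the final put simply fails and
 * reports "not found" instead of resurrecting a dying object. */
static bool get_unless_zero(struct object *obj)
{
    unsigned int cur = atomic_load(&obj->refcount);

    while (cur != 0) {
        /* On failure, cur is reloaded and the loop re-checks for zero. */
        if (atomic_compare_exchange_weak(&obj->refcount, &cur, cur + 1))
            return true;    /* reference acquired */
    }
    return false;           /* object already dying */
}

int main(void)
{
    struct object obj = { .refcount = 1 };

    printf("live lookup: %d\n", get_unless_zero(&obj));  /* prints 1 */
    atomic_store(&obj.refcount, 0);                      /* final put */
    printf("dead lookup: %d\n", get_unless_zero(&obj));  /* prints 0 */
    return 0;
}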
@@ -211,6 +178,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
  * concatenated in descending order.
  */
     struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
+    struct amdgpu_bo_list_entry *e;
     unsigned i;
 
     for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
@@ -221,14 +189,13 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
      * in the list, the sort mustn't change the ordering of buffers
      * with the same priority, i.e. it must be stable.
      */
-    for (i = 0; i < list->num_entries; i++) {
-        unsigned priority = list->array[i].priority;
+    amdgpu_bo_list_for_each_entry(e, list) {
+        unsigned priority = e->priority;
 
-        if (!list->array[i].robj->parent)
-            list_add_tail(&list->array[i].tv.head,
-                          &bucket[priority]);
+        if (!e->robj->parent)
+            list_add_tail(&e->tv.head, &bucket[priority]);
 
-        list->array[i].user_pages = NULL;
+        e->user_pages = NULL;
     }
 
     /* Connect the sorted buckets in the output list. */
@@ -238,20 +205,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
 {
-    mutex_unlock(&list->lock);
-    kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
-{
-    unsigned i;
-
-    for (i = 0; i < list->num_entries; ++i)
-        amdgpu_bo_unref(&list->array[i].robj);
-
-    mutex_destroy(&list->lock);
-    kvfree(list->array);
-    kfree(list);
+    kref_put(&list->refcount, amdgpu_bo_list_free);
 }
 
 int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
@@ -304,7 +258,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
     union drm_amdgpu_bo_list *args = data;
     uint32_t handle = args->in.list_handle;
     struct drm_amdgpu_bo_list_entry *info = NULL;
-    struct amdgpu_bo_list *list;
+    struct amdgpu_bo_list *list, *old;
     int r;
 
     r = amdgpu_bo_create_list_entry_array(&args->in, &info);
@@ -322,7 +276,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
         r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
         mutex_unlock(&fpriv->bo_list_lock);
         if (r < 0) {
-            amdgpu_bo_list_free(list);
+            amdgpu_bo_list_put(list);
             return r;
         }
 
@@ -335,17 +289,22 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
         break;
 
     case AMDGPU_BO_LIST_OP_UPDATE:
-        r = -ENOENT;
-        list = amdgpu_bo_list_get(fpriv, handle);
-        if (!list)
-            goto error_free;
-
-        r = amdgpu_bo_list_set(adev, filp, list, info,
-                               args->in.bo_number);
-        amdgpu_bo_list_put(list);
+        r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
+                                  &list);
         if (r)
             goto error_free;
 
+        mutex_lock(&fpriv->bo_list_lock);
+        old = idr_replace(&fpriv->bo_list_handles, list, handle);
+        mutex_unlock(&fpriv->bo_list_lock);
+
+        if (IS_ERR(old)) {
+            amdgpu_bo_list_put(list);
+            r = PTR_ERR(old);
+            goto error_free;
+        }
+
+        amdgpu_bo_list_put(old);
         break;
 
     default:
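AMDGPU_BO_LIST_OP_UPDATE now updates by replacement rather than mutating the list in place: a brand-new list is built, idr_replace() swaps it into the handle table under the lock, and the previous list is simply put, dying once its last reader drops it. Readers that already hold a reference keep a coherent snapshot throughout. A hedged userspace analogue of that publish-and-put flow (names invented, error handling trimmed):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct snap { atomic_uint ref; int data; };

static struct snap *snap_new(int data)
{
    struct snap *s = malloc(sizeof(*s));   /* NULL check omitted */

    atomic_init(&s->ref, 1);
    s->data = data;
    return s;
}

static void snap_put(struct snap *s)
{
    if (s && atomic_fetch_sub(&s->ref, 1) == 1)
        free(s);                           /* last reference dropped */
}

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct snap *slot;                  /* stands in for one IDR slot */

/* Build a fresh snapshot, swap the published pointer (the idr_replace()
 * step), then drop the table's reference to the old snapshot. */
static void update_slot(int data)
{
    struct snap *list = snap_new(data), *old;

    pthread_mutex_lock(&table_lock);
    old = slot;
    slot = list;
    pthread_mutex_unlock(&table_lock);

    snap_put(old);
}

int main(void)
{
    slot = snap_new(1);
    update_slot(2);
    snap_put(slot);    /* drop the table reference at teardown */
    return 0;
}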
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h (new file, 85 lines)

@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_BO_LIST_H__
+#define __AMDGPU_BO_LIST_H__
+
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/amdgpu_drm.h>
+
+struct amdgpu_device;
+struct amdgpu_bo;
+struct amdgpu_bo_va;
+struct amdgpu_fpriv;
+
+struct amdgpu_bo_list_entry {
+    struct amdgpu_bo *robj;
+    struct ttm_validate_buffer tv;
+    struct amdgpu_bo_va *bo_va;
+    uint32_t priority;
+    struct page **user_pages;
+    int user_invalidated;
+};
+
+struct amdgpu_bo_list {
+    struct rcu_head rhead;
+    struct kref refcount;
+    struct amdgpu_bo *gds_obj;
+    struct amdgpu_bo *gws_obj;
+    struct amdgpu_bo *oa_obj;
+    unsigned first_userptr;
+    unsigned num_entries;
+};
+
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+                       struct amdgpu_bo_list **result);
+void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+                             struct list_head *validated);
+void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+                                      struct drm_amdgpu_bo_list_entry **info_param);
+
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
+                          struct drm_file *filp,
+                          struct drm_amdgpu_bo_list_entry *info,
+                          unsigned num_entries,
+                          struct amdgpu_bo_list **list);
+
+static inline struct amdgpu_bo_list_entry *
+amdgpu_bo_list_array_entry(struct amdgpu_bo_list *list, unsigned index)
+{
+    struct amdgpu_bo_list_entry *array = (void *)&list[1];
+
+    return &array[index];
+}
+
+#define amdgpu_bo_list_for_each_entry(e, list) \
+    for (e = amdgpu_bo_list_array_entry(list, 0); \
+         e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+         ++e)
+
+#define amdgpu_bo_list_for_each_userptr_entry(e, list) \
+    for (e = amdgpu_bo_list_array_entry(list, (list)->first_userptr); \
+         e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+         ++e)
+
+#endif
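The key layout change in this header: struct amdgpu_bo_list and its entry array now live in a single kvmalloc'd block, with amdgpu_bo_list_array_entry() deriving the array address as "just past the header" (&list[1]). One allocation means one failure path and one kvfree() (done from RCU context in the .c file), at the cost of needing the SIZE_MAX overflow check before the size is computed. A small self-contained sketch of the same pattern in plain C:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { int id; };

struct list_hdr {
    unsigned int num_entries;
    /* the entry array is stored immediately after the header */
};

static struct entry *list_array(struct list_hdr *l)
{
    return (struct entry *)&l[1];   /* the same trick as &list[1] above */
}

static struct list_hdr *list_create(unsigned int n)
{
    struct list_hdr *l;

    /* guard the multiplication, mirroring the SIZE_MAX check above */
    if (n > (SIZE_MAX - sizeof(*l)) / sizeof(struct entry))
        return NULL;

    l = malloc(sizeof(*l) + n * sizeof(struct entry));
    if (!l)
        return NULL;
    l->num_entries = n;
    memset(list_array(l), 0, n * sizeof(struct entry));
    return l;
}

int main(void)
{
    struct list_hdr *l = list_create(4);

    for (unsigned int i = 0; i < l->num_entries; i++)
        list_array(l)[i].id = (int)i;
    printf("entry 3 id = %d\n", list_array(l)[3].id);
    free(l);    /* one free() releases header and entries together */
    return 0;
}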
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -561,27 +561,37 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                 union drm_amdgpu_cs *cs)
 {
     struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+    struct amdgpu_vm *vm = &fpriv->vm;
     struct amdgpu_bo_list_entry *e;
     struct list_head duplicates;
-    unsigned i, tries = 10;
     struct amdgpu_bo *gds;
     struct amdgpu_bo *gws;
     struct amdgpu_bo *oa;
+    unsigned tries = 10;
     int r;
 
     INIT_LIST_HEAD(&p->validated);
 
     /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
-    if (!p->bo_list)
-        p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
-    else
-        mutex_lock(&p->bo_list->lock);
+    if (cs->in.bo_list_handle) {
+        if (p->bo_list)
+            return -EINVAL;
 
+        r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
+                               &p->bo_list);
+        if (r)
+            return r;
+    } else if (!p->bo_list) {
+        /* Create a empty bo_list when no handle is provided */
+        r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
+                                  &p->bo_list);
+        if (r)
+            return r;
+    }
 
-    if (p->bo_list) {
     amdgpu_bo_list_get_list(p->bo_list, &p->validated);
     if (p->bo_list->first_userptr != p->bo_list->num_entries)
         p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
-    }
 
     INIT_LIST_HEAD(&duplicates);
     amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
@@ -591,7 +601,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
     while (1) {
         struct list_head need_pages;
-        unsigned i;
 
         r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
                                    &duplicates);
@@ -601,17 +610,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
             goto error_free_pages;
         }
 
-        /* Without a BO list we don't have userptr BOs */
-        if (!p->bo_list)
-            break;
-
         INIT_LIST_HEAD(&need_pages);
-        for (i = p->bo_list->first_userptr;
-             i < p->bo_list->num_entries; ++i) {
-            struct amdgpu_bo *bo;
-
-            e = &p->bo_list->array[i];
-            bo = e->robj;
+        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+            struct amdgpu_bo *bo = e->robj;
 
             if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
                  &e->user_invalidated) && e->user_pages) {
@@ -703,23 +704,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
     amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
                                  p->bytes_moved_vis);
 
-    if (p->bo_list) {
-        struct amdgpu_vm *vm = &fpriv->vm;
-        unsigned i;
-
     gds = p->bo_list->gds_obj;
     gws = p->bo_list->gws_obj;
     oa = p->bo_list->oa_obj;
-        for (i = 0; i < p->bo_list->num_entries; i++) {
-            struct amdgpu_bo *bo = p->bo_list->array[i].robj;
-
-            p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
-        }
-    } else {
-        gds = p->adev->gds.gds_gfx_bo;
-        gws = p->adev->gds.gws_gfx_bo;
-        oa = p->adev->gds.oa_gfx_bo;
-    }
+
+    amdgpu_bo_list_for_each_entry(e, p->bo_list)
+        e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
 
     if (gds) {
         p->job->gds_base = amdgpu_bo_gpu_offset(gds);
@@ -747,11 +737,7 @@ error_validate:
 
 error_free_pages:
 
-    if (p->bo_list) {
-        for (i = p->bo_list->first_userptr;
-             i < p->bo_list->num_entries; ++i) {
-            e = &p->bo_list->array[i];
-
+    amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
         if (!e->user_pages)
             continue;
 
@@ -759,7 +745,6 @@ error_free_pages:
                      e->robj->tbo.ttm->num_pages);
         kvfree(e->user_pages);
     }
-    }
 
     return r;
 }
@@ -820,12 +805,13 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 {
-    struct amdgpu_device *adev = p->adev;
     struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+    struct amdgpu_device *adev = p->adev;
     struct amdgpu_vm *vm = &fpriv->vm;
+    struct amdgpu_bo_list_entry *e;
     struct amdgpu_bo_va *bo_va;
     struct amdgpu_bo *bo;
-    int i, r;
+    int r;
 
     r = amdgpu_vm_clear_freed(adev, vm, NULL);
     if (r)
@@ -855,16 +841,15 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
             return r;
     }
 
-    if (p->bo_list) {
-        for (i = 0; i < p->bo_list->num_entries; i++) {
+    amdgpu_bo_list_for_each_entry(e, p->bo_list) {
         struct dma_fence *f;
 
         /* ignore duplicates */
-        bo = p->bo_list->array[i].robj;
+        bo = e->robj;
         if (!bo)
             continue;
 
-        bo_va = p->bo_list->array[i].bo_va;
+        bo_va = e->bo_va;
         if (bo_va == NULL)
             continue;
 
@@ -878,8 +863,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
             return r;
         }
 
-    }
-
     r = amdgpu_vm_handle_moved(adev, vm);
     if (r)
         return r;
@@ -892,15 +875,14 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
     if (r)
         return r;
 
-    if (amdgpu_vm_debug && p->bo_list) {
+    if (amdgpu_vm_debug) {
         /* Invalidate all BOs to test for userspace bugs */
-        for (i = 0; i < p->bo_list->num_entries; i++) {
+        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
             /* ignore duplicates */
-            bo = p->bo_list->array[i].robj;
-            if (!bo)
+            if (!e->robj)
                 continue;
 
-            amdgpu_vm_bo_invalidate(adev, bo, false);
+            amdgpu_vm_bo_invalidate(adev, e->robj, false);
         }
     }
 
@@ -916,7 +898,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
     int r;
 
     /* Only for UVD/VCE VM emulation */
-    if (p->ring->funcs->parse_cs) {
+    if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
         unsigned i, j;
 
         for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -957,12 +939,20 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
             offset = m->start * AMDGPU_GPU_PAGE_SIZE;
             kptr += va_start - offset;
 
+            if (p->ring->funcs->parse_cs) {
             memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
             amdgpu_bo_kunmap(aobj);
 
             r = amdgpu_ring_parse_cs(ring, p, j);
             if (r)
                 return r;
+            } else {
+                ib->ptr = (uint32_t *)kptr;
+                r = amdgpu_ring_patch_cs_in_place(ring, p, j);
+                amdgpu_bo_kunmap(aobj);
+                if (r)
+                    return r;
+            }
 
             j++;
         }
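The branch above is why the new patch_cs_in_place hook exists: UVD/VCE command streams must be validated by the CPU, but a ring that can fix commands up directly in the mapped IB no longer pays for the memcpy into a shadow buffer first. A sketch of the two strategies with invented types (the real callbacks take an amdgpu_cs_parser and an IB index, not raw pointers):

#include <stdint.h>
#include <string.h>

struct ib { uint32_t *ptr; };

static int validate(uint32_t *dw, size_t ndw)
{
    (void)dw; (void)ndw;    /* stand-in for the ring's real checker */
    return 0;
}

/* parse_cs style: copy the IB into a shadow buffer, validate the copy,
 * and let the GPU execute the validated shadow. */
static int parse_copy(struct ib *ib, const void *kptr, size_t bytes)
{
    memcpy(ib->ptr, kptr, bytes);
    return validate(ib->ptr, bytes / 4);
}

/* patch_cs_in_place style: point the IB at the mapped buffer and
 * validate/patch it directly, skipping the copy. */
static int patch_in_place(struct ib *ib, void *kptr, size_t bytes)
{
    ib->ptr = kptr;
    return validate(ib->ptr, bytes / 4);
}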
@@ -1207,27 +1197,25 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                             union drm_amdgpu_cs *cs)
 {
+    struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
     struct amdgpu_ring *ring = p->ring;
     struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
     enum drm_sched_priority priority;
+    struct amdgpu_bo_list_entry *e;
     struct amdgpu_job *job;
-    unsigned i;
     uint64_t seq;
     int r;
 
     amdgpu_mn_lock(p->mn);
-    if (p->bo_list) {
-        for (i = p->bo_list->first_userptr;
-             i < p->bo_list->num_entries; ++i) {
-            struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+    amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+        struct amdgpu_bo *bo = e->robj;
 
         if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
             amdgpu_mn_unlock(p->mn);
             return -ERESTARTSYS;
         }
     }
-    }
 
     job = p->job;
     p->job = NULL;
@@ -1259,6 +1247,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
     amdgpu_job_free_resources(job);
 
     trace_amdgpu_cs_ioctl(job);
+    amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
     priority = job->base.s_priority;
     drm_sched_entity_push_job(&job->base, entity);
 
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
     struct drm_crtc *crtc;
     uint32_t ui32 = 0;
     uint64_t ui64 = 0;
-    int i, found;
+    int i, j, found;
     int ui32_size = sizeof(ui32);
 
     if (!info->return_size || !info->return_pointer)
@@ -348,7 +348,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
             break;
         case AMDGPU_HW_IP_UVD:
             type = AMD_IP_BLOCK_TYPE_UVD;
-            ring_mask |= adev->uvd.inst[0].ring.ready;
+            for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+                if (adev->uvd.harvest_config & (1 << i))
+                    continue;
+                ring_mask |= adev->uvd.inst[i].ring.ready;
+            }
             ib_start_alignment = 64;
             ib_size_alignment = 64;
             break;
@@ -361,9 +365,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
             break;
         case AMDGPU_HW_IP_UVD_ENC:
             type = AMD_IP_BLOCK_TYPE_UVD;
-            for (i = 0; i < adev->uvd.num_enc_rings; i++)
-                ring_mask |=
-                    adev->uvd.inst[0].ring_enc[i].ready << i;
+            for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+                if (adev->uvd.harvest_config & (1 << i))
+                    continue;
+                for (j = 0; j < adev->uvd.num_enc_rings; j++)
+                    ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
+            }
             ib_start_alignment = 64;
             ib_size_alignment = 64;
             break;
@@ -960,7 +967,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
     amdgpu_bo_unref(&pd);
 
     idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
-        amdgpu_bo_list_free(list);
+        amdgpu_bo_list_put(list);
 
     idr_destroy(&fpriv->bo_list_handles);
     mutex_destroy(&fpriv->bo_list_lock);
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -825,7 +825,7 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
     if (bo == NULL)
         return NULL;
 
-    ttm_bo_reference(&bo->tbo);
+    ttm_bo_get(&bo->tbo);
     return bo;
 }
 
@@ -843,8 +843,7 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
         return;
 
     tbo = &((*bo)->tbo);
-    ttm_bo_unref(&tbo);
-    if (tbo == NULL)
-        *bo = NULL;
+    ttm_bo_put(tbo);
+    *bo = NULL;
 }
 
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c

@@ -31,7 +31,7 @@
 #include <linux/power_supply.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
-
+#include <linux/nospec.h>
 
 static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
 
@@ -403,6 +403,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
             count = -EINVAL;
             goto fail;
         }
+        idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
 
         amdgpu_dpm_get_pp_num_states(adev, &data);
         state = data.states[idx];
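The array_index_nospec() line closes a Spectre-v1 style gap: idx comes from a sysfs write, and even after the bounds check the CPU can speculatively issue the data.states[idx] load with an out-of-range index. The helper clamps the index with branch-free arithmetic so even the speculative path stays inside the array. Below is a userspace rendering of the kernel's generic fallback mask; per-architecture versions differ, and right-shifting a negative value is strictly implementation-defined C, though arithmetic on all mainstream compilers:

#include <stdio.h>

/* ~0UL when 0 <= index < size (with size <= LONG_MAX), 0 otherwise,
 * computed without a data-dependent branch the CPU could mispredict. */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
    return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
    unsigned long size = 8;

    for (unsigned long idx = 6; idx < 10; idx++) {
        unsigned long safe = idx & index_mask_nospec(idx, size);

        printf("idx %lu -> %lu\n", idx, safe); /* 6,7 pass; 8,9 clamp to 0 */
    }
    return 0;
}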
@@ -1185,7 +1186,7 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
     int r, size = sizeof(vddnb);
 
     /* only APUs have vddnb */
-    if (adev->flags & AMD_IS_APU)
+    if (!(adev->flags & AMD_IS_APU))
         return -EINVAL;
 
     /* Can't get voltage when the card is off */
drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c

@@ -214,7 +214,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
                          u32 hw_ip, u32 instance, u32 ring,
                          struct amdgpu_ring **out_ring)
 {
-    int r, ip_num_rings;
+    int i, r, ip_num_rings = 0;
     struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
 
     if (!adev || !mgr || !out_ring)
@@ -243,14 +243,21 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
         ip_num_rings = adev->sdma.num_instances;
         break;
     case AMDGPU_HW_IP_UVD:
-        ip_num_rings = adev->uvd.num_uvd_inst;
+        for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+            if (!(adev->uvd.harvest_config & (1 << i)))
+                ip_num_rings++;
+        }
         break;
     case AMDGPU_HW_IP_VCE:
         ip_num_rings = adev->vce.num_rings;
         break;
     case AMDGPU_HW_IP_UVD_ENC:
+        for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+            if (!(adev->uvd.harvest_config & (1 << i)))
+                ip_num_rings++;
+        }
         ip_num_rings =
-            adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
+            adev->uvd.num_enc_rings * ip_num_rings;
         break;
     case AMDGPU_HW_IP_VCN_DEC:
         ip_num_rings = 1;
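The ring-count changes above all follow the same rule: a bit set in adev->uvd.harvest_config means that UVD instance was fused off at manufacturing and contributes no rings. A toy sketch of the arithmetic, with illustrative values (vega20 has two instances):

#include <stdio.h>

static unsigned int usable_enc_rings(unsigned int num_inst,
                                     unsigned int harvest_config,
                                     unsigned int enc_rings_per_inst)
{
    unsigned int i, inst = 0;

    for (i = 0; i < num_inst; i++)
        if (!(harvest_config & (1u << i)))
            inst++;                        /* instance i is present */

    return inst * enc_rings_per_inst;      /* total UVD_ENC rings */
}

int main(void)
{
    /* two UVD instances; instance 1 harvested (bit 1 set) */
    printf("%u enc rings\n", usable_enc_rings(2, 0x2, 1)); /* prints 1 */
    return 0;
}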
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h

@@ -123,6 +123,7 @@ struct amdgpu_ring_funcs {
     void (*set_wptr)(struct amdgpu_ring *ring);
     /* validating and patching of IBs */
     int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+    int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
     /* constants to calculate how many DW are needed for an emit */
     unsigned emit_frame_size;
     unsigned emit_ib_size;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h

@@ -314,6 +314,11 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
         TP_ARGS(mapping)
 );
 
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
+        TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+        TP_ARGS(mapping)
+);
+
 TRACE_EVENT(amdgpu_vm_set_ptes,
         TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
                  uint32_t incr, uint64_t flags),
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -92,11 +92,9 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
 }
 
 /**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference
- * structures.
+ * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
  *
- * @adev: AMDGPU device for which the global structures need to be
- * registered.
+ * @adev: AMDGPU device for which the global structures need to be registered.
  *
  * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
  * during bring up.
@@ -162,13 +160,12 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 }
 
 /**
- * amdgpu_init_mem_type - Initialize a memory manager for a specific
- * type of memory request.
+ * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
+ * memory request.
  *
- * @bdev: The TTM BO device object (contains a reference to
- * amdgpu_device)
+ * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
  * @type: The type of memory requested
- * @man:
+ * @man: The memory type manager for each domain
 *
 * This is called by ttm_bo_init_mm() when a buffer object is being
 * initialized.
@@ -321,8 +318,7 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
- * Assign the memory from new_mem to the memory of the buffer object
- * bo.
+ * Assign the memory from new_mem to the memory of the buffer object bo.
 */
 static void amdgpu_move_null(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *new_mem)
@@ -335,8 +331,12 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
 }
 
 /**
- * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT
- * buffer.
+ * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
+ *
+ * @bo: The bo to assign the memory to.
+ * @mm_node: Memory manager node for drm allocator.
+ * @mem: The region where the bo resides.
+ *
 */
 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
                                     struct drm_mm_node *mm_node,
@@ -352,10 +352,12 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 }
 
 /**
- * amdgpu_find_mm_node - Helper function finds the drm_mm_node
- *                       corresponding to @offset. It also modifies
- *                       the offset to be within the drm_mm_node
- *                       returned
+ * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
+ * @offset. It also modifies the offset to be within the drm_mm_node returned
+ *
+ * @mem: The region where the bo resides.
+ * @offset: The offset that drm_mm_node is used for finding.
+ *
 */
 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
                                                unsigned long *offset)
@@ -497,8 +499,8 @@ error:
 /**
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
- * This is a helper called by amdgpu_bo_move() and
- * amdgpu_move_vram_ram() to help move buffers to and from VRAM.
+ * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
+ * help move buffers to and from VRAM.
 */
 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                             bool evict, bool no_wait_gpu,
@@ -580,7 +582,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
     }
 
     /* blit VRAM to GTT */
-    r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
+    r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
     if (unlikely(r)) {
         goto out_cleanup;
     }
@@ -632,7 +634,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
     }
 
     /* copy to VRAM */
-    r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
+    r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
     if (unlikely(r)) {
         goto out_cleanup;
     }
@@ -794,8 +796,8 @@ struct amdgpu_ttm_tt {
 };
 
 /**
- * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to
- * by a USERPTR pointer to memory
+ * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to by a USERPTR
+ * pointer to memory
 *
 * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
 * This provides a wrapper around the get_user_pages() call to provide
@@ -818,8 +820,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
     down_read(&mm->mmap_sem);
 
     if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
-        /* check that we only use anonymous memory
-           to prevent problems with writeback */
+        /*
+         * check that we only use anonymous memory to prevent problems
+         * with writeback
+         */
         unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
         struct vm_area_struct *vma;
 
@@ -870,8 +874,7 @@ release_pages:
 }
 
 /**
- * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages
- * as necessary.
+ * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
@@ -915,8 +918,7 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
 }
 
 /**
- * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the
- * user pages
+ * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 **/
@@ -1295,8 +1297,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 }
 
 /**
- * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt
- * for the current task
+ * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
+ * task
 *
 * @ttm: The ttm_tt object to bind this userptr object to
 * @addr: The address in the current tasks VM space to use
@@ -1346,9 +1348,8 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
 }
 
 /**
- * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays
- * inside an address range for the
- * current task.
+ * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
+ * address range for the current task.
 *
 */
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
@@ -1386,8 +1387,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 }
 
 /**
- * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been
- * invalidated?
+ * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
 */
 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
                                        int *last_invalidated)
@@ -1400,10 +1400,8 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 }
 
 /**
- * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this
- * ttm_tt object been invalidated
- * since the last time they've
- * been set?
+ * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object
+ * been invalidated since the last time they've been set?
 */
 bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
 {
@@ -1459,13 +1457,12 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 }
 
 /**
- * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict
- * a buffer object.
+ * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
+ * object.
 *
- * Return true if eviction is sensible. Called by
- * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space()
- * which tries to evict buffer objects until it can find space
- * for a new object and by ttm_bo_force_list_clean() which is
+ * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
+ * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
+ * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
@@ -1515,8 +1512,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 }
 
 /**
- * amdgpu_ttm_access_memory - Read or Write memory that backs a
- * buffer object.
+ * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo: The buffer object to read/write
 * @offset: Offset into buffer object
@@ -1704,8 +1700,8 @@ error_create:
     return r;
 }
 /**
- * amdgpu_ttm_init - Init the memory management (ttm) as well as
- * various gtt/vram related fields.
+ * amdgpu_ttm_init - Init the memory management (ttm) as well as various
+ * gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
@@ -1856,8 +1852,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_ttm_late_init - Handle any late initialization for
- * amdgpu_ttm
+ * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
 */
 void amdgpu_ttm_late_init(struct amdgpu_device *adev)
 {
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -255,7 +255,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
         bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
     for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+        if (adev->uvd.harvest_config & (1 << j))
+            continue;
         r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
                                     &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
@@ -308,6 +309,8 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
     drm_sched_entity_destroy(&adev->uvd.entity);
 
     for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+        if (adev->uvd.harvest_config & (1 << j))
+            continue;
         kfree(adev->uvd.inst[j].saved_bo);
 
         amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
@@ -343,6 +346,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
     }
 
     for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+        if (adev->uvd.harvest_config & (1 << j))
+            continue;
         if (adev->uvd.inst[j].vcpu_bo == NULL)
             continue;
 
@@ -365,6 +370,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
     int i;
 
     for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+        if (adev->uvd.harvest_config & (1 << i))
+            continue;
         if (adev->uvd.inst[i].vcpu_bo == NULL)
             return -EINVAL;
 
@@ -1159,6 +1166,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
     unsigned fences = 0, i, j;
 
     for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+        if (adev->uvd.harvest_config & (1 << i))
+            continue;
         fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
         for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
             fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h

@@ -48,6 +48,9 @@ struct amdgpu_uvd_inst {
     uint32_t srbm_soft_reset;
 };
 
+#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
+#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)
+
 struct amdgpu_uvd {
     const struct firmware *fw;    /* UVD firmware */
     unsigned fw_version;
@@ -61,6 +64,7 @@ struct amdgpu_uvd {
     atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
     struct drm_sched_entity entity;
     struct delayed_work idle_work;
+    unsigned harvest_config;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -2344,6 +2344,35 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
     return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
 }
 
+/**
+ * amdgpu_vm_bo_trace_cs - trace all reserved mappings
+ *
+ * @vm: the requested vm
+ * @ticket: CS ticket
+ *
+ * Trace all mappings of BOs reserved during a command submission.
+ */
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
+{
+    struct amdgpu_bo_va_mapping *mapping;
+
+    if (!trace_amdgpu_vm_bo_cs_enabled())
+        return;
+
+    for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
+         mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
+        if (mapping->bo_va && mapping->bo_va->base.bo) {
+            struct amdgpu_bo *bo;
+
+            bo = mapping->bo_va->base.bo;
+            if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+                continue;
+        }
+
+        trace_amdgpu_vm_bo_cs(mapping);
+    }
+}
+
 /**
  * amdgpu_vm_bo_rmv - remove a bo to a specific vm
  *
|
|||||||
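The new helper walks the whole VA interval tree, but the trace_..._enabled() early-out keeps the walk off the hot path when tracing is disabled, and mappings backed by a BO are only reported when that BO's reservation is held by this submission's ww_acquire ticket. A hedged sketch of the same filter with plain C stand-ins for the tree and the tracepoint (all types here are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    struct ticket { int id; };

    struct mapping {
    	struct mapping *next;
    	const struct ticket *holder; /* who currently holds the BO's lock */
    	const char *name;
    };

    static bool tracing_enabled = true; /* stand-in for trace_..._enabled() */

    static void trace_cs(struct mapping *head, const struct ticket *ticket)
    {
    	struct mapping *m;

    	if (!tracing_enabled)
    		return; /* cheap early-out, as in the patch */

    	for (m = head; m; m = m->next) {
    		/* only report mappings reserved by *this* submission */
    		if (m->holder && m->holder != ticket)
    			continue;
    		printf("trace: %s\n", m->name);
    	}
    }

    int main(void)
    {
    	struct ticket t1 = { 1 }, t2 = { 2 };
    	struct mapping b = { NULL, &t2, "bo-held-elsewhere" };
    	struct mapping a = { &b, &t1, "bo-held-by-this-cs" };

    	trace_cs(&a, &t1); /* prints only the first mapping */
    	return 0;
    }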
@@ -318,6 +318,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
			       uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
@@ -41,6 +41,12 @@
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"

+#define mmUVD_PG0_CC_UVD_HARVESTING			0x00c7
+#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX		1
+//UVD_PG0_CC_UVD_HARVESTING
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT	0x1
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK	0x00000002L
+
#define UVD7_MAX_HW_INSTANCES_VEGA20			2

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -370,10 +376,25 @@ error:
static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	if (adev->asic_type == CHIP_VEGA20)
+
+	if (adev->asic_type == CHIP_VEGA20) {
+		u32 harvest;
+		int i;
+
		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
-	else
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
+			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
+				adev->uvd.harvest_config |= 1 << i;
+			}
+		}
+		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
+						 AMDGPU_UVD_HARVEST_UVD1))
+			/* both instances are harvested, disable the block */
+			return -ENOENT;
+	} else {
		adev->uvd.num_uvd_inst = 1;
+	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
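This is where the harvest bitmask is built: each instance's fuse register is read, and a set UVD_DISABLE bit marks that instance as harvested; if every instance is fused off, the whole IP block is refused at early_init. A stand-alone sketch of the detection logic with a stubbed register read in place of RREG32_SOC15 (the stub's values are invented):

    #include <stdio.h>

    #define UVD_DISABLE_MASK 0x00000002u
    #define HARVEST_UVD0     (1u << 0)
    #define HARVEST_UVD1     (1u << 1)

    static unsigned int read_harvest_fuse(int inst)
    {
    	/* pretend instance 1 is fused off */
    	return inst == 1 ? UVD_DISABLE_MASK : 0;
    }

    int main(void)
    {
    	unsigned int harvest_config = 0;
    	int i;

    	for (i = 0; i < 2; i++)
    		if (read_harvest_fuse(i) & UVD_DISABLE_MASK)
    			harvest_config |= 1u << i;

    	if (harvest_config == (HARVEST_UVD0 | HARVEST_UVD1)) {
    		puts("both instances harvested: disable the IP block");
    		return 1; /* the driver returns -ENOENT here */
    	}
    	printf("harvest_config = 0x%x\n", harvest_config);
    	return 0;
    }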
@@ -393,6 +414,8 @@ static int uvd_v7_0_sw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
@@ -425,6 +448,8 @@ static int uvd_v7_0_sw_init(void *handle)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			sprintf(ring->name, "uvd<%d>", j);
@@ -472,6 +497,8 @@ static int uvd_v7_0_sw_fini(void *handle)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
@@ -500,6 +527,8 @@ static int uvd_v7_0_hw_init(void *handle)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
@@ -579,8 +608,11 @@ static int uvd_v7_0_hw_fini(void *handle)
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

-	for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
+	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
		adev->uvd.inst[i].ring.ready = false;
+	}

	return 0;
}
@@ -623,6 +655,8 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
@@ -695,6 +729,8 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
@@ -751,6 +787,8 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
	init_table += header->uvd_table_offset;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
		ring = &adev->uvd.inst[i].ring;
		ring->wptr = 0;
		size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
@@ -890,6 +928,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+		if (adev->uvd.harvest_config & (1 << k))
+			continue;
		/* disable DPG */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
@@ -902,6 +942,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
	uvd_v7_0_mc_resume(adev);

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+		if (adev->uvd.harvest_config & (1 << k))
+			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
@@ -1069,6 +1111,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

@@ -1205,6 +1249,34 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
	return r;
}

+/**
+ * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
+ *
+ * @p: the CS parser with the IBs
+ * @ib_idx: which IB to patch
+ *
+ */
+static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+					   uint32_t ib_idx)
+{
+	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+	unsigned i;
+
+	/* No patching necessary for the first instance */
+	if (!p->ring->me)
+		return 0;
+
+	for (i = 0; i < ib->length_dw; i += 2) {
+		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+
+		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
+		reg += p->adev->reg_offset[UVD_HWIP][1][1];
+
+		amdgpu_set_ib_value(p, ib_idx, i, reg);
+	}
+	return 0;
+}
+
 /**
  * uvd_v7_0_ring_emit_ib - execute indirect buffer
  *
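The patching above rebases every register offset in the IB from instance 0's register window to instance 1's, which is what makes userspace command streams written against instance 0 runnable on the second UVD instance. The arithmetic in isolation, with made-up base offsets (the real ones live in adev->reg_offset):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	const uint32_t base_inst0 = 0x7800; /* hypothetical */
    	const uint32_t base_inst1 = 0x7e00; /* hypothetical */
    	uint32_t ib[] = { 0x7840, 0x1, 0x7844, 0x2 }; /* reg, value pairs */
    	unsigned i;

    	/* same walk as the patch: every even dword is a register offset */
    	for (i = 0; i < sizeof(ib) / sizeof(ib[0]); i += 2)
    		ib[i] = ib[i] - base_inst0 + base_inst1;

    	for (i = 0; i < 4; i++)
    		printf("0x%04x\n", ib[i]); /* 0x7e40, 0x0001, 0x7e44, 0x0002 */
    	return 0;
    }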
@@ -1697,6 +1769,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
+	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
@@ -1756,6 +1829,8 @@ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
@@ -1767,6 +1842,8 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
@@ -1786,6 +1863,8 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
@@ -60,7 +60,14 @@

enum {
	LINK_RATE_REF_FREQ_IN_MHZ = 27,
-	PEAK_FACTOR_X1000 = 1006
+	PEAK_FACTOR_X1000 = 1006,
+	/*
+	 * Some receivers fail to train on first try and are good
+	 * on subsequent tries. 2 retries should be plenty. If we
+	 * don't have a successful training then we don't expect to
+	 * ever get one.
+	 */
+	LINK_TRAINING_MAX_VERIFY_RETRY = 2
};

/*******************************************************************************
@@ -760,7 +767,16 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
		 */

		/* deal with non-mst cases */
-		dp_verify_link_cap(link, &link->reported_link_cap);
+		for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+			int fail_count = 0;
+
+			dp_verify_link_cap(link,
+					   &link->reported_link_cap,
+					   &fail_count);
+
+			if (fail_count == 0)
+				break;
+		}
	}

	/* HDMI-DVI Dongle */
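This is the HMD link-training fix from the merge summary: rather than trusting a single verification pass, the caller retries a small, bounded number of times and stops as soon as a pass completes with no failures. The generic shape of that loop, with verify() as a stand-in for dp_verify_link_cap() and an invented failure pattern:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_VERIFY_RETRY 2

    static bool verify(int attempt, int *fail_count)
    {
    	/* pretend the receiver trains only on the second try */
    	if (attempt == 0)
    		(*fail_count)++;
    	return true;
    }

    int main(void)
    {
    	int i, attempts = 0;

    	for (i = 0; i < MAX_VERIFY_RETRY; i++) {
    		int fail_count = 0;

    		attempts++;
    		verify(i, &fail_count);
    		if (fail_count == 0)
    			break; /* clean pass: no point retrying */
    	}
    	printf("settled after %d attempt(s)\n", attempts);
    	return 0;
    }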
@@ -33,7 +33,6 @@
#include "include/vector.h"
#include "core_types.h"
#include "dc_link_ddc.h"
-#include "engine.h"
#include "aux_engine.h"

#define AUX_POWER_UP_WA_DELAY 500
@@ -640,7 +639,6 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
		enum i2caux_transaction_action action)
{
	struct ddc *ddc_pin = ddc->ddc_pin;
-	struct engine *engine;
	struct aux_engine *aux_engine;
	enum aux_channel_operation_result operation_result;
	struct aux_request_transaction_data aux_req;
@@ -652,8 +650,8 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
	memset(&aux_req, 0, sizeof(aux_req));
	memset(&aux_rep, 0, sizeof(aux_rep));

-	engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
-	aux_engine = engine->funcs->acquire(engine, ddc_pin);
+	aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+	aux_engine->funcs->acquire(aux_engine, ddc_pin);

	aux_req.type = type;
	aux_req.action = action;
@@ -685,7 +683,7 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
		res = -1;
		break;
	}
-	aux_engine->base.funcs->release_engine(&aux_engine->base);
+	aux_engine->funcs->release_engine(aux_engine);
	return res;
}

@@ -1088,7 +1088,8 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)

bool dp_verify_link_cap(
	struct dc_link *link,
-	struct dc_link_settings *known_limit_link_setting)
+	struct dc_link_settings *known_limit_link_setting,
+	int *fail_count)
{
	struct dc_link_settings max_link_cap = {0};
	struct dc_link_settings cur_link_setting = {0};
@@ -1160,6 +1161,8 @@ bool dp_verify_link_cap(
				skip_video_pattern);
		if (status == LINK_TRAINING_SUCCESS)
			success = true;
+		else
+			(*fail_count)++;
	}

	if (success)
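The new int *fail_count out-parameter is what lets the retry loop above distinguish "eventually succeeded" from "succeeded cleanly": the boolean return alone cannot carry that. A minimal sketch of the same API shape (names and the failure pattern are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    static bool try_steps(int *fail_count)
    {
    	bool success = false;
    	int step;

    	for (step = 0; step < 3; step++) {
    		bool ok = (step == 2); /* pretend only the last step trains */
    		if (ok)
    			success = true;
    		else
    			(*fail_count)++; /* count every failed attempt */
    	}
    	return success;
    }

    int main(void)
    {
    	int fails = 0;
    	bool ok = try_steps(&fails);

    	printf("ok=%d, failed attempts=%d\n", ok, fails); /* ok=1, 2 */
    	return 0;
    }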
@@ -192,7 +192,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
	kref_put(&tf->refcount, dc_transfer_func_free);
}

-struct dc_transfer_func *dc_create_transfer_func()
+struct dc_transfer_func *dc_create_transfer_func(void)
{
	struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);

@@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"

-#define DC_VER "3.1.58"
+#define DC_VER "3.1.59"

#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -28,12 +28,12 @@
#include "dce/dce_11_0_sh_mask.h"

#define CTX \
-	aux110->base.base.ctx
+	aux110->base.ctx
#define REG(reg_name)\
	(aux110->regs->reg_name)

#define DC_LOGGER \
-	engine->base.ctx->logger
+	engine->ctx->logger

#include "reg_helper.h"

@@ -51,9 +51,9 @@ enum {
	AUX_DEFER_RETRY_COUNTER = 6
};
static void release_engine(
-	struct engine *engine)
+	struct aux_engine *engine)
{
-	struct aux_engine_dce110 *aux110 = FROM_ENGINE(engine);
+	struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);

	dal_ddc_close(engine->ddc);

@@ -827,22 +827,21 @@ static bool end_of_transaction_command(

	/* according Syed, it does not need now DoDummyMOT */
}
-bool submit_request(
-	struct engine *engine,
+static bool submit_request(
+	struct aux_engine *engine,
	struct i2caux_transaction_request *request,
	bool middle_of_transaction)
{
-	struct aux_engine *aux_engine = FROM_AUX_ENGINE_ENGINE(engine);

	bool result;
	bool mot_used = true;

	switch (request->operation) {
	case I2CAUX_TRANSACTION_READ:
-		result = read_command(aux_engine, request, mot_used);
+		result = read_command(engine, request, mot_used);
		break;
	case I2CAUX_TRANSACTION_WRITE:
-		result = write_command(aux_engine, request, mot_used);
+		result = write_command(engine, request, mot_used);
		break;
	default:
		result = false;
@@ -854,45 +853,45 @@ bool submit_request(
	 */

	if (!middle_of_transaction || !result)
-		end_of_transaction_command(aux_engine, request);
+		end_of_transaction_command(engine, request);

	/* mask AUX interrupt */

	return result;
}
enum i2caux_engine_type get_engine_type(
-		const struct engine *engine)
+		const struct aux_engine *engine)
{
	return I2CAUX_ENGINE_TYPE_AUX;
}

-static struct aux_engine *acquire(
-	struct engine *engine,
+static bool acquire(
+	struct aux_engine *engine,
	struct ddc *ddc)
{
-	struct aux_engine *aux_engine = FROM_AUX_ENGINE_ENGINE(engine);
	enum gpio_result result;

-	if (aux_engine->funcs->is_engine_available) {
+	if (engine->funcs->is_engine_available) {
		/*check whether SW could use the engine*/
-		if (!aux_engine->funcs->is_engine_available(aux_engine))
-			return NULL;
+		if (!engine->funcs->is_engine_available(engine))
+			return false;
	}

	result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
		GPIO_DDC_CONFIG_TYPE_MODE_AUX);

	if (result != GPIO_RESULT_OK)
-		return NULL;
+		return false;

-	if (!aux_engine->funcs->acquire_engine(aux_engine)) {
+	if (!engine->funcs->acquire_engine(engine)) {
		dal_ddc_close(ddc);
-		return NULL;
+		return false;
	}

	engine->ddc = ddc;

-	return aux_engine;
+	return true;
}

static const struct aux_engine_funcs aux_engine_funcs = {
@@ -902,9 +901,6 @@ static const struct aux_engine_funcs aux_engine_funcs = {
	.read_channel_reply = read_channel_reply,
	.get_channel_status = get_channel_status,
	.is_engine_available = is_engine_available,
-};
-
-static const struct engine_funcs engine_funcs = {
	.release_engine = release_engine,
	.destroy_engine = dce110_engine_destroy,
	.submit_request = submit_request,
@@ -912,10 +908,10 @@ static const struct engine_funcs engine_funcs = {
	.acquire = acquire,
};

-void dce110_engine_destroy(struct engine **engine)
+void dce110_engine_destroy(struct aux_engine **engine)
{

-	struct aux_engine_dce110 *engine110 = FROM_ENGINE(*engine);
+	struct aux_engine_dce110 *engine110 = FROM_AUX_ENGINE(*engine);

	kfree(engine110);
	*engine = NULL;
@@ -927,13 +923,12 @@ struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_eng
	uint32_t timeout_period,
	const struct dce110_aux_registers *regs)
{
-	aux_engine110->base.base.ddc = NULL;
-	aux_engine110->base.base.ctx = ctx;
+	aux_engine110->base.ddc = NULL;
+	aux_engine110->base.ctx = ctx;
	aux_engine110->base.delay = 0;
	aux_engine110->base.max_defer_write_retry = 0;
-	aux_engine110->base.base.funcs = &engine_funcs;
	aux_engine110->base.funcs = &aux_engine_funcs;
-	aux_engine110->base.base.inst = inst;
+	aux_engine110->base.inst = inst;
	aux_engine110->timeout_period = timeout_period;
	aux_engine110->regs = regs;

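The theme running through this file and the resource files below is a flattening refactor: the generic struct engine base that aux_engine used to embed is removed, the two vtables (engine_funcs and aux_engine_funcs) are merged into one, and every call site drops a .base hop. A hedged sketch of the before/after shape (types here are illustrative, not the driver's):

    #include <stdio.h>

    struct aux_engine;

    struct aux_engine_funcs {
    	void (*release_engine)(struct aux_engine *engine);
    };

    struct aux_engine {
    	const struct aux_engine_funcs *funcs; /* one table, not funcs + base.funcs */
    	int inst;
    };

    static void release(struct aux_engine *engine)
    {
    	printf("released aux engine %d\n", engine->inst);
    }

    static const struct aux_engine_funcs funcs = { .release_engine = release };

    int main(void)
    {
    	struct aux_engine eng = { .funcs = &funcs, .inst = 0 };

    	/* before the refactor this was: eng.base.funcs->release_engine(&eng.base); */
    	eng.funcs->release_engine(&eng);
    	return 0;
    }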
@@ -103,9 +103,9 @@ struct aux_engine *dce110_aux_engine_construct(
	uint32_t timeout_period,
	const struct dce110_aux_registers *regs);

-void dce110_engine_destroy(struct engine **engine);
+void dce110_engine_destroy(struct aux_engine **engine);

bool dce110_aux_engine_acquire(
-	struct engine *aux_engine,
+	struct aux_engine *aux_engine,
	struct ddc *ddc);
#endif
@@ -150,7 +150,7 @@ static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
	}
}

-static void dce_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
		struct dc_link *link,
		struct psr_context *psr_context)
{
@@ -261,6 +261,8 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,

	/* notifyDMCUMsg */
	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+	return true;
}

static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
@@ -558,11 +560,12 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
		}

		/* assert if max retry hit */
-		ASSERT(retryCount <= 1000);
+		if (retryCount >= 1000)
+			ASSERT(0);
	}
}

-static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
		struct dc_link *link,
		struct psr_context *psr_context)
{
@@ -577,7 +580,7 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,

	/* If microcontroller is not running, do nothing */
	if (dmcu->dmcu_state != DMCU_RUNNING)
-		return;
+		return false;

	link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
			psr_context->psrExitLinkTrainingRequired);
@@ -677,6 +680,11 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,

	/* notifyDMCUMsg */
	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+	/* waitDMCUReadyForCmd */
+	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+	return true;
}

static void dcn10_psr_wait_loop(
|
|||||||
return &opp->base;
|
return &opp->base;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct engine *dce100_aux_engine_create(
|
struct aux_engine *dce100_aux_engine_create(
|
||||||
struct dc_context *ctx,
|
struct dc_context *ctx,
|
||||||
uint32_t inst)
|
uint32_t inst)
|
||||||
{
|
{
|
||||||
@@ -600,7 +600,7 @@ struct engine *dce100_aux_engine_create(
|
|||||||
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
|
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
|
||||||
&aux_engine_regs[inst]);
|
&aux_engine_regs[inst]);
|
||||||
|
|
||||||
return &aux_engine->base.base;
|
return &aux_engine->base;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct clock_source *dce100_clock_source_create(
|
struct clock_source *dce100_clock_source_create(
|
||||||
|
|||||||
@@ -2552,7 +2552,7 @@ static void pplib_apply_display_requirements(
|
|||||||
dc->prev_display_config = *pp_display_cfg;
|
dc->prev_display_config = *pp_display_cfg;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void dce110_set_bandwidth(
|
void dce110_set_bandwidth(
|
||||||
struct dc *dc,
|
struct dc *dc,
|
||||||
struct dc_state *context,
|
struct dc_state *context,
|
||||||
bool decrease_allowed)
|
bool decrease_allowed)
|
||||||
|
|||||||
@@ -68,6 +68,11 @@ void dce110_fill_display_configs(
|
|||||||
const struct dc_state *context,
|
const struct dc_state *context,
|
||||||
struct dm_pp_display_configuration *pp_display_cfg);
|
struct dm_pp_display_configuration *pp_display_cfg);
|
||||||
|
|
||||||
|
void dce110_set_bandwidth(
|
||||||
|
struct dc *dc,
|
||||||
|
struct dc_state *context,
|
||||||
|
bool decrease_allowed);
|
||||||
|
|
||||||
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
|
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
|
||||||
|
|
||||||
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
|
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
|
||||||
|
|||||||
@@ -604,7 +604,7 @@ static struct output_pixel_processor *dce110_opp_create(
|
|||||||
return &opp->base;
|
return &opp->base;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct engine *dce110_aux_engine_create(
|
struct aux_engine *dce110_aux_engine_create(
|
||||||
struct dc_context *ctx,
|
struct dc_context *ctx,
|
||||||
uint32_t inst)
|
uint32_t inst)
|
||||||
{
|
{
|
||||||
@@ -618,7 +618,7 @@ struct engine *dce110_aux_engine_create(
|
|||||||
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
|
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
|
||||||
&aux_engine_regs[inst]);
|
&aux_engine_regs[inst]);
|
||||||
|
|
||||||
return &aux_engine->base.base;
|
return &aux_engine->base;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct clock_source *dce110_clock_source_create(
|
struct clock_source *dce110_clock_source_create(
|
||||||
|
|||||||
@@ -604,7 +604,7 @@ struct output_pixel_processor *dce112_opp_create(
	return &opp->base;
}

-struct engine *dce112_aux_engine_create(
+struct aux_engine *dce112_aux_engine_create(
	struct dc_context *ctx,
	uint32_t inst)
{
@@ -618,7 +618,7 @@ struct engine *dce112_aux_engine_create(
		SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
		&aux_engine_regs[inst]);

-	return &aux_engine->base.base;
+	return &aux_engine->base;
}

struct clock_source *dce112_clock_source_create(
@@ -244,7 +244,16 @@ static void dce120_update_dchub(
	dh_data->dchub_info_valid = false;
}

+static void dce120_set_bandwidth(
+		struct dc *dc,
+		struct dc_state *context,
+		bool decrease_allowed)
+{
+	if (context->stream_count <= 0)
+		return;
+
+	dce110_set_bandwidth(dc, context, decrease_allowed);
+}
+
void dce120_hw_sequencer_construct(struct dc *dc)
{
@@ -254,5 +263,6 @@ void dce120_hw_sequencer_construct(struct dc *dc)
	dce110_hw_sequencer_construct(dc);
	dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
	dc->hwss.update_dchub = dce120_update_dchub;
+	dc->hwss.set_bandwidth = dce120_set_bandwidth;
}

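dce120_set_bandwidth is a thin wrapper: it guards the shared dce110 path behind a stream_count check and is then installed as the vega10 hook in the hwss table. The same delegation shape in isolation (types hypothetical; the reason the guard matters on vega10 is not restated here beyond what the merge summary says about blank screens):

    #include <stdio.h>

    struct state { int stream_count; };

    static void base_set_bandwidth(struct state *s)
    {
    	printf("reprogramming bandwidth for %d stream(s)\n", s->stream_count);
    }

    static void dce120_style_set_bandwidth(struct state *s)
    {
    	if (s->stream_count <= 0)
    		return; /* nothing active: skip the shared reprogramming path */
    	base_set_bandwidth(s);
    }

    int main(void)
    {
    	struct state none = { 0 }, two = { 2 };

    	dce120_style_set_bandwidth(&none); /* silently skipped */
    	dce120_style_set_bandwidth(&two);
    	return 0;
    }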
@@ -376,7 +376,7 @@ struct output_pixel_processor *dce120_opp_create(
			     ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
	return &opp->base;
}
-struct engine *dce120_aux_engine_create(
+struct aux_engine *dce120_aux_engine_create(
	struct dc_context *ctx,
	uint32_t inst)
{
@@ -390,7 +390,7 @@ struct engine *dce120_aux_engine_create(
		SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
		&aux_engine_regs[inst]);

-	return &aux_engine->base.base;
+	return &aux_engine->base;
}

static const struct bios_registers bios_regs = {
@@ -464,7 +464,7 @@ static struct output_pixel_processor *dce80_opp_create(
	return &opp->base;
}

-struct engine *dce80_aux_engine_create(
+struct aux_engine *dce80_aux_engine_create(
	struct dc_context *ctx,
	uint32_t inst)
{
@@ -478,7 +478,7 @@ struct engine *dce80_aux_engine_create(
		SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
		&aux_engine_regs[inst]);

-	return &aux_engine->base.base;
+	return &aux_engine->base;
}

static struct stream_encoder *dce80_stream_encoder_create(
@@ -594,7 +594,7 @@ static struct output_pixel_processor *dcn10_opp_create(
	return &opp->base;
}

-struct engine *dcn10_aux_engine_create(
+struct aux_engine *dcn10_aux_engine_create(
	struct dc_context *ctx,
	uint32_t inst)
{
@@ -608,7 +608,7 @@ struct engine *dcn10_aux_engine_create(
		SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
		&aux_engine_regs[inst]);

-	return &aux_engine->base.base;
+	return &aux_engine->base;
}

static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
@@ -138,7 +138,7 @@ struct resource_pool {
	struct output_pixel_processor *opps[MAX_PIPES];
	struct timing_generator *timing_generators[MAX_PIPES];
	struct stream_encoder *stream_enc[MAX_PIPES * 2];
-	struct engine *engines[MAX_PIPES];
+	struct aux_engine *engines[MAX_PIPES];
	struct hubbub *hubbub;
	struct mpc *mpc;
	struct pp_smu_funcs_rv *pp_smu;
@@ -35,7 +35,8 @@ struct dc_link_settings;

bool dp_verify_link_cap(
	struct dc_link *link,
-	struct dc_link_settings *known_limit_link_setting);
+	struct dc_link_settings *known_limit_link_setting,
+	int *fail_count);

bool dp_validate_mode_timing(
	struct dc_link *link,
@@ -26,46 +26,72 @@
#ifndef __DAL_AUX_ENGINE_H__
#define __DAL_AUX_ENGINE_H__

-#include "engine.h"
+#include "dc_ddc_types.h"
#include "include/i2caux_interface.h"

-struct aux_engine;
-union aux_config;
-struct aux_engine_funcs {
-	void (*destroy)(
-		struct aux_engine **ptr);
-	bool (*acquire_engine)(
-		struct aux_engine *engine);
-	void (*configure)(
-		struct aux_engine *engine,
-		union aux_config cfg);
-	void (*submit_channel_request)(
-		struct aux_engine *engine,
-		struct aux_request_transaction_data *request);
-	void (*process_channel_reply)(
-		struct aux_engine *engine,
-		struct aux_reply_transaction_data *reply);
-	int (*read_channel_reply)(
-		struct aux_engine *engine,
-		uint32_t size,
-		uint8_t *buffer,
-		uint8_t *reply_result,
-		uint32_t *sw_status);
-	enum aux_channel_operation_result (*get_channel_status)(
-		struct aux_engine *engine,
-		uint8_t *returned_bytes);
-	bool (*is_engine_available)(struct aux_engine *engine);
+enum i2caux_transaction_operation {
+	I2CAUX_TRANSACTION_READ,
+	I2CAUX_TRANSACTION_WRITE
};
-struct engine;
+
+enum i2caux_transaction_address_space {
+	I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
+	I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
+};
+
+struct i2caux_transaction_payload {
+	enum i2caux_transaction_address_space address_space;
+	uint32_t address;
+	uint32_t length;
+	uint8_t *data;
+};
+
+enum i2caux_transaction_status {
+	I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
+	I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
+	I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+	I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
+	I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+	I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
+	I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+	I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+	I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+	I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+	I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
+};
+
+struct i2caux_transaction_request {
+	enum i2caux_transaction_operation operation;
+	struct i2caux_transaction_payload payload;
+	enum i2caux_transaction_status status;
+};
+
+enum i2caux_engine_type {
+	I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
+	I2CAUX_ENGINE_TYPE_AUX,
+	I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
+	I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
+	I2CAUX_ENGINE_TYPE_I2C_SW
+};
+
+enum i2c_default_speed {
+	I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
+	I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+};
+
+union aux_config;

struct aux_engine {
-	struct engine base;
+	uint32_t inst;
+	struct ddc *ddc;
+	struct dc_context *ctx;
	const struct aux_engine_funcs *funcs;
	/* following values are expressed in milliseconds */
	uint32_t delay;
	uint32_t max_defer_write_retry;

	bool acquire_reset;
};

struct read_command_context {
	uint8_t *buffer;
	uint32_t current_read_length;
@@ -86,6 +112,7 @@ struct read_command_context {
	bool transaction_complete;
	bool operation_succeeded;
};
+

struct write_command_context {
	bool mot;

@@ -110,4 +137,44 @@ struct write_command_context {
	bool transaction_complete;
	bool operation_succeeded;
};
+
+
+struct aux_engine_funcs {
+	void (*destroy)(
+		struct aux_engine **ptr);
+	bool (*acquire_engine)(
+		struct aux_engine *engine);
+	void (*configure)(
+		struct aux_engine *engine,
+		union aux_config cfg);
+	void (*submit_channel_request)(
+		struct aux_engine *engine,
+		struct aux_request_transaction_data *request);
+	void (*process_channel_reply)(
+		struct aux_engine *engine,
+		struct aux_reply_transaction_data *reply);
+	int (*read_channel_reply)(
+		struct aux_engine *engine,
+		uint32_t size,
+		uint8_t *buffer,
+		uint8_t *reply_result,
+		uint32_t *sw_status);
+	enum aux_channel_operation_result (*get_channel_status)(
+		struct aux_engine *engine,
+		uint8_t *returned_bytes);
+	bool (*is_engine_available)(struct aux_engine *engine);
+	enum i2caux_engine_type (*get_engine_type)(
+		const struct aux_engine *engine);
+	bool (*acquire)(
+		struct aux_engine *engine,
+		struct ddc *ddc);
+	bool (*submit_request)(
+		struct aux_engine *engine,
+		struct i2caux_transaction_request *request,
+		bool middle_of_transaction);
+	void (*release_engine)(
+		struct aux_engine *engine);
+	void (*destroy_engine)(
+		struct aux_engine **engine);
+};
#endif
@@ -48,7 +48,7 @@ struct dmcu_funcs {
			const char *src,
			unsigned int bytes);
	void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
-	void (*setup_psr)(struct dmcu *dmcu,
+	bool (*setup_psr)(struct dmcu *dmcu,
			struct dc_link *link,
			struct psr_context *psr_context);
	void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
@@ -1,106 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DAL_ENGINE_H__
-#define __DAL_ENGINE_H__
-
-#include "dc_ddc_types.h"
-
-enum i2caux_transaction_operation {
-	I2CAUX_TRANSACTION_READ,
-	I2CAUX_TRANSACTION_WRITE
-};
-
-enum i2caux_transaction_address_space {
-	I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
-	I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
-};
-
-struct i2caux_transaction_payload {
-	enum i2caux_transaction_address_space address_space;
-	uint32_t address;
-	uint32_t length;
-	uint8_t *data;
-};
-
-enum i2caux_transaction_status {
-	I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
-	I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
-	I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
-	I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
-	I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
-	I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
-	I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
-	I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
-	I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
-	I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
-	I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
-};
-
-struct i2caux_transaction_request {
-	enum i2caux_transaction_operation operation;
-	struct i2caux_transaction_payload payload;
-	enum i2caux_transaction_status status;
-};
-
-enum i2caux_engine_type {
-	I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
-	I2CAUX_ENGINE_TYPE_AUX,
-	I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
-	I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
-	I2CAUX_ENGINE_TYPE_I2C_SW
-};
-
-enum i2c_default_speed {
-	I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
-	I2CAUX_DEFAULT_I2C_SW_SPEED = 50
-};
-
-struct engine;
-
-struct engine_funcs {
-	enum i2caux_engine_type (*get_engine_type)(
-		const struct engine *engine);
-	struct aux_engine* (*acquire)(
-		struct engine *engine,
-		struct ddc *ddc);
-	bool (*submit_request)(
-		struct engine *engine,
-		struct i2caux_transaction_request *request,
-		bool middle_of_transaction);
-	void (*release_engine)(
-		struct engine *engine);
-	void (*destroy_engine)(
-		struct engine **engine);
-};
-
-struct engine {
-	const struct engine_funcs *funcs;
-	uint32_t inst;
-	struct ddc *ddc;
-	struct dc_context *ctx;
-};
-
-#endif
@@ -1074,7 +1074,7 @@ struct atom_integrated_system_info_v1_11
  uint16_t dpphy_override;                  // bit vector, enum of atom_sysinfo_dpphy_override_def
  uint16_t lvds_misc;                       // enum of atom_sys_info_lvds_misc_def
  uint16_t backlight_pwm_hz;                // pwm frequency in hz
-  uint8_t memorytype;                       // enum of atom_sys_mem_type
+  uint8_t memorytype;                       // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
  uint8_t umachannelnumber;                 // number of memory channels
  uint8_t pwr_on_digon_to_de;               /* all pwr sequence numbers below are in uint of 4ms */
  uint8_t pwr_on_de_to_vary_bl;
@@ -1084,18 +1084,25 @@ struct atom_integrated_system_info_v1_11
  uint8_t pwr_on_vary_bl_to_blon;
  uint8_t pwr_down_bloff_to_vary_bloff;
  uint8_t min_allowed_bl_level;
+  uint8_t htc_hyst_limit;
+  uint8_t htc_tmp_limit;
+  uint8_t reserved1;
+  uint8_t reserved2;
  struct atom_external_display_connection_info extdispconninfo;
  struct atom_14nm_dpphy_dvihdmi_tuningset dvi_tuningset;
  struct atom_14nm_dpphy_dvihdmi_tuningset hdmi_tuningset;
  struct atom_14nm_dpphy_dvihdmi_tuningset hdmi6g_tuningset;
-  struct atom_14nm_dpphy_dp_tuningset dp_tuningset;
-  struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;
+  struct atom_14nm_dpphy_dp_tuningset dp_tuningset;        // rbr 1.62G dp tuning set
+  struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;   // HBR3 dp tuning set
  struct atom_camera_data camera_info;
  struct atom_hdmi_retimer_redriver_set dp0_retimer_set;   //for DP0
  struct atom_hdmi_retimer_redriver_set dp1_retimer_set;   //for DP1
  struct atom_hdmi_retimer_redriver_set dp2_retimer_set;   //for DP2
  struct atom_hdmi_retimer_redriver_set dp3_retimer_set;   //for DP3
-  uint32_t reserved[108];
+  struct atom_14nm_dpphy_dp_tuningset dp_hbr_tuningset;    //hbr 2.7G dp tuning set
+  struct atom_14nm_dpphy_dp_tuningset dp_hbr2_tuningset;   //hbr2 5.4G dp turnig set
+  struct atom_14nm_dpphy_dp_tuningset edp_tuningset;       //edp tuning set
+  uint32_t reserved[66];
};

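Note how the new members are introduced: they are carved out of the trailing reserved[] array, which shrinks by exactly the space consumed, so the struct's total size and every earlier offset stay the same for consumers built against the old layout. A sketch of that idiom with a compile-time check (the field names and sizes here are illustrative, not atomfirmware's):

    #include <assert.h>
    #include <stdint.h>

    struct info_v1 {
    	uint32_t feature_a;
    	uint32_t reserved[8];
    };

    struct info_v2 {
    	uint32_t feature_a;
    	uint32_t feature_b;   /* carved out of reserved[] */
    	uint32_t feature_c;
    	uint32_t reserved[6]; /* shrunk by exactly the space consumed */
    };

    /* the firmware/driver contract survives only if the size is unchanged */
    static_assert(sizeof(struct info_v1) == sizeof(struct info_v2),
    	      "layout change would break the ABI");

    int main(void) { return 0; }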
@@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
	{ ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },

	{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
	{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },

	{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
@@ -244,6 +244,7 @@ static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+/* convert from 8bit vid to real voltage in mV*4 */
 static uint32_t smu8_convert_8Bit_index_to_voltage(
 			struct pp_hwmgr *hwmgr, uint16_t voltage)
 {
@@ -1702,13 +1703,13 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 	case AMDGPU_PP_SENSOR_VDDNB:
 		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
 			CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
-		vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
+		vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
 		*((uint32_t *)value) = vddnb;
 		return 0;
 	case AMDGPU_PP_SENSOR_VDDGFX:
 		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
 			CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
-		vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+		vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
 		*((uint32_t *)value) = vddgfx;
 		return 0;
 	case AMDGPU_PP_SENSOR_UVD_VCLK:
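The two "/ 4" changes above are a unit fix: smu8_convert_8Bit_index_to_voltage() reports the SVI2 voltage in mV*4 units (hence the comment added before it), while the sensor interface expects plain millivolts. A minimal userspace sketch of the arithmetic; the encoding below (1.55 V base, 6.25 mV steps) is an assumption for illustration, the real driver resolves the VID through an ATOM voltage table:

#include <stdint.h>
#include <stdio.h>

/* Assumed SVI2 encoding in mV*4 units: 1550 mV * 4 = 6200, 6.25 mV * 4 = 25.
 * Hypothetical stand-in for the driver's ATOM-table lookup. */
static uint32_t convert_8bit_index_to_voltage(uint8_t vid)
{
	return 6200 - (uint32_t)vid * 25;	/* result is in mV*4 */
}

int main(void)
{
	uint8_t vid = 0x40;	/* e.g. the CURRENT_NB_VID field read above */

	/* without the fix, 4600 (mV*4) was reported as if it were mV */
	printf("raw   : %u\n", convert_8bit_index_to_voltage(vid));
	/* with the fix, divide by 4 to get real millivolts */
	printf("fixed : %u mV\n", convert_8bit_index_to_voltage(vid) / 4);
	return 0;
}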
@@ -1204,7 +1204,6 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	SMIO_Pattern vol_level;
 	uint32_t mvdd;
-	uint16_t us_mvdd;
 
 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
 
@@ -1255,16 +1254,11 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
 			"in Clock Dependency Table",
 			);
 
-	us_mvdd = 0;
-	if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
-			(data->mclk_dpm_key_disabled))
-		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
-	else {
-		if (!polaris10_populate_mvdd_value(hwmgr,
+	if (!((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+			(data->mclk_dpm_key_disabled)))
+		polaris10_populate_mvdd_value(hwmgr,
 				data->dpm_table.mclk_table.dpm_levels[0].value,
-				&vol_level))
-			us_mvdd = vol_level.Voltage;
-	}
+				&vol_level);
 
 	if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
 		table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
@@ -1517,7 +1511,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
 
-	uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+	uint8_t i, stretch_amount, volt_offset = 0;
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1568,11 +1562,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 
 	smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
 	/* Populate CKS Lookup Table */
-	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-		stretch_amount2 = 0;
-	else if (stretch_amount == 3 || stretch_amount == 4)
-		stretch_amount2 = 1;
-	else {
+	if (stretch_amount == 0 || stretch_amount > 5) {
 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 				PHM_PlatformCaps_ClockStretcher);
 		PP_ASSERT_WITH_CODE(false,
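The clock-stretcher hunk above drops the now-unused stretch_amount2 classification and keeps only the validity check: values 1..5 are legal, anything else disables the feature. A small standalone sketch of that guard shape, with hypothetical names rather than the driver's code:

#include <stdbool.h>
#include <stdio.h>

static bool clock_stretcher_enabled = true;	/* stand-in for the platform cap */

static int populate_cks(unsigned int stretch_amount)
{
	/* one range check instead of enumerating {1,2,5} -> 0 and
	 * {3,4} -> 1 just to reject everything else */
	if (stretch_amount == 0 || stretch_amount > 5) {
		clock_stretcher_enabled = false;
		return -1;
	}
	return 0;	/* go on to fill the CKS lookup table */
}

int main(void)
{
	for (unsigned int s = 0; s <= 6; s++)
		printf("stretch_amount=%u -> %s\n", s,
		       populate_cks(s) ? "disable stretcher" : "ok");
	return 0;
}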
@@ -307,7 +307,7 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
 	if (bo == NULL)
 		return NULL;
 
-	ttm_bo_reference(&bo->tbo);
+	ttm_bo_get(&bo->tbo);
 	return bo;
 }
 
@@ -320,8 +320,7 @@ void radeon_bo_unref(struct radeon_bo **bo)
 		return;
 	rdev = (*bo)->rdev;
 	tbo = &((*bo)->tbo);
-	ttm_bo_unref(&tbo);
-	if (tbo == NULL)
-		*bo = NULL;
+	ttm_bo_put(tbo);
+	*bo = NULL;
 }
 
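The radeon hunks above are part of the TTM cleanups in this pull: ttm_bo_reference()/ttm_bo_unref() become ttm_bo_get()/ttm_bo_put(), matching the usual kernel get/put naming, and ttm_bo_put() no longer clears the caller's pointer, so radeon_bo_unref() now NULLs *bo itself. A userspace analog of that ownership pattern, with C11 atomics standing in for struct kref (not TTM's implementation):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	atomic_int refcount;
};

static void object_get(struct object *obj)
{
	atomic_fetch_add(&obj->refcount, 1);
}

static void object_put(struct object *obj)
{
	/* put takes the pointer by value and frees on the last reference;
	 * clearing the caller's pointer is the caller's job now */
	if (atomic_fetch_sub(&obj->refcount, 1) == 1)
		free(obj);
}

int main(void)
{
	struct object *obj = malloc(sizeof(*obj));

	atomic_init(&obj->refcount, 1);
	object_get(obj);	/* like radeon_bo_ref() taking a reference */
	object_put(obj);
	object_put(obj);	/* drops the last reference and frees */
	obj = NULL;		/* what radeon_bo_unref() does explicitly now */
	printf("done\n");
	return 0;
}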
@@ -20,7 +20,6 @@
 # OTHER DEALINGS IN THE SOFTWARE.
 #
 #
-ccflags-y := -Iinclude/drm
 gpu-sched-y := gpu_scheduler.o sched_fence.o
 
 obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
@@ -198,21 +198,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 }
 EXPORT_SYMBOL(drm_sched_entity_init);
 
-/**
- * drm_sched_entity_is_initialized - Query if entity is initialized
- *
- * @sched: Pointer to scheduler instance
- * @entity: The pointer to a valid scheduler entity
- *
- * return true if entity is initialized, false otherwise
- */
-static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
-					    struct drm_sched_entity *entity)
-{
-	return entity->rq != NULL &&
-		entity->rq->sched == sched;
-}
-
 /**
  * drm_sched_entity_is_idle - Check if entity is idle
  *
@@ -224,7 +209,8 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
 {
 	rmb();
 
-	if (!entity->rq || spsc_queue_peek(&entity->job_queue) == NULL)
+	if (list_empty(&entity->list) ||
+	    spsc_queue_peek(&entity->job_queue) == NULL)
 		return true;
 
 	return false;
@@ -275,11 +261,10 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 {
 	struct drm_gpu_scheduler *sched;
+	struct task_struct *last_user;
 	long ret = timeout;
 
 	sched = entity->rq->sched;
-	if (!drm_sched_entity_is_initialized(sched, entity))
-		return ret;
 	/**
 	 * The client will not queue more IBs during this fini, consume existing
 	 * queued IBs or discard them on SIGKILL
@@ -295,8 +280,10 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 
 	/* For killed process disable any more IBs enqueue right now */
-	if ((current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
-		drm_sched_entity_set_rq(entity, NULL);
+	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
+	if ((!last_user || last_user == current->group_leader) &&
+	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+		drm_sched_rq_remove_entity(entity->rq, entity);
 
 	return ret;
 }
@@ -317,7 +304,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 	struct drm_gpu_scheduler *sched;
 
 	sched = entity->rq->sched;
-	drm_sched_entity_set_rq(entity, NULL);
+	drm_sched_rq_remove_entity(entity->rq, entity);
 
 	/* Consumption of existing IBs wasn't completed. Forcefully
 	 * remove them here.
@@ -413,15 +400,12 @@ void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
 	if (entity->rq == rq)
 		return;
 
+	BUG_ON(!rq);
+
 	spin_lock(&entity->rq_lock);
-
-	if (entity->rq)
-		drm_sched_rq_remove_entity(entity->rq, entity);
-
+	drm_sched_rq_remove_entity(entity->rq, entity);
 	entity->rq = rq;
-	if (rq)
-		drm_sched_rq_add_entity(rq, entity);
-
+	drm_sched_rq_add_entity(rq, entity);
 	spin_unlock(&entity->rq_lock);
 }
 EXPORT_SYMBOL(drm_sched_entity_set_rq);
@@ -541,6 +525,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 
 	trace_drm_sched_job(sched_job, entity);
 
+	WRITE_ONCE(entity->last_user, current->group_leader);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
 	/* first job wakes up scheduler */
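The scheduler hunks above are the fix for a killed process racing with other users of a shared entity: drm_sched_entity_push_job() publishes the pusher's group leader in entity->last_user, and drm_sched_entity_flush() only tears the entity out of its run queue when the exiting task was that last user (or nobody pushed at all). The gpu_scheduler.h hunk further down adds the member itself. A userspace analog of the cmpxchg handshake, with C11 atomics standing in for the kernel primitives:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct task { const char *name; };

static _Atomic(struct task *) last_user;	/* entity->last_user analog */

static void push_job(struct task *me)
{
	/* what drm_sched_entity_push_job() does with WRITE_ONCE() */
	atomic_store(&last_user, me);
}

static bool flush_may_remove(struct task *me)
{
	struct task *expected = me;

	/* cmpxchg(&entity->last_user, me, NULL): swap in NULL only if we
	 * are still the last pusher, otherwise learn who beat us to it */
	if (atomic_compare_exchange_strong(&last_user, &expected, NULL))
		return true;
	return expected == NULL;	/* nobody has pushed since */
}

int main(void)
{
	struct task a = { "A" }, b = { "B" };

	push_job(&a);
	printf("A may remove after its own push: %d\n", flush_may_remove(&a));
	push_job(&b);
	printf("A may remove after B pushed:     %d\n", flush_may_remove(&a));
	return 0;
}

Prints 1 then 0: once another task has queued work on the entity, the dying task no longer unplugs it from the run queue.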
@@ -47,13 +47,7 @@
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
+#include <drm/ttm/ttm_set_memory.h>
 
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
-
 #define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION		16
@@ -222,52 +216,6 @@ static struct kobj_type ttm_pool_kobj_type = {
 
 static struct ttm_pool_manager *_manager;
 
-#ifndef CONFIG_X86
-static int set_pages_wb(struct page *page, int numpages)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < numpages; i++)
-		unmap_page_from_agp(page++);
-#endif
-	return 0;
-}
-
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		unmap_page_from_agp(pages[i]);
-#endif
-	return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		map_page_into_agp(pages[i]);
-#endif
-	return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		map_page_into_agp(pages[i]);
-#endif
-	return 0;
-}
-#endif
-
 /**
  * Select the right pool or requested caching state and ttm flags. */
 static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
@@ -302,13 +250,13 @@ static void ttm_pages_put(struct page *pages[], unsigned npages,
 	unsigned int i, pages_nr = (1 << order);
 
 	if (order == 0) {
-		if (set_pages_array_wb(pages, npages))
+		if (ttm_set_pages_array_wb(pages, npages))
 			pr_err("Failed to set %d pages to wb!\n", npages);
 	}
 
 	for (i = 0; i < npages; ++i) {
 		if (order > 0) {
-			if (set_pages_wb(pages[i], pages_nr))
+			if (ttm_set_pages_wb(pages[i], pages_nr))
 				pr_err("Failed to set %d pages to wb!\n", pages_nr);
 		}
 		__free_pages(pages[i], order);
@@ -498,12 +446,12 @@ static int ttm_set_pages_caching(struct page **pages,
 	/* Set page caching */
 	switch (cstate) {
 	case tt_uncached:
-		r = set_pages_array_uc(pages, cpages);
+		r = ttm_set_pages_array_uc(pages, cpages);
 		if (r)
 			pr_err("Failed to set %d pages to uc!\n", cpages);
 		break;
 	case tt_wc:
-		r = set_pages_array_wc(pages, cpages);
+		r = ttm_set_pages_array_wc(pages, cpages);
 		if (r)
 			pr_err("Failed to set %d pages to wc!\n", cpages);
 		break;
@@ -50,12 +50,7 @@
 #include <linux/kthread.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
 
 #define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION		4
@@ -268,54 +263,19 @@ static struct kobj_type ttm_pool_kobj_type = {
 	.default_attrs = ttm_pool_attrs,
 };
 
-#ifndef CONFIG_X86
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		unmap_page_from_agp(pages[i]);
-#endif
-	return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		map_page_into_agp(pages[i]);
-#endif
-	return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		map_page_into_agp(pages[i]);
-#endif
-	return 0;
-}
-#endif /* for !CONFIG_X86 */
-
 static int ttm_set_pages_caching(struct dma_pool *pool,
 				 struct page **pages, unsigned cpages)
 {
 	int r = 0;
 	/* Set page caching */
 	if (pool->type & IS_UC) {
-		r = set_pages_array_uc(pages, cpages);
+		r = ttm_set_pages_array_uc(pages, cpages);
 		if (r)
 			pr_err("%s: Failed to set %d pages to uc!\n",
 			       pool->dev_name, cpages);
 	}
 	if (pool->type & IS_WC) {
-		r = set_pages_array_wc(pages, cpages);
+		r = ttm_set_pages_array_wc(pages, cpages);
 		if (r)
 			pr_err("%s: Failed to set %d pages to wc!\n",
 			       pool->dev_name, cpages);
@@ -389,17 +349,14 @@ static void ttm_pool_update_free_locked(struct dma_pool *pool,
 static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
 {
 	struct page *page = d_page->p;
-	unsigned i, num_pages;
+	unsigned num_pages;
 
 	/* Don't set WB on WB page pool. */
 	if (!(pool->type & IS_CACHED)) {
 		num_pages = pool->size / PAGE_SIZE;
-		for (i = 0; i < num_pages; ++i, ++page) {
-			if (set_pages_array_wb(&page, 1)) {
-				pr_err("%s: Failed to set %d pages to wb!\n",
-				       pool->dev_name, 1);
-			}
-		}
+		if (ttm_set_pages_wb(page, num_pages))
+			pr_err("%s: Failed to set %d pages to wb!\n",
+			       pool->dev_name, num_pages);
 	}
 
 	list_del(&d_page->page_list);
@@ -420,7 +377,7 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
 
 	/* Don't set WB on WB page pool. */
 	if (npages && !(pool->type & IS_CACHED) &&
-	    set_pages_array_wb(pages, npages))
+	    ttm_set_pages_array_wb(pages, npages))
 		pr_err("%s: Failed to set %d pages to wb!\n",
 		       pool->dev_name, npages);
 
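One behavioural detail in the ttm_dma_page_put() hunk above: the old code changed the caching attribute one 4 KB page at a time, while the new ttm_set_pages_wb() call covers the whole (possibly huge) allocation in one ranged operation. On x86 a set_memory call can end in an expensive TLB flush, so batching matters. A hedged sketch of the idea; the flush counter is illustrative, not a real API:

#include <stdio.h>

static unsigned int tlb_flushes;	/* illustrative cost counter */

/* stand-in for a ranged page-attribute change: one call, one flush */
static int set_pages_wb(void *first_page, int numpages)
{
	(void)first_page;
	(void)numpages;
	tlb_flushes++;		/* real set_memory_*() may flush the TLB */
	return 0;
}

int main(void)
{
	static char pages[512][4096];	/* pretend this is one 2 MB allocation */
	int num_pages = 512;

	/* old loop: one call (and one potential flush) per page */
	for (int i = 0; i < num_pages; i++)
		set_pages_wb(pages[i], 1);
	printf("per-page : %u flushes\n", tlb_flushes);

	tlb_flushes = 0;
	/* new style: a single ranged call for the whole allocation */
	set_pages_wb(pages[0], num_pages);
	printf("batched  : %u flushes\n", tlb_flushes);
	return 0;
}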
@@ -38,9 +38,7 @@
 #include <drm/drm_cache.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
 
 /**
  * Allocates a ttm structure for the given BO.
@@ -115,8 +113,7 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 	return 0;
 }
 
-#ifdef CONFIG_X86
-static inline int ttm_tt_set_page_caching(struct page *p,
+static int ttm_tt_set_page_caching(struct page *p,
 				   enum ttm_caching_state c_old,
 				   enum ttm_caching_state c_new)
 {
@@ -129,26 +126,18 @@ static inline int ttm_tt_set_page_caching(struct page *p,
 		/* p isn't in the default caching state, set it to
 		 * writeback first to free its current memtype. */
 
-		ret = set_pages_wb(p, 1);
+		ret = ttm_set_pages_wb(p, 1);
 		if (ret)
 			return ret;
 	}
 
 	if (c_new == tt_wc)
-		ret = set_memory_wc((unsigned long) page_address(p), 1);
+		ret = ttm_set_pages_wc(p, 1);
 	else if (c_new == tt_uncached)
-		ret = set_pages_uc(p, 1);
+		ret = ttm_set_pages_uc(p, 1);
 
 	return ret;
 }
-#else /* CONFIG_X86 */
-static inline int ttm_tt_set_page_caching(struct page *p,
-					  enum ttm_caching_state c_old,
-					  enum ttm_caching_state c_new)
-{
-	return 0;
-}
-#endif /* CONFIG_X86 */
 
 /*
  * Change caching policy for the linear kernel map
@@ -66,6 +66,7 @@ enum drm_sched_priority {
  * @guilty: points to ctx's guilty.
  * @fini_status: contains the exit status in case the process was signalled.
  * @last_scheduled: points to the finished fence of the last scheduled job.
+ * @last_user: last group leader pushing a job into the entity.
  *
  * Entities will emit jobs in order to their corresponding hardware
  * ring, and the scheduler will alternate between entities based on
@@ -85,6 +86,7 @@ struct drm_sched_entity {
 	struct dma_fence_cb		cb;
 	atomic_t			*guilty;
 	struct dma_fence		*last_scheduled;
+	struct task_struct		*last_user;
 };
 
 /**
include/drm/ttm/ttm_set_memory.h (new file, 150 lines)
@@ -0,0 +1,150 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2018 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Huang Rui <ray.huang@amd.com>
+ */
+
+#ifndef TTM_SET_MEMORY
+#define TTM_SET_MEMORY
+
+#include <linux/mm.h>
+
+#ifdef CONFIG_X86
+
+#include <asm/set_memory.h>
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+	return set_pages_array_wb(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+	return set_pages_array_wc(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+	return set_pages_array_uc(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+	return set_pages_wb(page, numpages);
+}
+
+static inline int ttm_set_pages_wc(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	return set_memory_wc(addr, numpages);
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+	return set_pages_uc(page, numpages);
+}
+
+#else /* for CONFIG_X86 */
+
+#if IS_ENABLED(CONFIG_AGP)
+
+#include <asm/agp.h>
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		unmap_page_from_agp(pages[i]);
+	return 0;
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+	return 0;
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+	return 0;
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+	int i;
+
+	for (i = 0; i < numpages; i++)
+		unmap_page_from_agp(page++);
+	return 0;
+}
+
+#else /* for CONFIG_AGP */
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+	return 0;
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+	return 0;
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+	return 0;
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+	return 0;
+}
+
+#endif /* for CONFIG_AGP */
+
+static inline int ttm_set_pages_wc(struct page *page, int numpages)
+{
+	return 0;
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+	return 0;
+}
+
+#endif /* for CONFIG_X86 */
+
+#endif
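With the header above in place, every TTM pool caller uses one spelling, ttm_set_pages_*(), and the preprocessor picks the x86 set_memory backend, the AGP map/unmap fallback, or a no-op. A reduced sketch of the same compile-time dispatch pattern, with generic names rather than the kernel's:

#include <stdio.h>

/* toggle at build time, e.g. cc -DHAVE_NATIVE_BACKEND demo.c */
#ifdef HAVE_NATIVE_BACKEND

static inline int set_pages_writeback(int numpages)
{
	printf("native backend: %d pages\n", numpages);
	return 0;
}

#else /* fallback, mirroring the !CONFIG_X86 branches above */

static inline int set_pages_writeback(int numpages)
{
	(void)numpages;		/* nothing to do on this configuration */
	return 0;
}

#endif

int main(void)
{
	/* callers are identical on every configuration */
	return set_pages_writeback(16);
}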