Merge tag 'drm-misc-next-2019-08-08' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.4:

UAPI Changes:
- HDCP: Add a Content protection type property

Cross-subsystem Changes:

Core Changes:
- Continue to rework the include dependencies
- fb: Remove the unused drm_gem_fbdev_fb_create function
- drm-dp-helper: Make the link rate calculation more tolerant to
  non-explicitly defined, yet supported, rates
- fb-helper: Map DRM client buffer only when required, and instantiate a
  shadow buffer when the device has a dirty function or says so
- connector: Add a helper to link the DDC adapter used by that connector
  to the userspace
- vblank: Switch from DRM_WAIT_ON to wait_event_interruptible_timeout
- dma-buf: Fix a stack corruption
- ttm: Embed a drm_gem_object struct to make ttm_buffer_object a
  superclass of GEM, and convert drivers to use it
- hdcp: Improvements to report the content protection type to the
  userspace

Driver Changes:
- Remove drm_gem_prime_import/export from being defined in the drivers
- Drop DRM_AUTH usage from drivers
- Continue to drop drmP.h
- Convert drivers to the connector ddc helper
- ingenic: Add support for more panel-related cases
- komeda: Support for dual-link
- lima: Reduce logging
- mgag200: Fix the cursor support
- panfrost: Export GPU features register to userspace through an ioctl
- pl111: Remove the CLD pads wiring support from the DT
- rockchip: Rework to use DRM PSR helpers, fix a bug in the VOP_WIN_GET macro
- sun4i: Improve support for color encoding and range
- tinydrm: Rework SPI support, improve MIPI-DBI support, move to drm/tiny
- vkms: Rework of the CRC tracking
- bridges:
  - sii902x: Add support for audio graph card
  - tc358767: Rework AUX data handling code
  - ti-sn65dsi86: Add Debugfs and proper DSI mode flags support
- panels:
  - Support for GiantPlus GPM940B0, Sharp LQ070Y3DG3B, Ortustech
    COM37H3M, Novatek NT39016, Sharp LS020B1DD01D, Raydium RM67191,
    Boe Himax8279d, Sharp LD-D5116Z01B
  - Conversion of the device tree bindings to the YAML description
  - jh057n00900: Rework the enable / disable path
- fbdev:
  - ssd1307fb: Support more devices based on that controller

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190808121423.xzpedzkpyecvsiy4@flea
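Most of the churn in the diff below comes from one Core change in this list: ttm_buffer_object now embeds a struct drm_gem_object, so the reservation object that TTM buffers used to carry themselves (bo->tbo.resv) is reached through the embedded GEM base instead (bo->tbo.base.resv), and amdgpu_bo loses its separate gem_base member. A simplified sketch of the resulting layout (not the full kernel definitions, member lists trimmed):

	/* Sketch only: the real structs carry many more members. */
	struct drm_gem_object {
		struct reservation_object *resv;	/* the single source of truth now */
		struct drm_vma_offset_node vma_node;
		/* ... */
	};

	struct ttm_buffer_object {
		struct drm_gem_object base;	/* new: a TTM BO is-a GEM object */
		/* ... the old "struct reservation_object *resv" member is gone */
	};

	struct amdgpu_bo {
		struct ttm_buffer_object tbo;	/* the old separate gem_base member is dropped */
		/* ... */
	};

	/* Hence the rename repeated throughout the diff: */
	static inline struct reservation_object *amdgpu_bo_resv(struct amdgpu_bo *bo)
	{
		return bo->tbo.base.resv;	/* was: bo->tbo.resv */
	}

amdgpu_bo_resv() here is only an illustrative helper, not a function introduced by the patch.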
@@ -218,7 +218,7 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 					struct amdgpu_amdkfd_fence *ef)
 {
-	struct reservation_object *resv = bo->tbo.resv;
+	struct reservation_object *resv = bo->tbo.base.resv;
 	struct reservation_object_list *old, *new;
 	unsigned int i, j, k;
 
@@ -812,7 +812,7 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
 		struct amdgpu_bo *pd = peer_vm->root.base.bo;
 
 		ret = amdgpu_sync_resv(NULL,
-				       sync, pd->tbo.resv,
+				       sync, pd->tbo.base.resv,
 				       AMDGPU_FENCE_OWNER_KFD, false);
 		if (ret)
 			return ret;
@@ -887,7 +887,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 				  AMDGPU_FENCE_OWNER_KFD, false);
 	if (ret)
 		goto wait_pd_fail;
-	ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
+	ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(vm->root.base.bo,
@@ -2133,7 +2133,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
 	 * Add process eviction fence to bo so they can
 	 * evict each other.
 	 */
-	ret = reservation_object_reserve_shared(gws_bo->tbo.resv, 1);
+	ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
@@ -1505,6 +1505,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 	struct amdgpu_connector_atom_dig *amdgpu_dig_connector;
 	struct drm_encoder *encoder;
 	struct amdgpu_encoder *amdgpu_encoder;
+	struct i2c_adapter *ddc = NULL;
 	uint32_t subpixel_order = SubPixelNone;
 	bool shared_ddc = false;
 	bool is_dp_bridge = false;
@@ -1574,17 +1575,21 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 		amdgpu_connector->con_priv = amdgpu_dig_connector;
 		if (i2c_bus->valid) {
 			amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
-			if (amdgpu_connector->ddc_bus)
+			if (amdgpu_connector->ddc_bus) {
 				has_aux = true;
-			else
+				ddc = &amdgpu_connector->ddc_bus->adapter;
+			} else {
 				DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
 		}
 		switch (connector_type) {
 		case DRM_MODE_CONNECTOR_VGA:
 		case DRM_MODE_CONNECTOR_DVIA:
 		default:
-			drm_connector_init(dev, &amdgpu_connector->base,
-					   &amdgpu_connector_dp_funcs, connector_type);
+			drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+						    &amdgpu_connector_dp_funcs,
+						    connector_type,
+						    ddc);
 			drm_connector_helper_add(&amdgpu_connector->base,
 						 &amdgpu_connector_dp_helper_funcs);
 			connector->interlace_allowed = true;
@@ -1602,8 +1607,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 		case DRM_MODE_CONNECTOR_HDMIA:
 		case DRM_MODE_CONNECTOR_HDMIB:
 		case DRM_MODE_CONNECTOR_DisplayPort:
-			drm_connector_init(dev, &amdgpu_connector->base,
-					   &amdgpu_connector_dp_funcs, connector_type);
+			drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+						    &amdgpu_connector_dp_funcs,
+						    connector_type,
+						    ddc);
 			drm_connector_helper_add(&amdgpu_connector->base,
 						 &amdgpu_connector_dp_helper_funcs);
 			drm_object_attach_property(&amdgpu_connector->base.base,
@@ -1644,8 +1651,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 			break;
 		case DRM_MODE_CONNECTOR_LVDS:
 		case DRM_MODE_CONNECTOR_eDP:
-			drm_connector_init(dev, &amdgpu_connector->base,
-					   &amdgpu_connector_edp_funcs, connector_type);
+			drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+						    &amdgpu_connector_edp_funcs,
+						    connector_type,
+						    ddc);
 			drm_connector_helper_add(&amdgpu_connector->base,
 						 &amdgpu_connector_dp_helper_funcs);
 			drm_object_attach_property(&amdgpu_connector->base.base,
@@ -1659,13 +1668,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 	} else {
 		switch (connector_type) {
 		case DRM_MODE_CONNECTOR_VGA:
-			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type);
-			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
 			if (i2c_bus->valid) {
 				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
 				if (!amdgpu_connector->ddc_bus)
 					DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				else
+					ddc = &amdgpu_connector->ddc_bus->adapter;
 			}
+			drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+						    &amdgpu_connector_vga_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
 			amdgpu_connector->dac_load_detect = true;
 			drm_object_attach_property(&amdgpu_connector->base.base,
 						   adev->mode_info.load_detect_property,
@@ -1679,13 +1693,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 			connector->doublescan_allowed = true;
 			break;
 		case DRM_MODE_CONNECTOR_DVIA:
-			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type);
-			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
 			if (i2c_bus->valid) {
 				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
 				if (!amdgpu_connector->ddc_bus)
 					DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				else
+					ddc = &amdgpu_connector->ddc_bus->adapter;
 			}
+			drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+						    &amdgpu_connector_vga_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
 			amdgpu_connector->dac_load_detect = true;
 			drm_object_attach_property(&amdgpu_connector->base.base,
 						   adev->mode_info.load_detect_property,
@@ -1704,13 +1723,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 			if (!amdgpu_dig_connector)
 				goto failed;
 			amdgpu_connector->con_priv = amdgpu_dig_connector;
-			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type);
-			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
 			if (i2c_bus->valid) {
 				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
 				if (!amdgpu_connector->ddc_bus)
 					DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				else
+					ddc = &amdgpu_connector->ddc_bus->adapter;
 			}
+			drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+						    &amdgpu_connector_dvi_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
 			subpixel_order = SubPixelHorizontalRGB;
 			drm_object_attach_property(&amdgpu_connector->base.base,
 						   adev->mode_info.coherent_mode_property,
@@ -1754,13 +1778,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 			if (!amdgpu_dig_connector)
 				goto failed;
 			amdgpu_connector->con_priv = amdgpu_dig_connector;
-			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type);
-			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
 			if (i2c_bus->valid) {
 				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
 				if (!amdgpu_connector->ddc_bus)
 					DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				else
+					ddc = &amdgpu_connector->ddc_bus->adapter;
 			}
+			drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+						    &amdgpu_connector_dvi_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
 			drm_object_attach_property(&amdgpu_connector->base.base,
 						   adev->mode_info.coherent_mode_property,
 						   1);
@@ -1796,15 +1825,20 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 			if (!amdgpu_dig_connector)
 				goto failed;
 			amdgpu_connector->con_priv = amdgpu_dig_connector;
-			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dp_funcs, connector_type);
-			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
 			if (i2c_bus->valid) {
 				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
-				if (amdgpu_connector->ddc_bus)
+				if (amdgpu_connector->ddc_bus) {
 					has_aux = true;
-				else
+					ddc = &amdgpu_connector->ddc_bus->adapter;
+				} else {
 					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				}
 			}
+			drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+						    &amdgpu_connector_dp_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
 			subpixel_order = SubPixelHorizontalRGB;
 			drm_object_attach_property(&amdgpu_connector->base.base,
 						   adev->mode_info.coherent_mode_property,
@@ -1838,15 +1872,20 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 			if (!amdgpu_dig_connector)
 				goto failed;
 			amdgpu_connector->con_priv = amdgpu_dig_connector;
-			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_edp_funcs, connector_type);
-			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
 			if (i2c_bus->valid) {
 				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
-				if (amdgpu_connector->ddc_bus)
+				if (amdgpu_connector->ddc_bus) {
 					has_aux = true;
-				else
+					ddc = &amdgpu_connector->ddc_bus->adapter;
+				} else {
 					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				}
 			}
+			drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+						    &amdgpu_connector_edp_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
 			drm_object_attach_property(&amdgpu_connector->base.base,
 						   dev->mode_config.scaling_mode_property,
 						   DRM_MODE_SCALE_FULLSCREEN);
@@ -1859,13 +1898,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 			if (!amdgpu_dig_connector)
 				goto failed;
 			amdgpu_connector->con_priv = amdgpu_dig_connector;
-			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_lvds_funcs, connector_type);
-			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_lvds_helper_funcs);
 			if (i2c_bus->valid) {
 				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
 				if (!amdgpu_connector->ddc_bus)
 					DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				else
+					ddc = &amdgpu_connector->ddc_bus->adapter;
 			}
+			drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+						    &amdgpu_connector_lvds_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_lvds_helper_funcs);
 			drm_object_attach_property(&amdgpu_connector->base.base,
 						   dev->mode_config.scaling_mode_property,
 						   DRM_MODE_SCALE_FULLSCREEN);
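Every connector type above follows the same conversion pattern: look up the DDC i2c bus first, remember its adapter, and only then initialize the connector, so the new helper can record which adapter belongs to it. Condensed to its core (error handling trimmed; the VGA funcs are just one of the variants used above):

	struct i2c_adapter *ddc = NULL;

	if (i2c_bus->valid) {
		amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
		if (amdgpu_connector->ddc_bus)
			ddc = &amdgpu_connector->ddc_bus->adapter;
	}

	/* Like drm_connector_init(), but additionally stores the DDC adapter
	 * so the core can expose the connector-to-adapter link to userspace;
	 * a NULL ddc is accepted and simply leaves the link out. */
	drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
				    &amdgpu_connector_vga_funcs,
				    connector_type, ddc);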
@@ -402,7 +402,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 	struct ttm_operation_ctx ctx = {
 		.interruptible = true,
 		.no_wait_gpu = false,
-		.resv = bo->tbo.resv,
+		.resv = bo->tbo.base.resv,
 		.flags = 0
 	};
 	uint32_t domain;
@@ -730,7 +730,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 
 	list_for_each_entry(e, &p->validated, tv.head) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-		struct reservation_object *resv = bo->tbo.resv;
+		struct reservation_object *resv = bo->tbo.base.resv;
 
 		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
 				     amdgpu_bo_explicit_sync(bo));
@@ -1727,7 +1727,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 	*map = mapping;
 
 	/* Double check that the BO is reserved by this CS */
-	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
+	if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
 		return -EINVAL;
 
 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
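The amdgpu_cs_find_mapping() hunk replaces a direct peek at the ww_mutex internals with the reservation_object_locking_ctx() accessor, so callers stop depending on the lock's layout. The helper is roughly the same READ_ONCE, wrapped (sketched from the 5.3-era reservation.h):

	static inline struct ww_acquire_ctx *
	reservation_object_locking_ctx(struct reservation_object *obj)
	{
		return READ_ONCE(obj->lock.ctx);
	}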
@@ -204,7 +204,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
+	r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
 	if (unlikely(r != 0)) {
@@ -216,7 +216,7 @@ static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
 		 * fences on the reservation object into a single exclusive
 		 * fence.
 		 */
-		r = __reservation_object_make_exclusive(bo->tbo.resv);
+		r = __reservation_object_make_exclusive(bo->tbo.base.resv);
 		if (r)
 			goto error_unreserve;
 	}
@@ -267,20 +267,6 @@ error:
 	drm_gem_map_detach(dma_buf, attach);
 }
 
-/**
- * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
- * @obj: GEM BO
- *
- * Returns:
- * The BO's reservation object.
- */
-struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
-{
-	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
-	return bo->tbo.resv;
-}
-
 /**
  * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
  * @dma_buf: Shared DMA buffer
@@ -339,14 +325,12 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
  * @gobj: GEM BO
  * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
  *
- * The main work is done by the &drm_gem_prime_export helper, which in turn
- * uses &amdgpu_gem_prime_res_obj.
+ * The main work is done by the &drm_gem_prime_export helper.
  *
  * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
-struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
-					struct drm_gem_object *gobj,
+struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
 					int flags)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
@@ -356,9 +340,9 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
 		return ERR_PTR(-EPERM);
 
-	buf = drm_gem_prime_export(dev, gobj, flags);
+	buf = drm_gem_prime_export(gobj, flags);
 	if (!IS_ERR(buf)) {
-		buf->file->f_mapping = dev->anon_inode->i_mapping;
+		buf->file->f_mapping = gobj->dev->anon_inode->i_mapping;
 		buf->ops = &amdgpu_dmabuf_ops;
 	}
 
@@ -396,7 +380,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	bp.flags = 0;
 	bp.type = ttm_bo_type_sg;
 	bp.resv = resv;
-	ww_mutex_lock(&resv->lock, NULL);
+	reservation_object_lock(resv, NULL);
 	ret = amdgpu_bo_create(adev, &bp, &bo);
 	if (ret)
 		goto error;
@@ -408,11 +392,11 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
 		bo->prime_shared_count = 1;
 
-	ww_mutex_unlock(&resv->lock);
-	return &bo->gem_base;
+	reservation_object_unlock(resv);
+	return &bo->tbo.base;
 
 error:
-	ww_mutex_unlock(&resv->lock);
+	reservation_object_unlock(resv);
 	return ERR_PTR(ret);
 }
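With amdgpu_gem_prime_res_obj() gone, the reservation object is found through the GEM object itself, so drm_gem_prime_export() no longer needs a drm_device argument: anything it used dev for is reachable as obj->dev. A driver-side sketch of the new call shape (my_gem_prime_export is a hypothetical name, not from the patch):

	struct dma_buf *my_gem_prime_export(struct drm_gem_object *gobj, int flags)
	{
		/* device parameter dropped; recovered from the object instead */
		struct dma_buf *buf = drm_gem_prime_export(gobj, flags);

		if (!IS_ERR(buf))
			buf->file->f_mapping = gobj->dev->anon_inode->i_mapping;
		return buf;
	}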
@@ -30,12 +30,10 @@ struct drm_gem_object *
 amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 				 struct dma_buf_attachment *attach,
 				 struct sg_table *sg);
-struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
-					struct drm_gem_object *gobj,
+struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
 					int flags);
 struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
 					       struct dma_buf *dma_buf);
-struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
 void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
@@ -1373,7 +1373,7 @@ static struct drm_driver kms_driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_ATOMIC |
 	    DRIVER_GEM |
-	    DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
+	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
 	.load = amdgpu_driver_load_kms,
 	.open = amdgpu_driver_open_kms,
 	.postclose = amdgpu_driver_postclose_kms,
@@ -1397,7 +1397,6 @@ static struct drm_driver kms_driver = {
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export = amdgpu_gem_prime_export,
 	.gem_prime_import = amdgpu_gem_prime_import,
-	.gem_prime_res_obj = amdgpu_gem_prime_res_obj,
 	.gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
 	.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
 	.gem_prime_vmap = amdgpu_gem_prime_vmap,
@@ -85,7 +85,7 @@ retry:
 		}
 		return r;
 	}
-	*obj = &bo->gem_base;
+	*obj = &bo->tbo.base;
 
 	return 0;
 }
@@ -134,7 +134,7 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
 		return -EPERM;
 
 	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
-	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
+	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
 		return -EPERM;
 
 	r = amdgpu_bo_reserve(abo, false);
@@ -252,7 +252,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		if (r)
 			return r;
 
-		resv = vm->root.base.bo->tbo.resv;
+		resv = vm->root.base.bo->tbo.base.resv;
 	}
 
 	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
@@ -433,7 +433,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
+	ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true,
 						  timeout);
 
 	/* ret == 0 means not signaled,
@@ -689,7 +689,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		struct drm_amdgpu_gem_create_in info;
 		void __user *out = u64_to_user_ptr(args->value);
 
-		info.bo_size = robj->gem_base.size;
+		info.bo_size = robj->tbo.base.size;
 		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
 		info.domains = robj->preferred_domains;
 		info.domain_flags = robj->flags;
@@ -819,8 +819,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 	if (pin_count)
 		seq_printf(m, " pin count %d", pin_count);
 
-	dma_buf = READ_ONCE(bo->gem_base.dma_buf);
-	attachment = READ_ONCE(bo->gem_base.import_attach);
+	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
+	attachment = READ_ONCE(bo->tbo.base.import_attach);
 
 	if (attachment)
 		seq_printf(m, " imported from %p", dma_buf);
@@ -31,7 +31,7 @@
 */
 
 #define AMDGPU_GEM_DOMAIN_MAX		0x3
-#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
+#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)
 
 void amdgpu_gem_object_free(struct drm_gem_object *obj);
 int amdgpu_gem_object_open(struct drm_gem_object *obj,
@@ -1088,7 +1088,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 		amdgpu_vm_fini(adev, &fpriv->vm);
 
 		if (pasid)
-			amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
+			amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
 		amdgpu_bo_unref(&pd);
 
 		idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -179,7 +179,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
 			continue;
 
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+		r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
 			true, false, MAX_SCHEDULE_TIMEOUT);
 		if (r <= 0)
 			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
@@ -85,9 +85,9 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 
 	amdgpu_bo_kunmap(bo);
 
-	if (bo->gem_base.import_attach)
-		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
-	drm_gem_object_release(&bo->gem_base);
+	if (bo->tbo.base.import_attach)
+		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+	drm_gem_object_release(&bo->tbo.base);
 	/* in case amdgpu_device_recover_vram got NULL of bo->parent */
 	if (!list_empty(&bo->shadow_list)) {
 		mutex_lock(&adev->shadow_list_lock);
@@ -454,7 +454,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
-	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
+	drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
 	INIT_LIST_HEAD(&bo->shadow_list);
 	bo->vm_bo = NULL;
 	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
@@ -521,7 +521,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
 		struct dma_fence *fence;
 
-		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
 		if (unlikely(r))
 			goto fail_unreserve;
 
@@ -544,7 +544,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 
 fail_unreserve:
 	if (!bp->resv)
-		ww_mutex_unlock(&bo->tbo.resv->lock);
+		reservation_object_unlock(bo->tbo.base.resv);
 	amdgpu_bo_unref(&bo);
 	return r;
 }
@@ -565,7 +565,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
 		AMDGPU_GEM_CREATE_SHADOW;
 	bp.type = ttm_bo_type_kernel;
-	bp.resv = bo->tbo.resv;
+	bp.resv = bo->tbo.base.resv;
 
 	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
 	if (!r) {
@@ -606,13 +606,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 
 	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
 		if (!bp->resv)
-			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv,
							NULL));
 
 		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
 
 		if (!bp->resv)
-			reservation_object_unlock((*bo_ptr)->tbo.resv);
+			reservation_object_unlock((*bo_ptr)->tbo.base.resv);
 
 		if (r)
 			amdgpu_bo_unref(bo_ptr);
@@ -709,7 +709,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}
 
-	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
+	r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false,
 						MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
@@ -1087,7 +1087,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
 */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
-	lockdep_assert_held(&bo->tbo.resv->lock.base);
+	reservation_object_assert_held(bo->tbo.base.resv);
 
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;
@@ -1283,7 +1283,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared)
 {
-	struct reservation_object *resv = bo->tbo.resv;
+	struct reservation_object *resv = bo->tbo.base.resv;
 
 	if (shared)
 		reservation_object_add_shared_fence(resv, fence);
@@ -1308,7 +1308,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 	int r;
 
 	amdgpu_sync_create(&sync);
-	amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
+	amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
 	r = amdgpu_sync_wait(&sync, intr);
 	amdgpu_sync_free(&sync);
 
@@ -1328,7 +1328,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+	WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) &&
		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
 	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
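Beyond the resv relocation, the amdgpu_object.c hunks also trade direct ww_mutex manipulation for the reservation_object wrappers, which keep the locking and its lockdep annotations behind one interface. A minimal sketch of the correspondence, assuming the 5.3-era API (my_bo_touch_locked is a hypothetical helper, not from the patch):

	static int my_bo_touch_locked(struct amdgpu_bo *bo)
	{
		struct reservation_object *resv = bo->tbo.base.resv;
		int r;

		r = reservation_object_lock(resv, NULL);	/* was ww_mutex_lock(&resv->lock, NULL) */
		if (r)
			return r;
		reservation_object_assert_held(resv);	/* was lockdep_assert_held(&resv->lock.base) */
		/* ... operate on the BO under the reservation lock ... */
		reservation_object_unlock(resv);	/* was ww_mutex_unlock(&resv->lock) */
		return 0;
	}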
@@ -94,7 +94,6 @@ struct amdgpu_bo {
 	/* per VM structure for page tables and with virtual addresses */
 	struct amdgpu_vm_bo_base	*vm_bo;
 	/* Constant after initialization */
-	struct drm_gem_object		gem_base;
 	struct amdgpu_bo		*parent;
 	struct amdgpu_bo		*shadow;
 
@@ -192,7 +191,7 @@ static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
 */
 static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
 {
-	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 /**
@@ -227,7 +227,7 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 
 	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
 		return -EPERM;
-	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
+	return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
 					  filp->private_data);
 }
 
@@ -440,7 +440,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
 				       new_mem->num_pages << PAGE_SHIFT,
-				       bo->resv, &fence);
+				       bo->base.resv, &fence);
 	if (r)
 		goto error;
 
@@ -1478,18 +1478,18 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * cleanly handle page faults.
 	 */
 	if (bo->type == ttm_bo_type_kernel &&
-	    !reservation_object_test_signaled_rcu(bo->resv, true))
+	    !reservation_object_test_signaled_rcu(bo->base.resv, true))
 		return false;
 
 	/* If bo is a KFD BO, check if the bo belongs to the current process.
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	flist = reservation_object_get_list(bo->resv);
+	flist = reservation_object_get_list(bo->base.resv);
 	if (flist) {
 		for (i = 0; i < flist->shared_count; ++i) {
 			f = rcu_dereference_protected(flist->shared[i],
-				reservation_object_held(bo->resv));
+				reservation_object_held(bo->base.resv));
 			if (amdkfd_fence_check_mm(f, current->mm))
 				return false;
 		}
@@ -1073,7 +1073,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+		r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
 							true, false,
 							msecs_to_jiffies(10));
 		if (r == 0)
@@ -1085,7 +1085,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		if (r)
 			goto err_free;
 	} else {
-		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
 				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
 		if (r)
 			goto err_free;
@@ -302,7 +302,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 	base->next = bo->vm_bo;
 	bo->vm_bo = base;
 
-	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+	if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
 		return;
 
 	vm->bulk_moveable = false;
@@ -583,7 +583,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
 	for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
 		struct amdgpu_vm *vm = bo_base->vm;
 
-		if (abo->tbo.resv == vm->root.base.bo->tbo.resv)
+		if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
 			vm->bulk_moveable = false;
 	}
 
@@ -834,7 +834,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
 	bp->type = ttm_bo_type_kernel;
 	if (vm->root.base.bo)
-		bp->resv = vm->root.base.bo->tbo.resv;
+		bp->resv = vm->root.base.bo->tbo.base.resv;
 }
 
 /**
@@ -1702,7 +1702,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
-		exclusive = reservation_object_get_excl(bo->tbo.resv);
+		exclusive = reservation_object_get_excl(bo->tbo.base.resv);
 	}
 
 	if (bo) {
@@ -1712,7 +1712,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		flags = 0x0;
 	}
 
-	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
+	if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
 		last_update = &vm->last_update;
 	else
 		last_update = &bo_va->last_pt_update;
@@ -1743,7 +1743,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	 * the evicted list so that it gets validated again on the
 	 * next command submission.
 	 */
-	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
 		uint32_t mem_type = bo->tbo.mem.mem_type;
 
 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
@@ -1879,7 +1879,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
 */
 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
+	struct reservation_object *resv = vm->root.base.bo->tbo.base.resv;
 	struct dma_fence *excl, **shared;
 	unsigned i, shared_count;
 	int r;
@@ -1993,7 +1993,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 	while (!list_empty(&vm->invalidated)) {
 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
 					 base.vm_status);
-		resv = bo_va->base.bo->tbo.resv;
+		resv = bo_va->base.bo->tbo.base.resv;
 		spin_unlock(&vm->invalidated_lock);
 
 		/* Try to reserve the BO to avoid clearing its ptes */
@@ -2084,7 +2084,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 	if (mapping->flags & AMDGPU_PTE_PRT)
 		amdgpu_vm_prt_get(adev);
 
-	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
+	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
 	    !bo_va->base.moved) {
 		list_move(&bo_va->base.vm_status, &vm->moved);
 	}
@@ -2416,7 +2416,8 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
 			struct amdgpu_bo *bo;
 
 			bo = mapping->bo_va->base.bo;
-			if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+			if (reservation_object_locking_ctx(bo->tbo.base.resv) !=
+			    ticket)
 				continue;
 		}
 
@@ -2443,7 +2444,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 	struct amdgpu_vm_bo_base **base;
 
 	if (bo) {
-		if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+		if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
 			vm->bulk_moveable = false;
 
 		for (base = &bo_va->base.bo->vm_bo; *base;
@@ -2507,7 +2508,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
 		struct amdgpu_vm *vm = bo_base->vm;
 
-		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+		if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
 			amdgpu_vm_bo_evicted(bo_base);
 			continue;
 		}
@@ -2518,7 +2519,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 
 		if (bo->tbo.type == ttm_bo_type_kernel)
 			amdgpu_vm_bo_relocated(bo_base);
-		else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+		else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
 			amdgpu_vm_bo_moved(bo_base);
 		else
 			amdgpu_vm_bo_invalidated(bo_base);
@@ -2648,7 +2649,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
 */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.resv,
+	return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
						   true, true, timeout);
 }
 
@@ -2723,7 +2724,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		goto error_free_root;
 
-	r = reservation_object_reserve_shared(root->tbo.resv, 1);
+	r = reservation_object_reserve_shared(root->tbo.base.resv, 1);
 	if (r)
 		goto error_unreserve;
 
@@ -72,7 +72,7 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 	if (r)
 		return r;
 
-	r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.resv,
+	r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
 			     owner, false);
 	if (r)
 		return r;
@@ -5693,7 +5693,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * deadlock during GPU reset when this fence will not signal
 		 * but we hold reservation lock for the BO.
 		 */
-		r = reservation_object_wait_timeout_rcu(abo->tbo.resv, true,
+		r = reservation_object_wait_timeout_rcu(abo->tbo.base.resv, true,
 							false,
 							msecs_to_jiffies(5000));
 		if (unlikely(r <= 0))
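reservation_object_wait_timeout_rcu() appears in several hunks of this commit, and its return convention explains the r <= 0 check above: it returns a negative error (e.g. -ERESTARTSYS when interrupted), 0 when the wait timed out, and the remaining timeout in jiffies on success. A minimal usage sketch (my_wait_for_bo_idle is a hypothetical helper):

	static long my_wait_for_bo_idle(struct amdgpu_bo *bo)
	{
		long r;

		/* wait_all = true: include shared fences; intr = false: uninterruptible */
		r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
							true, false,
							msecs_to_jiffies(5000));
		if (r == 0)
			return -ETIMEDOUT;	/* timed out, nothing signaled */
		return r < 0 ? r : 0;		/* negative: error; positive: all signaled */
	}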
@@ -156,6 +156,26 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
 	kfree(amdgpu_dm_connector);
 }
 
+static int
+amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *amdgpu_dm_connector =
+		to_amdgpu_dm_connector(connector);
+	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+
+	return drm_dp_mst_connector_late_register(connector, port);
+}
+
+static void
+amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *amdgpu_dm_connector =
+		to_amdgpu_dm_connector(connector);
+	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+
+	drm_dp_mst_connector_early_unregister(connector, port);
+}
+
 static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 	.detect = dm_dp_mst_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -164,7 +184,9 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
-	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
+	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+	.late_register = amdgpu_dm_mst_connector_late_register,
+	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
 };
 
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
@@ -388,7 +410,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 				       struct amdgpu_dm_connector *aconnector)
 {
 	aconnector->dm_dp_aux.aux.name = "dmdc";
-	aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
+	aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev;
 	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
 	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;