Merge v5.0-rc7 into drm-next
Backmerging for nouveau and imx, which needed some fixes from -rc7 for their next pulls.

Signed-off-by: Dave Airlie <airlied@redhat.com>
@@ -38,6 +38,7 @@
 #include "amdgpu_gem.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
 
 /**
  * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ error:
 	return ERR_PTR(ret);
 }
 
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+	struct dma_fence **fences;
+	unsigned int count;
+	int r;
+
+	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+		return 0;
+
+	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+	if (r)
+		return r;
+
+	if (count == 0) {
+		/* Now that was unexpected. */
+	} else if (count == 1) {
+		reservation_object_add_excl_fence(obj, fences[0]);
+		dma_fence_put(fences[0]);
+		kfree(fences);
+	} else {
+		struct dma_fence_array *array;
+
+		array = dma_fence_array_create(count, fences,
+					       dma_fence_context_alloc(1), 0,
+					       false);
+		if (!array)
+			goto err_fences_put;
+
+		reservation_object_add_excl_fence(obj, &array->base);
+		dma_fence_put(&array->base);
+	}
+
+	return 0;
+
+err_fences_put:
+	while (count--)
+		dma_fence_put(fences[count]);
+	kfree(fences);
+	return -ENOMEM;
+}
+
 /**
  * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
  * @dma_buf: Shared DMA buffer
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
 
 	if (attach->dev->driver != adev->dev->driver) {
 		/*
-		 * Wait for all shared fences to complete before we switch to future
-		 * use of exclusive fence on this prime shared bo.
+		 * We only create shared fences for internal use, but importers
+		 * of the dmabuf rely on exclusive fences for implicitly
+		 * tracking write hazards. As any of the current fences may
+		 * correspond to a write, we need to convert all existing
+		 * fences on the reservation object into a single exclusive
+		 * fence.
 		 */
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-							true, false,
-							MAX_SCHEDULE_TIMEOUT);
-		if (unlikely(r < 0)) {
-			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+		r = __reservation_object_make_exclusive(bo->tbo.resv);
+		if (r)
 			goto error_unreserve;
-		}
 	}
 
 	/* pin buffer into GTT */
@@ -84,8 +84,10 @@ static int psp_sw_fini(void *handle)
 	adev->psp.sos_fw = NULL;
 	release_firmware(adev->psp.asd_fw);
 	adev->psp.asd_fw = NULL;
-	release_firmware(adev->psp.ta_fw);
-	adev->psp.ta_fw = NULL;
+	if (adev->psp.ta_fw) {
+		release_firmware(adev->psp.ta_fw);
+		adev->psp.ta_fw = NULL;
+	}
 	return 0;
 }
 
@@ -440,6 +442,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
 	struct ta_xgmi_shared_memory *xgmi_cmd;
 	int ret;
 
+	if (!psp->adev->psp.ta_fw)
+		return -ENOENT;
+
 	if (!psp->xgmi_context.initialized) {
 		ret = psp_xgmi_init_shared_buf(psp);
 		if (ret)
@@ -3398,14 +3398,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
 			 struct amdgpu_task_info *task_info)
 {
 	struct amdgpu_vm *vm;
+	unsigned long flags;
 
-	spin_lock(&adev->vm_manager.pasid_lock);
+	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
 
 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
 	if (vm)
 		*task_info = vm->task_info;
 
-	spin_unlock(&adev->vm_manager.pasid_lock);
+	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 }
 
 /**
@@ -90,7 +90,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
 static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
 							bool enable)
 {
+	u32 tmp = 0;
+
+	if (enable) {
+		tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
+		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
+		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
+
+		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
+			     lower_32_bits(adev->doorbell.base));
+		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
+			     upper_32_bits(adev->doorbell.base));
+	}
 
+	WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
 }
 
 static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
@@ -98,18 +98,22 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
 	err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
-	if (err)
-		goto out2;
-
-	err = amdgpu_ucode_validate(adev->psp.ta_fw);
-	if (err)
-		goto out2;
-
-	ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
-	adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
-	adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
-	adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
-		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+	if (err) {
+		release_firmware(adev->psp.ta_fw);
+		adev->psp.ta_fw = NULL;
+		dev_info(adev->dev,
+			 "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+	} else {
+		err = amdgpu_ucode_validate(adev->psp.ta_fw);
+		if (err)
+			goto out2;
+
+		ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
+		adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
+		adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
+		adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
+			le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+	}
 
 	return 0;
@@ -858,11 +858,13 @@ static int soc15_common_early_init(void *handle)
 	case CHIP_RAVEN:
 		adev->asic_funcs = &soc15_asic_funcs;
 		if (adev->rev_id >= 0x8)
-			adev->external_rev_id = adev->rev_id + 0x81;
+			adev->external_rev_id = adev->rev_id + 0x79;
 		else if (adev->pdev->device == 0x15d8)
 			adev->external_rev_id = adev->rev_id + 0x41;
+		else if (adev->rev_id == 1)
+			adev->external_rev_id = adev->rev_id + 0x20;
 		else
-			adev->external_rev_id = 0x1;
+			adev->external_rev_id = adev->rev_id + 0x01;
 
 		if (adev->rev_id >= 0x8) {
 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |