Merge tag 'drm-qemu-20161121' of git://git.kraxel.org/linux into drm-next
drm/virtio: fix busid in a different way, allocate more vbufs.
drm/qxl: various bugfixes and cleanups.

* tag 'drm-qemu-20161121' of git://git.kraxel.org/linux: (224 commits)
  drm/virtio: allocate some extra bufs
  qxl: Allow resolution which are not multiple of 8
  qxl: Don't notify userspace when monitors config is unchanged
  qxl: Remove qxl_bo_init() return value
  qxl: Call qxl_gem_{init, fini}
  qxl: Add missing '\n' to qxl_io_log() call
  qxl: Remove unused prototype
  qxl: Mark some internal functions as static
  Revert "drm: virtio: reinstate drm_virtio_set_busid()"
  drm/virtio: fix busid regression
  drm: re-export drm_dev_set_unique
  Linux 4.9-rc5
  gp8psk: Fix DVB frontend attach
  gp8psk: fix gp8psk_usb_in_op() logic
  dvb-usb: move data_mutex to struct dvb_usb_device
  iio: maxim_thermocouple: detect invalid storage size in read()
  aoe: fix crash in page count manipulation
  lightnvm: invalid offset calculation for lba_shift
  Kbuild: enable -Wmaybe-uninitialized warnings by default
  pcmcia: fix return value of soc_pcmcia_regulator_set
  ...
@@ -395,9 +395,12 @@ static int acp_hw_fini(void *handle)
 {
 	int i, ret;
 	struct device *dev;
 
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* return early if no ACP */
+	if (!adev->acp.acp_genpd)
+		return 0;
+
 	for (i = 0; i < ACP_DEVS ; i++) {
 		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
 		ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
@@ -809,10 +809,19 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		if (!adev->pm.fw) {
 			switch (adev->asic_type) {
 			case CHIP_TOPAZ:
-				strcpy(fw_name, "amdgpu/topaz_smc.bin");
+				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
+				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
+				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
+					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+				else
+					strcpy(fw_name, "amdgpu/topaz_smc.bin");
 				break;
 			case CHIP_TONGA:
-				strcpy(fw_name, "amdgpu/tonga_smc.bin");
+				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
+				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
+					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+				else
+					strcpy(fw_name, "amdgpu/tonga_smc.bin");
 				break;
 			case CHIP_FIJI:
 				strcpy(fw_name, "amdgpu/fiji_smc.bin");
@@ -769,7 +769,7 @@ static void amdgpu_connector_unregister(struct drm_connector *connector)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-	if (amdgpu_connector->ddc_bus->has_aux) {
+	if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) {
 		drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
 		amdgpu_connector->ddc_bus->has_aux = false;
 	}
@@ -742,8 +742,20 @@ static struct pci_driver amdgpu_kms_pci_driver = {
 
 static int __init amdgpu_init(void)
 {
-	amdgpu_sync_init();
-	amdgpu_fence_slab_init();
+	int r;
+
+	r = amdgpu_sync_init();
+	if (r)
+		goto error_sync;
+
+	r = amdgpu_fence_slab_init();
+	if (r)
+		goto error_fence;
+
+	r = amd_sched_fence_slab_init();
+	if (r)
+		goto error_sched;
+
 	if (vgacon_text_force()) {
 		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
 		return -EINVAL;
@@ -755,6 +767,15 @@ static int __init amdgpu_init(void)
 	amdgpu_register_atpx_handler();
 	/* let modprobe override vga console setting */
 	return drm_pci_init(driver, pdriver);
+
+error_sched:
+	amdgpu_fence_slab_fini();
+
+error_fence:
+	amdgpu_sync_fini();
+
+error_sync:
+	return r;
 }
 
 static void __exit amdgpu_exit(void)
@@ -763,6 +784,7 @@ static void __exit amdgpu_exit(void)
 	drm_pci_exit(driver, pdriver);
 	amdgpu_unregister_atpx_handler();
 	amdgpu_sync_fini();
+	amd_sched_fence_slab_fini();
 	amdgpu_fence_slab_fini();
 }
 
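The amdgpu_init() hunk above converts fire-and-forget initializers into checked calls with a goto-based unwind ladder, so each failure path undoes exactly the steps that already succeeded, in reverse order. A minimal, self-contained C sketch of the idiom — the init_a/init_b/fini_a names are illustrative stand-ins, not the real amdgpu functions:

```c
#include <stdio.h>

static int init_a(void) { return 0; }   /* succeeds */
static int init_b(void) { return -1; }  /* pretend this one fails */
static void fini_a(void) { puts("fini_a: undoing init_a"); }

static int my_module_init(void)
{
	int r;

	r = init_a();
	if (r)
		goto error_a;   /* nothing succeeded yet, nothing to undo */

	r = init_b();
	if (r)
		goto error_b;   /* undo init_a() only */

	return 0;

error_b:
	fini_a();
error_a:
	return r;
}

int main(void)
{
	/* init_b() fails, so init_a() gets rolled back and -1 propagates */
	printf("init: %d\n", my_module_init());
	return 0;
}
```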
@@ -99,6 +99,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 
 	if ((amdgpu_runtime_pm != 0) &&
 	    amdgpu_has_atpx() &&
+	    (amdgpu_is_atpx_hybrid() ||
+	     amdgpu_has_atpx_dgpu_power_cntl()) &&
 	    ((flags & AMD_IS_APU) == 0))
 		flags |= AMD_IS_PX;
 
@@ -80,7 +80,9 @@
 #include "dce_virtual.h"
 
 MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
@@ -272,7 +272,7 @@ bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hw
 	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
-		return -EINVAL;
+		return false;
 
 	return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
 }
@@ -710,8 +710,10 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
 	uint32_t vol;
 	int ret = 0;
 
-	if (hwmgr->chip_id < CHIP_POLARIS10) {
-		atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+	if (hwmgr->chip_id < CHIP_TONGA) {
+		ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
+	} else if (hwmgr->chip_id < CHIP_POLARIS10) {
+		ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
 		if (*voltage >= 2000 || *voltage == 0)
 			*voltage = 1150;
 	} else {
@@ -1474,19 +1474,19 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
 
-	if (table_info == NULL)
-		return -EINVAL;
-
-	sclk_table = table_info->vdd_dep_on_sclk;
-
 	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
 		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
 
 		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
-			if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
+			if ((hwmgr->pp_table_version == PP_TABLE_V1)
+			    && !phm_get_sclk_for_voltage_evv(hwmgr,
 				table_info->vddgfx_lookup_table, vv_id, &sclk)) {
 				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 						PHM_PlatformCaps_ClockStretcher)) {
+					if (table_info == NULL)
+						return -EINVAL;
+					sclk_table = table_info->vdd_dep_on_sclk;
 
 					for (j = 1; j < sclk_table->count; j++) {
 						if (sclk_table->entries[j].clk == sclk &&
 						    sclk_table->entries[j].cks_enable == 0) {
@@ -1512,12 +1512,15 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
 					}
 				}
 			} else {
-				if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
+				if ((hwmgr->pp_table_version == PP_TABLE_V0)
+				    || !phm_get_sclk_for_voltage_evv(hwmgr,
 					table_info->vddc_lookup_table, vv_id, &sclk)) {
 					if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 							PHM_PlatformCaps_ClockStretcher)) {
+						if (table_info == NULL)
+							return -EINVAL;
+						sclk_table = table_info->vdd_dep_on_sclk;
 
 						for (j = 1; j < sclk_table->count; j++) {
 							if (sclk_table->entries[j].clk == sclk &&
 							    sclk_table->entries[j].cks_enable == 0) {
@@ -2147,9 +2150,11 @@ static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	if (tab) {
+		vddc = tab->vddc;
 		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
 					&data->vddc_leakage);
+		tab->vddc = vddc;
+		vddci = tab->vddci;
 		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
 					&data->vddci_leakage);
+		tab->vddci = vddci;
@@ -4247,18 +4252,26 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 {
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)hwmgr->pptable;
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
+	struct phm_clock_voltage_dependency_table *sclk_table;
 	int i;
 
-	if (table_info == NULL)
-		return -EINVAL;
-
-	dep_sclk_table = table_info->vdd_dep_on_sclk;
-
-	for (i = 0; i < dep_sclk_table->count; i++) {
-		clocks->clock[i] = dep_sclk_table->entries[i].clk;
-		clocks->count++;
+	if (hwmgr->pp_table_version == PP_TABLE_V1) {
+		if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
+			return -EINVAL;
+		dep_sclk_table = table_info->vdd_dep_on_sclk;
+		for (i = 0; i < dep_sclk_table->count; i++) {
+			clocks->clock[i] = dep_sclk_table->entries[i].clk;
+			clocks->count++;
+		}
+	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+		sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+		for (i = 0; i < sclk_table->count; i++) {
+			clocks->clock[i] = sclk_table->entries[i].clk;
+			clocks->count++;
+		}
 	}
 
 	return 0;
 }
@@ -4280,17 +4293,24 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 			(struct phm_ppt_v1_information *)hwmgr->pptable;
 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
 	int i;
+	struct phm_clock_voltage_dependency_table *mclk_table;
 
-	if (table_info == NULL)
-		return -EINVAL;
-
-	dep_mclk_table = table_info->vdd_dep_on_mclk;
-
-	for (i = 0; i < dep_mclk_table->count; i++) {
-		clocks->clock[i] = dep_mclk_table->entries[i].clk;
-		clocks->latency[i] = smu7_get_mem_latency(hwmgr,
+	if (hwmgr->pp_table_version == PP_TABLE_V1) {
+		if (table_info == NULL)
+			return -EINVAL;
+		dep_mclk_table = table_info->vdd_dep_on_mclk;
+		for (i = 0; i < dep_mclk_table->count; i++) {
+			clocks->clock[i] = dep_mclk_table->entries[i].clk;
+			clocks->latency[i] = smu7_get_mem_latency(hwmgr,
 						dep_mclk_table->entries[i].clk);
-		clocks->count++;
+			clocks->count++;
+		}
+	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+		mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+		for (i = 0; i < mclk_table->count; i++) {
+			clocks->clock[i] = mclk_table->entries[i].clk;
+			clocks->count++;
+		}
 	}
 	return 0;
 }
@@ -30,7 +30,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
 		struct phm_fan_speed_info *fan_speed_info)
 {
 	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
+		return -ENODEV;
 
 	fan_speed_info->supports_percent_read = true;
 	fan_speed_info->supports_percent_write = true;
@@ -60,7 +60,7 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
 	uint64_t tmp64;
 
 	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
+		return -ENODEV;
 
 	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 			CG_FDO_CTRL1, FMAX_DUTY100);
@@ -89,7 +89,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
 	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
 	    (hwmgr->thermal_controller.fanInfo.
 		ucTachometerPulsesPerRevolution == 0))
-		return 0;
+		return -ENODEV;
 
 	tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 			CG_TACH_STATUS, TACH_PERIOD);
@@ -34,9 +34,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
 
-struct kmem_cache *sched_fence_slab;
-atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
-
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
@@ -619,13 +616,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 	INIT_LIST_HEAD(&sched->ring_mirror_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
-	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
-		sched_fence_slab = kmem_cache_create(
-			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
-			SLAB_HWCACHE_ALIGN, NULL);
-		if (!sched_fence_slab)
-			return -ENOMEM;
-	}
 
 	/* Each scheduler will run on a seperate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -646,7 +636,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
-	rcu_barrier();
-	if (atomic_dec_and_test(&sched_fence_slab_ref))
-		kmem_cache_destroy(sched_fence_slab);
 }
@@ -30,9 +30,6 @@
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
-extern struct kmem_cache *sched_fence_slab;
-extern atomic_t sched_fence_slab_ref;
-
 /**
  * A scheduler entity is a wrapper around a job queue or a group
  * of other entities. Entities take turns emitting jobs from their
@@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity);
 void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 
+int amd_sched_fence_slab_init(void);
+void amd_sched_fence_slab_fini(void);
+
 struct amd_sched_fence *amd_sched_fence_create(
 	struct amd_sched_entity *s_entity, void *owner);
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
@@ -27,6 +27,25 @@
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"
 
+static struct kmem_cache *sched_fence_slab;
+
+int amd_sched_fence_slab_init(void)
+{
+	sched_fence_slab = kmem_cache_create(
+		"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+		SLAB_HWCACHE_ALIGN, NULL);
+	if (!sched_fence_slab)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void amd_sched_fence_slab_fini(void)
+{
+	rcu_barrier();
+	kmem_cache_destroy(sched_fence_slab);
+}
+
 struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
 					       void *owner)
 {
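The three scheduler hunks above replace a lazily created, atomically refcounted global slab with explicit amd_sched_fence_slab_init()/amd_sched_fence_slab_fini() hooks that the driver calls exactly once from module load/unload. A userspace analogue of that lifecycle change, with malloc()/free() standing in for kmem_cache_create()/kmem_cache_destroy() — the names here are illustrative:

```c
#include <stdio.h>
#include <stdlib.h>

/* one cache shared by all scheduler instances, owned by module init/exit
 * instead of being created on first amd_sched_init() */
static void *fence_pool;

static int sched_fence_slab_init(void)
{
	fence_pool = malloc(4096);   /* kmem_cache_create() stand-in */
	return fence_pool ? 0 : -1;
}

static void sched_fence_slab_fini(void)
{
	free(fence_pool);            /* kmem_cache_destroy() stand-in */
	fence_pool = NULL;
}

int main(void)
{
	if (sched_fence_slab_init())
		return 1;
	puts("slab ready; per-scheduler init no longer touches it");
	sched_fence_slab_fini();
	return 0;
}
```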
@@ -303,9 +303,10 @@ void drm_minor_release(struct drm_minor *minor)
 * callbacks implemented by the driver. The driver then needs to initialize all
 * the various subsystems for the drm device like memory management, vblank
 * handling, modesetting support and intial output configuration plus obviously
- * initialize all the corresponding hardware bits. Finally when everything is up
- * and running and ready for userspace the device instance can be published
- * using drm_dev_register().
+ * initialize all the corresponding hardware bits. An important part of this is
+ * also calling drm_dev_set_unique() to set the userspace-visible unique name of
+ * this device instance. Finally when everything is up and running and ready for
+ * userspace the device instance can be published using drm_dev_register().
 *
 * There is also deprecated support for initalizing device instances using
 * bus-specific helpers and the ->load() callback. But due to
@@ -327,17 +328,6 @@ void drm_minor_release(struct drm_minor *minor)
 * dev_priv field of &drm_device.
 */
 
-static int drm_dev_set_unique(struct drm_device *dev, const char *name)
-{
-	if (!name)
-		return -EINVAL;
-
-	kfree(dev->unique);
-	dev->unique = kstrdup(name, GFP_KERNEL);
-
-	return dev->unique ? 0 : -ENOMEM;
-}
-
 /**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
@@ -760,6 +750,26 @@ void drm_dev_unregister(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_dev_unregister);
 
+/**
+ * drm_dev_set_unique - Set the unique name of a DRM device
+ * @dev: device of which to set the unique name
+ * @name: unique name
+ *
+ * Sets the unique name of a DRM device using the specified string. Drivers
+ * can use this at driver probe time if the unique name of the devices they
+ * drive is static.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_dev_set_unique(struct drm_device *dev, const char *name)
+{
+	kfree(dev->unique);
+	dev->unique = kstrdup(name, GFP_KERNEL);
+
+	return dev->unique ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL(drm_dev_set_unique);
+
 /*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
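drm_dev_set_unique() is re-exported above so drivers without a usable bus id (like virtio-gpu further down) can set their userspace-visible name at probe time. A hedged userspace analogue of its replace-and-duplicate semantics, assuming strdup()/free() as stand-ins for kstrdup()/kfree(); struct fake_drm_device and the "pci:..." string mirror the virtio hunk below but are illustrative:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_drm_device {
	char *unique;
};

/* replace any previous unique name; report failure like -ENOMEM */
static int fake_dev_set_unique(struct fake_drm_device *dev, const char *name)
{
	free(dev->unique);
	dev->unique = strdup(name);
	return dev->unique ? 0 : -12 /* -ENOMEM */;
}

int main(void)
{
	struct fake_drm_device dev = { NULL };
	char unique[20];

	/* mirrors the snprintf(unique, ..., "pci:%s", pname) usage below */
	snprintf(unique, sizeof(unique), "pci:%s", "0000:00:02.0");
	if (fake_dev_set_unique(&dev, unique) == 0)
		printf("unique = %s\n", dev.unique);
	free(dev.unique);
	return 0;
}
```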
@@ -68,6 +68,12 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
 
 	ipu_dc_disable_channel(ipu_crtc->dc);
 	ipu_di_disable(ipu_crtc->di);
+	/*
+	 * Planes must be disabled before DC clock is removed, as otherwise the
+	 * attached IDMACs will be left in undefined state, possibly hanging
+	 * the IPU or even system.
+	 */
+	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
 	ipu_dc_disable(ipu);
 
 	spin_lock_irq(&crtc->dev->event_lock);
@@ -77,9 +83,6 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
 	}
 	spin_unlock_irq(&crtc->dev->event_lock);
 
-	/* always disable planes on the CRTC */
-	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
-
 	drm_crtc_vblank_off(crtc);
 }
@@ -139,6 +139,7 @@ struct msm_dsi_host {
 
 	u32 err_work_state;
 	struct work_struct err_work;
+	struct work_struct hpd_work;
 	struct workqueue_struct *workqueue;
 
 	/* DSI 6G TX buffer*/
@@ -1294,6 +1295,14 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
 	wmb(); /* make sure dsi controller enabled again */
 }
 
+static void dsi_hpd_worker(struct work_struct *work)
+{
+	struct msm_dsi_host *msm_host =
+		container_of(work, struct msm_dsi_host, hpd_work);
+
+	drm_helper_hpd_irq_event(msm_host->dev);
+}
+
 static void dsi_err_worker(struct work_struct *work)
 {
 	struct msm_dsi_host *msm_host =
@@ -1480,7 +1489,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
 
 	DBG("id=%d", msm_host->id);
 	if (msm_host->dev)
-		drm_helper_hpd_irq_event(msm_host->dev);
+		queue_work(msm_host->workqueue, &msm_host->hpd_work);
 
 	return 0;
 }
@@ -1494,7 +1503,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
 
 	DBG("id=%d", msm_host->id);
 	if (msm_host->dev)
-		drm_helper_hpd_irq_event(msm_host->dev);
+		queue_work(msm_host->workqueue, &msm_host->hpd_work);
 
 	return 0;
 }
@@ -1748,6 +1757,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 	/* setup workqueue */
 	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
 	INIT_WORK(&msm_host->err_work, dsi_err_worker);
+	INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
 
 	msm_dsi->host = &msm_host->base;
 	msm_dsi->id = msm_host->id;
@@ -521,6 +521,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
 		.parent_names = (const char *[]){ "xo" },
 		.num_parents = 1,
 		.name = vco_name,
+		.flags = CLK_IGNORE_UNUSED,
 		.ops = &clk_ops_dsi_pll_28nm_vco,
 	};
 	struct device *dev = &pll_28nm->pdev->dev;
@@ -412,6 +412,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
 	struct clk_init_data vco_init = {
 		.parent_names = (const char *[]){ "pxo" },
 		.num_parents = 1,
+		.flags = CLK_IGNORE_UNUSED,
 		.ops = &clk_ops_dsi_pll_28nm_vco,
 	};
 	struct device *dev = &pll_28nm->pdev->dev;
@@ -702,6 +702,7 @@ static struct clk_init_data pll_init = {
 	.ops = &hdmi_8996_pll_ops,
 	.parent_names = hdmi_pll_parents,
 	.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+	.flags = CLK_IGNORE_UNUSED,
 };
 
 int msm_hdmi_pll_8996_init(struct platform_device *pdev)
@@ -424,6 +424,7 @@ static struct clk_init_data pll_init = {
 	.ops = &hdmi_pll_ops,
 	.parent_names = hdmi_pll_parents,
 	.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+	.flags = CLK_IGNORE_UNUSED,
 };
 
 int msm_hdmi_pll_8960_init(struct platform_device *pdev)
@@ -272,7 +272,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
 		.count = 2,
 		.base = { 0x14000, 0x16000 },
 		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
-				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+				MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_dma = {
 		.count = 1,
@@ -282,7 +282,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
 	.lm = {
 		.count = 2, /* LM0 and LM3 */
 		.base = { 0x44000, 0x47000 },
-		.nb_stages = 5,
+		.nb_stages = 8,
 		.max_width = 2048,
 		.max_height = 0xFFFF,
 	},
@@ -223,12 +223,7 @@ static void blend_setup(struct drm_crtc *crtc)
 		plane_cnt++;
 	}
 
-	/*
-	 * If there is no base layer, enable border color.
-	 * Although it's not possbile in current blend logic,
-	 * put it here as a reminder.
-	 */
-	if (!pstates[STAGE_BASE] && plane_cnt) {
+	if (!pstates[STAGE_BASE]) {
 		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
 		DBG("Border Color is enabled");
 	}
@@ -365,6 +360,15 @@ static int pstate_cmp(const void *a, const void *b)
 	return pa->state->zpos - pb->state->zpos;
 }
 
+/* is there a helper for this? */
+static bool is_fullscreen(struct drm_crtc_state *cstate,
+		struct drm_plane_state *pstate)
+{
+	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
+		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
+		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
+}
+
 static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -375,21 +379,11 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	struct plane_state pstates[STAGE_MAX + 1];
 	const struct mdp5_cfg_hw *hw_cfg;
 	const struct drm_plane_state *pstate;
-	int cnt = 0, i;
+	int cnt = 0, base = 0, i;
 
 	DBG("%s: check", mdp5_crtc->name);
 
-	/* verify that there are not too many planes attached to crtc
-	 * and that we don't have conflicting mixer stages:
-	 */
-	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
-		if (cnt >= (hw_cfg->lm.nb_stages)) {
-			dev_err(dev->dev, "too many planes!\n");
-			return -EINVAL;
-		}
-
 		pstates[cnt].plane = plane;
 		pstates[cnt].state = to_mdp5_plane_state(pstate);
 
@@ -399,8 +393,24 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	/* assign a stage based on sorted zpos property */
 	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
 
+	/* if the bottom-most layer is not fullscreen, we need to use
+	 * it for solid-color:
+	 */
+	if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
+		base++;
+
+	/* verify that there are not too many planes attached to crtc
+	 * and that we don't have conflicting mixer stages:
+	 */
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+	if ((cnt + base) >= hw_cfg->lm.nb_stages) {
+		dev_err(dev->dev, "too many planes!\n");
+		return -EINVAL;
+	}
+
 	for (i = 0; i < cnt; i++) {
-		pstates[i].state->stage = STAGE_BASE + i;
+		pstates[i].state->stage = STAGE_BASE + i + base;
 		DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
 			pipe2name(mdp5_plane_pipe(pstates[i].plane)),
 			pstates[i].state->stage);
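The is_fullscreen() check added above reserves the base mixer stage for the border color whenever the bottom-most plane does not cover the whole mode, shifting every plane up by one stage. A self-contained C sketch of the same geometry test — struct rect and the mode dimensions are illustrative:

```c
#include <stdbool.h>
#include <stdio.h>

struct rect { int x, y, w, h; };

/* a plane is fullscreen if it starts at or before the origin and
 * extends to or beyond the mode's width and height */
static bool is_fullscreen(struct rect p, int hdisplay, int vdisplay)
{
	return p.x <= 0 && p.y <= 0 &&
	       p.x + p.w >= hdisplay &&
	       p.y + p.h >= vdisplay;
}

int main(void)
{
	struct rect video = { 0, 60, 1920, 960 };  /* letterboxed plane */
	int base = 0;

	/* like mdp5_crtc_atomic_check(): keep STAGE_BASE free for the
	 * border color when the bottom layer leaves gaps */
	if (!is_fullscreen(video, 1920, 1080))
		base++;
	printf("planes start at stage offset %d\n", base);
	return 0;
}
```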
@@ -307,8 +307,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 		format = to_mdp_format(msm_framebuffer_format(state->fb));
 		if (MDP_FORMAT_IS_YUV(format) &&
 			!pipe_supports_yuv(mdp5_plane->caps)) {
-			dev_err(plane->dev->dev,
-				"Pipe doesn't support YUV\n");
+			DBG("Pipe doesn't support YUV\n");
 
 			return -EINVAL;
 		}
@@ -316,8 +315,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 		if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
 			(((state->src_w >> 16) != state->crtc_w) ||
 			((state->src_h >> 16) != state->crtc_h))) {
-			dev_err(plane->dev->dev,
-				"Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
+			DBG("Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
 				state->src_w >> 16, state->src_h >> 16,
 				state->crtc_w, state->crtc_h);
 
@@ -333,8 +331,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 
 		if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
 			(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
-			dev_err(plane->dev->dev,
-				"Pipe doesn't support flip\n");
+			DBG("Pipe doesn't support flip\n");
 
 			return -EINVAL;
 		}
@@ -234,7 +234,7 @@ static int msm_drm_uninit(struct device *dev)
 	flush_workqueue(priv->atomic_wq);
 	destroy_workqueue(priv->atomic_wq);
 
-	if (kms)
+	if (kms && kms->funcs)
 		kms->funcs->destroy(kms);
 
 	if (gpu) {
@@ -163,6 +163,9 @@ void msm_gem_shrinker_init(struct drm_device *dev)
 void msm_gem_shrinker_cleanup(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
-	unregister_shrinker(&priv->shrinker);
+
+	if (priv->shrinker.nr_deferred) {
+		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
+		unregister_shrinker(&priv->shrinker);
+	}
 }
@@ -578,7 +578,7 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
 	return 0;
 }
 
-int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
+static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
 {
 	struct qxl_rect rect;
 	int ret;
@@ -36,7 +36,7 @@ static bool qxl_head_enabled(struct qxl_head *head)
 	return head->width && head->height;
 }
 
-void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
+static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
 {
 	if (qdev->client_monitors_config &&
 	    count > qdev->client_monitors_config->count) {
@@ -57,11 +57,18 @@ void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
 	qdev->client_monitors_config->count = count;
 }
 
+enum {
+	MONITORS_CONFIG_MODIFIED,
+	MONITORS_CONFIG_UNCHANGED,
+	MONITORS_CONFIG_BAD_CRC,
+};
+
 static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
 {
 	int i;
 	int num_monitors;
 	uint32_t crc;
+	int status = MONITORS_CONFIG_UNCHANGED;
 
 	num_monitors = qdev->rom->client_monitors_config.count;
 	crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
@@ -70,7 +77,7 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
 		qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc,
 			   sizeof(qdev->rom->client_monitors_config),
 			   qdev->rom->client_monitors_config_crc);
-		return 1;
+		return MONITORS_CONFIG_BAD_CRC;
 	}
 	if (num_monitors > qdev->monitors_config->max_allowed) {
 		DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n",
@@ -79,6 +86,10 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
 	} else {
 		num_monitors = qdev->rom->client_monitors_config.count;
 	}
+	if (qdev->client_monitors_config
+	      && (num_monitors != qdev->client_monitors_config->count)) {
+		status = MONITORS_CONFIG_MODIFIED;
+	}
 	qxl_alloc_client_monitors_config(qdev, num_monitors);
 	/* we copy max from the client but it isn't used */
 	qdev->client_monitors_config->max_allowed =
@@ -88,17 +99,39 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
 			&qdev->rom->client_monitors_config.heads[i];
 		struct qxl_head *client_head =
 			&qdev->client_monitors_config->heads[i];
-		client_head->x = c_rect->left;
-		client_head->y = c_rect->top;
-		client_head->width = c_rect->right - c_rect->left;
-		client_head->height = c_rect->bottom - c_rect->top;
-		client_head->surface_id = 0;
-		client_head->id = i;
-		client_head->flags = 0;
+		if (client_head->x != c_rect->left) {
+			client_head->x = c_rect->left;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		if (client_head->y != c_rect->top) {
+			client_head->y = c_rect->top;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		if (client_head->width != c_rect->right - c_rect->left) {
+			client_head->width = c_rect->right - c_rect->left;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		if (client_head->height != c_rect->bottom - c_rect->top) {
+			client_head->height = c_rect->bottom - c_rect->top;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		if (client_head->surface_id != 0) {
+			client_head->surface_id = 0;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		if (client_head->id != i) {
+			client_head->id = i;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		if (client_head->flags != 0) {
+			client_head->flags = 0;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
 		DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width, client_head->height,
 			      client_head->x, client_head->y);
 	}
-	return 0;
+
+	return status;
 }
 
 static void qxl_update_offset_props(struct qxl_device *qdev)
@@ -124,9 +157,18 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
 {
 
 	struct drm_device *dev = qdev->ddev;
-	while (qxl_display_copy_rom_client_monitors_config(qdev)) {
+	int status;
+
+	status = qxl_display_copy_rom_client_monitors_config(qdev);
+	while (status == MONITORS_CONFIG_BAD_CRC) {
 		qxl_io_log(qdev, "failed crc check for client_monitors_config,"
 				 " retrying\n");
+		status = qxl_display_copy_rom_client_monitors_config(qdev);
+	}
+	if (status == MONITORS_CONFIG_UNCHANGED) {
+		qxl_io_log(qdev, "config unchanged\n");
+		DRM_DEBUG("ignoring unchanged client monitors config");
+		return;
 	}
 
 	drm_modeset_lock_all(dev);
@@ -157,6 +199,9 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
 	mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
 			    false);
 	mode->type |= DRM_MODE_TYPE_PREFERRED;
+	mode->hdisplay = head->width;
+	mode->vdisplay = head->height;
+	drm_mode_set_name(mode);
 	*pwidth = head->width;
 	*pheight = head->height;
 	drm_mode_probed_add(connector, mode);
@@ -607,7 +652,7 @@ static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
-void
+static void
 qxl_send_monitors_config(struct qxl_device *qdev)
 {
 	int i;
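The qxl hunks above turn the copy function's return value into a tri-state status: keep re-reading while the ROM snapshot fails its CRC (it was torn mid-update), and bail out early when nothing actually changed so userspace is not notified needlessly. A self-contained C sketch of that control flow, with read_config() standing in for the real qxl helper:

```c
#include <stdio.h>

enum {
	CONFIG_MODIFIED,
	CONFIG_UNCHANGED,
	CONFIG_BAD_CRC,
};

static int read_config(void)
{
	static int attempts;
	/* pretend the first read races with a host-side update */
	return attempts++ == 0 ? CONFIG_BAD_CRC : CONFIG_UNCHANGED;
}

int main(void)
{
	int status = read_config();

	while (status == CONFIG_BAD_CRC) {      /* torn snapshot: retry */
		puts("failed crc check, retrying");
		status = read_config();
	}
	if (status == CONFIG_UNCHANGED) {       /* nothing to tell userspace */
		puts("config unchanged, skipping notification");
		return 0;
	}
	puts("config modified, notifying userspace");
	return 0;
}
```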
@@ -395,16 +395,11 @@ qxl_framebuffer_init(struct drm_device *dev,
 			struct drm_gem_object *obj,
 			const struct drm_framebuffer_funcs *funcs);
 void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
-void qxl_send_monitors_config(struct qxl_device *qdev);
 int qxl_create_monitors_object(struct qxl_device *qdev);
 int qxl_destroy_monitors_object(struct qxl_device *qdev);
 
-/* used by qxl_debugfs only */
-void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
-void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
-
 /* qxl_gem.c */
-int qxl_gem_init(struct qxl_device *qdev);
+void qxl_gem_init(struct qxl_device *qdev);
 void qxl_gem_fini(struct qxl_device *qdev);
 int qxl_gem_object_create(struct qxl_device *qdev, int size,
 			  int alignment, int initial_domain,
@@ -574,6 +569,5 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
 struct qxl_drv_surface *
 qxl_surface_lookup(struct drm_device *dev, int surface_id);
 void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
-int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
 
 #endif
@@ -191,7 +191,7 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
 	/*
 	 * we are using a shadow draw buffer, at qdev->surface0_shadow
 	 */
-	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", clips->x1, clips->x2,
+	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]\n", clips->x1, clips->x2,
 		   clips->y1, clips->y2);
 	image->dx = clips->x1;
 	image->dy = clips->y1;
@@ -111,10 +111,9 @@ void qxl_gem_object_close(struct drm_gem_object *obj,
 {
 }
 
-int qxl_gem_init(struct qxl_device *qdev)
+void qxl_gem_init(struct qxl_device *qdev)
 {
 	INIT_LIST_HEAD(&qdev->gem.objects);
-	return 0;
 }
 
 void qxl_gem_fini(struct qxl_device *qdev)
@@ -131,7 +131,7 @@ static int qxl_device_init(struct qxl_device *qdev,
 	mutex_init(&qdev->update_area_mutex);
 	mutex_init(&qdev->release_mutex);
 	mutex_init(&qdev->surf_evict_mutex);
-	INIT_LIST_HEAD(&qdev->gem.objects);
+	qxl_gem_init(qdev);
 
 	qdev->rom_base = pci_resource_start(pdev, 2);
 	qdev->rom_size = pci_resource_len(pdev, 2);
@@ -273,6 +273,7 @@ static void qxl_device_fini(struct qxl_device *qdev)
 	qxl_ring_free(qdev->command_ring);
 	qxl_ring_free(qdev->cursor_ring);
 	qxl_ring_free(qdev->release_ring);
+	qxl_gem_fini(qdev);
 	qxl_bo_fini(qdev);
 	io_mapping_free(qdev->surface_mapping);
 	io_mapping_free(qdev->vram_mapping);
@@ -931,7 +931,7 @@ static void radeon_connector_unregister(struct drm_connector *connector)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
-	if (radeon_connector->ddc_bus->has_aux) {
+	if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) {
 		drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
 		radeon_connector->ddc_bus->has_aux = false;
 	}
@@ -104,6 +104,14 @@ static const char radeon_family_name[][16] = {
 	"LAST",
 };
 
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
+#endif
+
 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
 
@@ -160,6 +168,11 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
 
 	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
 		rdev->flags &= ~RADEON_IS_PX;
+
+	/* disable PX is the system doesn't support dGPU power control or hybrid gfx */
+	if (!radeon_is_atpx_hybrid() &&
+	    !radeon_has_atpx_dgpu_power_cntl())
+		rdev->flags &= ~RADEON_IS_PX;
 }
 
 /**
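The radeon hunk above uses the common Kconfig stub pattern: real prototypes when CONFIG_VGA_SWITCHEROO is enabled, static inline stubs returning false otherwise, so the caller in radeon_device_handle_px_quirks() needs no #ifdef of its own. A compilable sketch of the pattern — here CONFIG_VGA_SWITCHEROO is just an ordinary undefined macro standing in for the real Kconfig symbol, so the stub branch is taken:

```c
#include <stdbool.h>
#include <stdio.h>

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
/* stubs keep every call site ifdef-free when the feature is off */
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif

int main(void)
{
	if (!radeon_is_atpx_hybrid() && !radeon_has_atpx_dgpu_power_cntl())
		puts("no dGPU power control or hybrid gfx: disabling PX");
	return 0;
}
```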
@@ -98,17 +98,23 @@ success:
 static int udl_select_std_channel(struct udl_device *udl)
 {
 	int ret;
-	u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
-			    0x1C, 0x88, 0x5E, 0x15,
-			    0x60, 0xFE, 0xC6, 0x97,
-			    0x16, 0x3D, 0x47, 0xF2};
+	static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
+					 0x1C, 0x88, 0x5E, 0x15,
+					 0x60, 0xFE, 0xC6, 0x97,
+					 0x16, 0x3D, 0x47, 0xF2};
+	void *sendbuf;
+
+	sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
+	if (!sendbuf)
+		return -ENOMEM;
 
 	ret = usb_control_msg(udl->udev,
 			      usb_sndctrlpipe(udl->udev, 0),
 			      NR_USB_REQUEST_CHANNEL,
 			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
-			      set_def_chn, sizeof(set_def_chn),
+			      sendbuf, sizeof(set_def_chn),
 			      USB_CTRL_SET_TIMEOUT);
+
+	kfree(sendbuf);
 	return ret < 0 ? ret : 0;
 }
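The udl fix above keeps the channel-selection table in rodata and hands usb_control_msg() a kmemdup()'d heap copy, because USB transfer buffers must be DMA-able and may not live on the stack or in read-only data. A userspace analogue of the pattern, with do_transfer() standing in for usb_control_msg() and malloc()+memcpy() for kmemdup():

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for usb_control_msg(): only ever sees the heap copy */
static int do_transfer(const void *buf, size_t len)
{
	printf("sending %zu bytes, first byte 0x%02X\n",
	       len, (unsigned)*(const unsigned char *)buf);
	return 0;
}

int main(void)
{
	static const unsigned char set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7 };
	void *sendbuf;
	int ret;

	sendbuf = malloc(sizeof(set_def_chn));   /* kmemdup() equivalent */
	if (!sendbuf)
		return -1;
	memcpy(sendbuf, set_def_chn, sizeof(set_def_chn));

	ret = do_transfer(sendbuf, sizeof(set_def_chn));
	free(sendbuf);
	return ret;
}
```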
@@ -28,16 +28,6 @@
 
 #include "virtgpu_drv.h"
 
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
-{
-	struct pci_dev *pdev = dev->pdev;
-
-	if (pdev) {
-		return drm_pci_set_busid(dev, master);
-	}
-	return 0;
-}
-
 static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
 {
 	struct apertures_struct *ap;
@@ -71,13 +61,22 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
 
 	if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) {
 		struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+		const char *pname = dev_name(&pdev->dev);
 		bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+		char unique[20];
 
-		DRM_INFO("pci: %s detected\n",
-			 vga ? "virtio-vga" : "virtio-gpu-pci");
+		DRM_INFO("pci: %s detected at %s\n",
+			 vga ? "virtio-vga" : "virtio-gpu-pci",
+			 pname);
 		dev->pdev = pdev;
 		if (vga)
			virtio_pci_kick_out_firmware_fb(pdev);
+
+		snprintf(unique, sizeof(unique), "pci:%s", pname);
+		ret = drm_dev_set_unique(dev, unique);
+		if (ret)
+			goto err_free;
+
 	}
 
 	ret = drm_dev_register(dev, 0);
@@ -115,7 +115,6 @@ static const struct file_operations virtio_gpu_driver_fops = {
 
 static struct drm_driver driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
-	.set_busid = drm_virtio_set_busid,
 	.load = virtio_gpu_driver_load,
 	.unload = virtio_gpu_driver_unload,
 	.open = virtio_gpu_driver_open,
@@ -49,7 +49,6 @@
 #define DRIVER_PATCHLEVEL 1
 
 /* virtgpu_drm_bus.c */
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
 int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
 
 struct virtio_gpu_object {
@@ -75,7 +75,7 @@ void virtio_gpu_cursor_ack(struct virtqueue *vq)
 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
 {
 	struct virtio_gpu_vbuffer *vbuf;
-	int i, size, count = 0;
+	int i, size, count = 16;
 	void *ptr;
 
 	INIT_LIST_HEAD(&vgdev->free_vbufs);