Pull in char-misc-next from Greg
We need 32ea33a044 ("mei: bus: export to_mei_cl_device for mei client
devices drivers") for the mei-hdcp patches.
References: https://lkml.org/lkml/2019/2/19/356
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
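
[Editor's illustrative sketch, not part of the merge: the quoted commit moves
the to_mei_cl_device() container_of() helper into <linux/mei_cl_bus.h>, which
a client driver such as mei-hdcp needs because its component callbacks receive
a bare struct device * and must recover the mei_cl_device it was probed with.
The callback name and body below are assumptions for illustration only.]

#include <linux/device.h>
#include <linux/mei_cl_bus.h>

static int mei_hdcp_component_bind(struct device *dev,
				   struct device *master, void *data)
{
	/* to_mei_cl_device() is the helper the quoted commit exports
	 * for use outside drivers/misc/mei. */
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	dev_info(&cldev->dev, "bound to master %s\n", dev_name(master));
	return 0;
}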
@@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 		effective_mode &= ~S_IWUSR;
 
 	if ((adev->flags & AMD_IS_APU) &&
-	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
+	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
 	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
 		return 0;
@@ -38,6 +38,7 @@
 #include "amdgpu_gem.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
 
 /**
  * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ error:
 	return ERR_PTR(ret);
 }
 
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+	struct dma_fence **fences;
+	unsigned int count;
+	int r;
+
+	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+		return 0;
+
+	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+	if (r)
+		return r;
+
+	if (count == 0) {
+		/* Now that was unexpected. */
+	} else if (count == 1) {
+		reservation_object_add_excl_fence(obj, fences[0]);
+		dma_fence_put(fences[0]);
+		kfree(fences);
+	} else {
+		struct dma_fence_array *array;
+
+		array = dma_fence_array_create(count, fences,
+					       dma_fence_context_alloc(1), 0,
+					       false);
+		if (!array)
+			goto err_fences_put;
+
+		reservation_object_add_excl_fence(obj, &array->base);
+		dma_fence_put(&array->base);
+	}
+
+	return 0;
+
+err_fences_put:
+	while (count--)
+		dma_fence_put(fences[count]);
+	kfree(fences);
+	return -ENOMEM;
+}
+
 /**
  * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
  * @dma_buf: Shared DMA buffer
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
 
 	if (attach->dev->driver != adev->dev->driver) {
 		/*
-		 * Wait for all shared fences to complete before we switch to future
-		 * use of exclusive fence on this prime shared bo.
+		 * We only create shared fences for internal use, but importers
+		 * of the dmabuf rely on exclusive fences for implicitly
+		 * tracking write hazards. As any of the current fences may
+		 * correspond to a write, we need to convert all existing
+		 * fences on the reservation object into a single exclusive
+		 * fence.
 		 */
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-							true, false,
-							MAX_SCHEDULE_TIMEOUT);
-		if (unlikely(r < 0)) {
-			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+		r = __reservation_object_make_exclusive(bo->tbo.resv);
+		if (r)
 			goto error_unreserve;
-		}
 	}
 
 	/* pin buffer into GTT */
@@ -3363,14 +3363,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
 			     struct amdgpu_task_info *task_info)
 {
 	struct amdgpu_vm *vm;
+	unsigned long flags;
 
-	spin_lock(&adev->vm_manager.pasid_lock);
+	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
 
 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
 	if (vm)
 		*task_info = vm->task_info;
 
-	spin_unlock(&adev->vm_manager.pasid_lock);
+	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 }
 
 /**
@@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
 static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
 							 bool enable)
 {
+	u32 tmp = 0;
 
+	if (enable) {
+		tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
+		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
+		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
+
+		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
+			     lower_32_bits(adev->doorbell.base));
+		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
+			     upper_32_bits(adev->doorbell.base));
+	}
+
+	WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
 }
 
 static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
@@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle)
 	case CHIP_RAVEN:
 		adev->asic_funcs = &soc15_asic_funcs;
 		if (adev->rev_id >= 0x8)
-			adev->external_rev_id = adev->rev_id + 0x81;
+			adev->external_rev_id = adev->rev_id + 0x79;
 		else if (adev->pdev->device == 0x15d8)
 			adev->external_rev_id = adev->rev_id + 0x41;
+		else if (adev->rev_id == 1)
+			adev->external_rev_id = adev->rev_id + 0x20;
 		else
-			adev->external_rev_id = 0x1;
+			adev->external_rev_id = adev->rev_id + 0x01;
 
 		if (adev->rev_id >= 0x8) {
 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
@@ -863,7 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
 	return 0;
 }
 
-#if CONFIG_X86_64
+#ifdef CONFIG_X86_64
 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
 					uint32_t *num_entries,
 					struct crat_subtype_iolink *sub_type_hdr)
@@ -4082,7 +4082,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 	}
 
 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
-	    connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	    connector_type == DRM_MODE_CONNECTOR_eDP) {
 		drm_connector_attach_vrr_capable_property(
 			&aconnector->base);
 	}
@@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements(
 			dc,
 			context->bw.dce.sclk_khz);
 
-	pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+	/*
+	 * As workaround for >4x4K lightup set dcfclock to min_engine_clock value.
+	 * This is not required for less than 5 displays,
+	 * thus don't request decfclk in dc to avoid impact
+	 * on power saving.
+	 *
+	 */
+	pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)?
+			pp_display_cfg->min_engine_clock_khz : 0;
 
 	pp_display_cfg->min_engine_clock_deep_sleep_khz
 		= context->bw.dce.sclk_deep_sleep_khz;
@@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
 		break;
 	case amd_pp_dpp_clock:
 		pclk_vol_table = pinfo->vdd_dep_on_dppclk;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
 	if (mode->hsync)
 		return mode->hsync;
 
-	if (mode->htotal < 0)
+	if (mode->htotal <= 0)
 		return 0;
 
 	calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
@@ -1086,7 +1086,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
 			return DDI_CLK_SEL_TBT_810;
 		default:
 			MISSING_CASE(clock);
-			break;
+			return DDI_CLK_SEL_NONE;
 		}
 	case DPLL_ID_ICL_MGPLL1:
 	case DPLL_ID_ICL_MGPLL2:
@@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 	}
 }
 
+static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+	/*
+	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
+	 * the hardware when a high res displays plugged in. DPLL P
+	 * divider is zero, and the pipe timings are bonkers. We'll
+	 * try to disable everything in that case.
+	 *
+	 * FIXME would be nice to be able to sanitize this state
+	 * without several WARNs, but for now let's take the easy
+	 * road.
+	 */
+	return IS_GEN6(dev_priv) &&
+		crtc_state->base.active &&
+		crtc_state->shared_dpll &&
+		crtc_state->port_clock == 0;
+}
+
 static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_connector *connector;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_crtc_state *crtc_state = crtc ?
+		to_intel_crtc_state(crtc->base.state) : NULL;
 
 	/* We need to check both for a crtc link (meaning that the
 	 * encoder is active and trying to read from a pipe) and the
 	 * pipe itself being active. */
-	bool has_active_crtc = encoder->base.crtc &&
-		to_intel_crtc(encoder->base.crtc)->active;
+	bool has_active_crtc = crtc_state &&
+		crtc_state->base.active;
+
+	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
+		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+			      pipe_name(crtc->pipe));
+		has_active_crtc = false;
+	}
 
 	connector = intel_encoder_find_connector(encoder);
 	if (connector && !has_active_crtc) {
@@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 		/* Connector is active, but has no active pipe. This is
 		 * fallout from our resume register restoring. Disable
 		 * the encoder manually again. */
-		if (encoder->base.crtc) {
-			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
+		if (crtc_state) {
+			struct drm_encoder *best_encoder;
 
 			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
 				      encoder->base.base.id,
 				      encoder->base.name);
+
+			/* avoid oopsing in case the hooks consult best_encoder */
+			best_encoder = connector->base.state->best_encoder;
+			connector->base.state->best_encoder = &encoder->base;
+
 			if (encoder->disable)
-				encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+				encoder->disable(encoder, crtc_state,
+						 connector->base.state);
 			if (encoder->post_disable)
-				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+				encoder->post_disable(encoder, crtc_state,
+						      connector->base.state);
+
+			connector->base.state->best_encoder = best_encoder;
 		}
 		encoder->base.crtc = NULL;
 
@@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane,
 
 	keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
 
-	keymsk = key->channel_mask & 0x3ffffff;
+	keymsk = key->channel_mask & 0x7ffffff;
 	if (alpha < 0xff)
 		keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
 
@@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll)
 
 static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
 {
-	struct dsi_data *dsi = p;
+	struct dsi_data *dsi = s->private;
 	struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
 	enum dss_clk_source dispc_clk_src, dsi_clk_src;
 	int dsi_module = dsi->module_id;
@@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
 static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
 {
-	struct dsi_data *dsi = p;
+	struct dsi_data *dsi = s->private;
 	unsigned long flags;
 	struct dsi_irq_stats stats;
 
@@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
 
 static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
 {
-	struct dsi_data *dsi = p;
+	struct dsi_data *dsi = s->private;
 
 	if (dsi_runtime_get(dsi))
 		return 0;
@@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
 	dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
 	dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
 	dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+	/*
+	 * HACK: These flags should be handled through the omap_dss_device bus
+	 * flags, but this will only be possible when the DSI encoder will be
+	 * converted to the omapdrm-managed encoder model.
+	 */
+	dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+	dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+	dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+	dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+	dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+	dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
 
 	dss_mgr_set_timings(&dsi->output, &dsi->vm);
@@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
 
 	snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
 	dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
-						    dsi_dump_dsi_regs, &dsi);
+						    dsi_dump_dsi_regs, dsi);
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
 	snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
 	dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
-						    dsi_dump_dsi_irqs, &dsi);
+						    dsi_dump_dsi_irqs, dsi);
 #endif
 	snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
 	dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
-						    dsi_dump_dsi_clocks, &dsi);
+						    dsi_dump_dsi_clocks, dsi);
 
 	return 0;
 }
@@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data)
 	dss_debugfs_remove_file(dsi->debugfs.irqs);
 	dss_debugfs_remove_file(dsi->debugfs.regs);
 
-	of_platform_depopulate(dev);
-
 	WARN_ON(dsi->scp_clk_refcount > 0);
 
 	dss_pll_unregister(&dsi->pll);
@@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev)
 
 	dsi_uninit_output(dsi);
 
+	of_platform_depopulate(&pdev->dev);
+
 	pm_runtime_disable(&pdev->dev);
 
 	if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
 	u16 data_offset, size;
 	u8 frev, crev;
 	struct ci_power_info *pi;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
 		return -ENOMEM;
 	rdev->pm.dpm.priv = pi;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		pi->sys_pcie_mask = 0;
 	} else {
@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
 	struct ni_power_info *ni_pi;
 	struct si_power_info *si_pi;
 	struct atom_clock_dividers dividers;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
 	eg_pi = &ni_pi->eg;
 	pi = &eg_pi->rv7xx;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		si_pi->sys_pcie_mask = 0;
 	} else {
@@ -1,17 +1,8 @@
-//SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  * Author:
  *      Sandy Huang <hjc@rock-chips.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <drm/drmP.h>
@@ -1,17 +1,8 @@
-//SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  * Author:
  *      Sandy Huang <hjc@rock-chips.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
 */
 
 #ifdef CONFIG_ROCKCHIP_RGB
@@ -761,6 +761,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
 			return PTR_ERR(tcon->sclk0);
 		}
 	}
+	clk_prepare_enable(tcon->sclk0);
 
 	if (tcon->quirks->has_channel_1) {
 		tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
@@ -775,6 +776,7 @@
 
 static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
 {
+	clk_disable_unprepare(tcon->sclk0);
 	clk_disable_unprepare(tcon->clk);
 }
 
@@ -26,6 +26,7 @@
  **************************************************************************/
 #include <linux/module.h>
 #include <linux/console.h>
+#include <linux/dma-mapping.h>
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
@@ -34,7 +35,6 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_module.h>
-#include <linux/intel-iommu.h>
 
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
 #define VMWGFX_CHIP_SVGAII 0
@@ -545,6 +545,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
 	dev_priv->initial_height = height;
 }
 
+/**
+ * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
+ * taking place.
+ * @dev: Pointer to the struct drm_device.
+ *
+ * Return: true if iommu present, false otherwise.
+ */
+static bool vmw_assume_iommu(struct drm_device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev->dev);
+
+	return !dma_is_direct(ops) && ops &&
+		ops->map_page != dma_direct_map_page;
+}
+
 /**
  * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
  * system.
@@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 		[vmw_dma_map_populate] = "Keeping DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
-#ifdef CONFIG_X86
-	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
-
-#ifdef CONFIG_INTEL_IOMMU
-	if (intel_iommu_enabled) {
-		dev_priv->map_mode = vmw_dma_map_populate;
-		goto out_fixup;
-	}
-#endif
-
-	if (!(vmw_force_iommu || vmw_force_coherent)) {
-		dev_priv->map_mode = vmw_dma_phys;
-		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-		return 0;
-	}
-
-	dev_priv->map_mode = vmw_dma_map_populate;
-
-	if (dma_ops && dma_ops->sync_single_for_cpu)
-		dev_priv->map_mode = vmw_dma_alloc_coherent;
-#ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl() == 0)
-		dev_priv->map_mode = vmw_dma_map_populate;
-#endif
 
-#ifdef CONFIG_INTEL_IOMMU
-out_fixup:
-#endif
-	if (dev_priv->map_mode == vmw_dma_map_populate &&
-	    vmw_restrict_iommu)
-		dev_priv->map_mode = vmw_dma_map_bind;
-
 	if (vmw_force_coherent)
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
+	else if (vmw_assume_iommu(dev_priv->dev))
+		dev_priv->map_mode = vmw_dma_map_populate;
+	else if (!vmw_force_iommu)
+		dev_priv->map_mode = vmw_dma_phys;
+	else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
+		dev_priv->map_mode = vmw_dma_alloc_coherent;
+	else
+		dev_priv->map_mode = vmw_dma_map_populate;
 
-#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
-	/*
-	 * No coherent page pool
-	 */
-	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+	if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
+		dev_priv->map_mode = vmw_dma_map_bind;
+
+	/* No TTM coherent page pool? FIXME: Ask TTM instead! */
+	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
+	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
 		return -EINVAL;
-#endif
-
-#else /* CONFIG_X86 */
-	dev_priv->map_mode = vmw_dma_map_populate;
-#endif /* CONFIG_X86 */
 
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
 	return 0;
 }
@@ -625,24 +612,20 @@ out_fixup:
  * With 32-bit we can only handle 32 bit PFNs. Optionally set that
  * restriction also for 64-bit systems.
  */
-#ifdef CONFIG_INTEL_IOMMU
 static int vmw_dma_masks(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
+	int ret = 0;
 
-	if (intel_iommu_enabled &&
+	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+	if (dev_priv->map_mode != vmw_dma_phys &&
 	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
 		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
-		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
 	}
-	return 0;
+
+	return ret;
 }
-#else
-static int vmw_dma_masks(struct vmw_private *dev_priv)
-{
-	return 0;
-}
-#endif
 
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 		*p_fence = NULL;
 	}
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
 		struct drm_connector_state *conn_state;
 		struct vmw_connector_state *vmw_conn_state;
 
-		if (!du->pref_active) {
+		if (!du->pref_active && new_crtc_state->enable) {
 			ret = -EINVAL;
 			goto clean;
 		}
@@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
 				      user_fence_rep)
 {
 	struct vmw_fence_obj *fence = NULL;
-	uint32_t handle;
-	int ret;
+	uint32_t handle = 0;
+	int ret = 0;
 
 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
 	    out_fence)