Merge tag 'drm-next-2019-09-18' of git://anongit.freedesktop.org/drm/drm

Pull drm updates from Dave Airlie:
 "This is the main pull request for 5.4-rc1 merge window. I don't think
  there is anything outstanding so next week should just be fixes, but
  we'll see if I missed anything. I landed some fixes earlier in the
  week but got delayed writing summary and sending it out, due to a mix
  of sick kid and jetlag!

  There are some fixes pending, but I'd rather get the main merge out of
  the way instead of delaying it longer.

  It's also pretty large, both in commit count and in the size of the
  new amd header files. The largest thing is support for four new
  amdgpu products (navi12/14, arcturus and the renoir APU).

  Otherwise it's lots of work across the board: i915 has started
  landing tigerlake support, plus lots of icelake fixes, lots of
  locking rework for future gpu support, and lots of header file
  rework (drmP.h is nearly gone); some old legacy hacks (DRM_WAIT_ON)
  have been moved into the places that still need them.

  uapi:
   - content protection type property for HDCP

  core:
   - rework include dependencies
   - lots of drmP.h removals
   - link rate calculation robustness fix
   - make fb helper map only when required
   - add connector->DDC adapter link
   - DRM_WAIT_ON removed
   - drop DRM_AUTH usage from drivers

  dma-buf:
   - reservation object fence helper

  dma-fence:
   - shrink dma_fence struct
   - merge signal functions
   - store timestamps in dma_fence (see the sketch below)
   - selftests
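
  A minimal sketch of the timestamp change, assuming the 5.4-era
  dma-fence API (the DMA_FENCE_FLAG_TIMESTAMP_BIT guard and the
  timestamp field are how the signal time is stored; treat the names
  as illustrative, not as this series' exact interface):

	#include <linux/dma-fence.h>

	/* The signal time now lives in struct dma_fence itself (in a
	 * union with the callback list) and is valid only once the
	 * timestamp bit has been set at signaling time. */
	static ktime_t fence_signal_time(struct dma_fence *fence)
	{
		if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
			return fence->timestamp;
		return ns_to_ktime(0);
	}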

  ttm:
   - embed drm_gem_object struct into ttm_buffer_object
   - release_notify callback

  bridges:
   - sii902x - audio graph card support
   - tc358767 - aux data handling rework
   - ti-sn65dsi86 - debugfs support, DSI mode flags support

  panels:
   - Support for GiantPlus GPM940B0, Sharp LQ070Y3DG3B, Ortustech
     COM37H3M, Novatek NT39016, Sharp LS020B1DD01D, Raydium RM67191, Boe
     Himax8279d, Sharp LD-D5116Z01B
   - TI nspire, NEC NL8048HL11, LG Philips LB035Q02, Sharp LS037V7DW01,
     Sony ACX565AKM, Toppoly TD028TTEC1, Toppoly TD043MTEA1

  i915:
   - Initial tigerlake platform support
   - Locking simplification work, general refactoring all over
   - Selftests
   - HDCP debug info improvements
   - DSI properties
   - Icelake display PLL fixes, colorspace fixes, bandwidth fixes, DSI
     suspend/resume
   - GuC fixes
   - Perf fixes
   - ElkhartLake enablement
   - DP MST fixes
   - GVT - command parser enhancements

  amdgpu:
   - add wipe memory on release flag for buffer creation
   - Navi12/14 support (may be marked experimental)
   - Arcturus support
   - Renoir APU support
   - mclk DPM for Navi
   - DC display fixes
   - Raven scatter/gather support
   - RAS support for GFX
   - Navi12 + Arcturus power features
   - GPU reset for Picasso
   - smu11 i2c controller support

  amdkfd:
   - navi12/14 support
   - Arcturus support

  radeon:
   - kexec fix

  nouveau:
   - improved display color management
   - detect lack of GPU power cables

  vmwgfx:
   - eviction priority support
   - remove unused security feature

  msm:
   - msm8998 display support
   - better async commit support for cursor updates

  etnaviv:
   - per-process address space support
   - performance counter fixes
   - softpin support

  mcde:
   - DCS transfers fix

  exynos:
   - drmP.h cleanup

  lima:
   - reduce logging

  kirin:
   - misc cleanups

  komeda:
   - dual-link support
   - DT memory regions

  hisilicon:
   - misc fixes

  imx:
   - IPUv3 image converter fixes
   - 32-bit RGB V4L2 pixel format support

  ingenic:
   - more support for panel related cases

  mgag200:
   - cursor support fix

  panfrost:
   - export GPU features register to userspace
   - gpu heap allocations
   - per-fd address space support

  pl111:
   - CLD pads wiring support removed from DT

  rockchip:
   - rework to use DRM PSR helpers
   - fix bug in VOP_WIN_GET macro
   - DSI DT binding rework

  sun4i:
   - improve support for color encoding and range
   - DDC enabled GPIO

  tinydrm:
   - rework SPI support
   - improve MIPI-DBI support
   - moved to drm/tiny

  vkms:
   - rework CRC tracking

  dw-hdmi:
   - get_eld and i2s improvements

  gm12u320:
   - misc fixes

  meson:
   - global code cleanup
   - vpu feature detect

  omap:
   - alpha/pixel blend mode properties

  rcar-du:
   - misc fixes"

* tag 'drm-next-2019-09-18' of git://anongit.freedesktop.org/drm/drm: (2112 commits)
  drm/nouveau/bar/gm20b: Avoid BAR1 teardown during init
  drm/nouveau: Fix ordering between TTM and GEM release
  drm/nouveau/prime: Extend DMA reservation object lock
  drm/nouveau: Fix fallout from reservation object rework
  drm/nouveau/kms/nv50-: Don't create MSTMs for eDP connectors
  drm/i915: Use NOEVICT for first pass on attemping to pin a GGTT mmap
  drm/i915: to make vgpu ppgtt notificaiton as atomic operation
  drm/i915: Flush the existing fence before GGTT read/write
  drm/i915: Hold irq-off for the entire fake lock period
  drm/i915/gvt: update RING_START reg of vGPU when the context is submitted to i915
  drm/i915/gvt: update vgpu workload head pointer correctly
  drm/mcde: Fix DSI transfers
  drm/msm: Use the correct dma_sync calls harder
  drm/msm: remove unlikely() from WARN_ON() conditions
  drm/msm/dsi: Fix return value check for clk_get_parent
  drm/msm: add atomic traces
  drm/msm/dpu: async commit support
  drm/msm: async commit support
  drm/msm: split power control from prepare/complete_commit
  drm/msm: add kms->flush_commit()
  ...
Committed by Linus Torvalds on 2019-09-19 16:24:24 -07:00
1621 changed files with 262709 additions and 38616 deletions


@@ -7,6 +7,7 @@ config DRM_I915_WERROR
# We use the dependency on !COMPILE_TEST to not be enabled in
# allmodconfig or allyesconfig configurations
depends on !COMPILE_TEST
select HEADER_TEST
default n
help
Add -Werror to the build flags for (and only for) i915.ko.
@@ -29,6 +30,7 @@ config DRM_I915_DEBUG
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select DRM_DEBUG_SELFTEST
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
@@ -94,6 +96,20 @@ config DRM_I915_TRACE_GEM
If in doubt, say "N".
config DRM_I915_TRACE_GTT
bool "Insert extra ftrace output from the GTT internals"
depends on DRM_I915_DEBUG_GEM
select TRACING
default n
help
Enable additional and verbose debugging output that will spam
ordinary tests, but may be vital for post-mortem debugging when
used with /proc/sys/kernel/ftrace_dump_on_oops
Recommended for driver developers only.
If in doubt, say "N".
config DRM_I915_SW_FENCE_DEBUG_OBJECTS
bool "Enable additional driver debugging for fence objects"
depends on DRM_I915


@@ -32,22 +32,25 @@ subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
# Extra header tests
include $(src)/Makefile.header-test
header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
subdir-ccflags-y += -I$(src)
subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
# core driver code
i915-y += i915_drv.o \
i915_irq.o \
i915_getparam.o \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
i915_suspend.o \
i915_sysfs.o \
i915_utils.o \
intel_csr.o \
intel_device_info.o \
intel_pch.o \
intel_pm.o \
intel_runtime_pm.o \
intel_sideband.o \
@@ -59,6 +62,7 @@ i915-y += \
i915_memcpy.o \
i915_mm.o \
i915_sw_fence.o \
i915_sw_fence_work.o \
i915_syncmap.o \
i915_user_extensions.o
@@ -72,17 +76,28 @@ gt-y += \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
gt/intel_engine_cs.o \
gt/intel_engine_pool.o \
gt/intel_engine_pm.o \
gt/intel_engine_user.o \
gt/intel_gt.o \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_irq.o \
gt/intel_hangcheck.o \
gt/intel_lrc.o \
gt/intel_renderstate.o \
gt/intel_reset.o \
gt/intel_ringbuffer.o \
gt/intel_mocs.o \
gt/intel_sseu.o \
gt/intel_timeline.o \
gt/intel_workarounds.o
gt-$(CONFIG_DRM_I915_SELFTEST) += \
gt/mock_engine.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
gt/gen7_renderstate.o \
gt/gen8_renderstate.o \
gt/gen9_renderstate.o
i915-y += $(gt-y)
# GEM (Graphics Execution Management) code
@@ -114,39 +129,32 @@ gem-y += \
i915-y += \
$(gem-y) \
i915_active.o \
i915_buddy.o \
i915_cmd_parser.o \
i915_gem_batch_pool.o \
i915_gem_evict.o \
i915_gem_fence_reg.o \
i915_gem_gtt.o \
i915_gem.o \
i915_gem_render_state.o \
i915_globals.o \
i915_query.o \
i915_request.o \
i915_scheduler.o \
i915_timeline.o \
i915_trace_points.o \
i915_vma.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
i915-y += intel_uc.o \
intel_uc_fw.o \
intel_guc.o \
intel_guc_ads.o \
intel_guc_ct.o \
intel_guc_fw.o \
intel_guc_log.o \
intel_guc_submission.o \
intel_huc.o \
intel_huc_fw.o
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
intel_renderstate_gen7.o \
intel_renderstate_gen8.o \
intel_renderstate_gen9.o
obj-y += gt/uc/
i915-y += gt/uc/intel_uc.o \
gt/uc/intel_uc_fw.o \
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
gt/uc/intel_guc_ct.o \
gt/uc/intel_guc_fw.o \
gt/uc/intel_guc_log.o \
gt/uc/intel_guc_submission.o \
gt/uc/intel_huc.o \
gt/uc/intel_huc_fw.o
# modesetting core code
obj-y += display/
@@ -173,7 +181,8 @@ i915-y += \
display/intel_overlay.o \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_sprite.o
display/intel_sprite.o \
display/intel_tc.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -210,6 +219,25 @@ i915-y += \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
# perf code
obj-y += oa/
i915-y += \
oa/i915_oa_hsw.o \
oa/i915_oa_bdw.o \
oa/i915_oa_chv.o \
oa/i915_oa_sklgt2.o \
oa/i915_oa_sklgt3.o \
oa/i915_oa_sklgt4.o \
oa/i915_oa_bxt.o \
oa/i915_oa_kblgt2.o \
oa/i915_oa_kblgt3.o \
oa/i915_oa_glk.o \
oa/i915_oa_cflgt2.o \
oa/i915_oa_cflgt3.o \
oa/i915_oa_cnl.o \
oa/i915_oa_icl.o
i915-y += i915_perf.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
@@ -224,23 +252,6 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
# virtual gpu code
i915-y += i915_vgpu.o
# perf code
i915-y += i915_perf.o \
i915_oa_hsw.o \
i915_oa_bdw.o \
i915_oa_chv.o \
i915_oa_sklgt2.o \
i915_oa_sklgt3.o \
i915_oa_sklgt4.o \
i915_oa_bxt.o \
i915_oa_kblgt2.o \
i915_oa_kblgt3.o \
i915_oa_glk.o \
i915_oa_cflgt2.o \
i915_oa_cflgt3.o \
i915_oa_cnl.o \
i915_oa_icl.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
include $(src)/gvt/Makefile


@@ -1,22 +0,0 @@
# SPDX-License-Identifier: MIT
# Copyright © 2019 Intel Corporation
# Test the headers are compilable as standalone units
header-test-$(CONFIG_DRM_I915_WERROR) := \
i915_active_types.h \
i915_debugfs.h \
i915_drv.h \
i915_irq.h \
i915_params.h \
i915_priolist_types.h \
i915_reg.h \
i915_scheduler_types.h \
i915_timeline_types.h \
i915_utils.h \
intel_csr.h \
intel_drv.h \
intel_pm.h \
intel_runtime_pm.h \
intel_sideband.h \
intel_uncore.h \
intel_wakeref.h


@@ -1,2 +1,6 @@
# For building individual subdir files on the command line
subdir-ccflags-y += -I$(srctree)/$(src)/..
# Extra header tests
include $(src)/Makefile.header-test
header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
header-test- := intel_vbt_defs.h


@@ -1,16 +0,0 @@
# SPDX-License-Identifier: MIT
# Copyright © 2019 Intel Corporation
# Test the headers are compilable as standalone units
header_test := $(notdir $(filter-out %/intel_vbt_defs.h,$(wildcard $(src)/*.h)))
quiet_cmd_header_test = HDRTEST $@
cmd_header_test = echo "\#include \"$(<F)\"" > $@
header_test_%.c: %.h
$(call cmd,header_test)
extra-$(CONFIG_DRM_I915_WERROR) += \
$(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))


@@ -25,7 +25,7 @@
*
*/
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define CH7017_TV_DISPLAY_MODE 0x00


@@ -26,7 +26,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define CH7xxx_REG_VID 0x4a


@@ -29,7 +29,7 @@
*
*/
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
/*


@@ -28,7 +28,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define NS2501_VID 0x1305


@@ -26,7 +26,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define SIL164_VID 0x0001


@@ -25,7 +25,7 @@
*
*/
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
/* register definitions according to the TFP410 data sheet */


@@ -202,63 +202,62 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
enum phy phy;
u32 tmp;
int lane;
for_each_dsi_port(port, intel_dsi->ports) {
for_each_dsi_phy(phy, intel_dsi->phys) {
/*
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programing
*/
tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
/* Bspec: must not use GRP register for write */
tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
}
@@ -364,10 +363,10 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
enum phy phy;
for_each_dsi_port(port, intel_dsi->ports)
intel_combo_phy_power_up_lanes(dev_priv, port, true,
for_each_dsi_phy(phy, intel_dsi->phys)
intel_combo_phy_power_up_lanes(dev_priv, phy, true,
intel_dsi->lane_count, false);
}
@@ -375,34 +374,47 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
enum phy phy;
u32 tmp;
int lane;
/* Step 4b(i) set loadgen select for transmit and aux lanes */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
for_each_dsi_phy(phy, intel_dsi->phys) {
tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~LOADGEN_SELECT;
I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~LOADGEN_SELECT;
if (lane != 2)
tmp |= LOADGEN_SELECT;
I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
for_each_dsi_phy(phy, intel_dsi->phys) {
tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) {
tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0);
I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0x1);
I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
}
}
}
@@ -412,16 +424,16 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
enum port port;
enum phy phy;
/* clear common keeper enable bit */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
for_each_dsi_phy(phy, intel_dsi->phys) {
tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
tmp &= ~COMMON_KEEPER_EN;
I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~COMMON_KEEPER_EN;
I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
}
/*
@@ -429,33 +441,33 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
* Note: loadgen select program is done
* as part of lane phy sequence configuration
*/
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_PORT_CL_DW5(port));
for_each_dsi_phy(phy, intel_dsi->phys) {
tmp = I915_READ(ICL_PORT_CL_DW5(phy));
tmp |= SUS_CLOCK_CONFIG;
I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
I915_WRITE(ICL_PORT_CL_DW5(phy), tmp);
}
/* Clear training enable to change swing values */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
for_each_dsi_phy(phy, intel_dsi->phys) {
tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~TX_TRAINING_EN;
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~TX_TRAINING_EN;
I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
}
/* Program swing and de-emphasis */
dsi_program_swing_and_deemphasis(encoder);
/* Set training enable to trigger update */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
for_each_dsi_phy(phy, intel_dsi->phys) {
tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
tmp |= TX_TRAINING_EN;
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
tmp |= TX_TRAINING_EN;
I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
}
}
@@ -484,6 +496,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
enum port port;
enum phy phy;
/* Program T-INIT master registers */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -517,18 +530,28 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
* a value '0' inside TA_PARAM_REGISTERS otherwise
* leave all fields at HW default values.
*/
if (intel_dsi_bitrate(intel_dsi) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
if (IS_GEN(dev_priv, 11)) {
if (intel_dsi_bitrate(intel_dsi) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
/* shadow register inside display core */
tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
/* shadow register inside display core */
tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
}
}
}
if (IS_ELKHARTLAKE(dev_priv)) {
for_each_dsi_phy(phy, intel_dsi->phys) {
tmp = I915_READ(ICL_DPHY_CHKN(phy));
tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
I915_WRITE(ICL_DPHY_CHKN(phy), tmp);
}
}
}
@@ -538,15 +561,14 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
enum port port;
enum phy phy;
mutex_lock(&dev_priv->dpll_lock);
tmp = I915_READ(DPCLKA_CFGCR0_ICL);
for_each_dsi_port(port, intel_dsi->ports) {
tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port);
}
tmp = I915_READ(ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -555,15 +577,14 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
enum port port;
enum phy phy;
mutex_lock(&dev_priv->dpll_lock);
tmp = I915_READ(DPCLKA_CFGCR0_ICL);
for_each_dsi_port(port, intel_dsi->ports) {
tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
}
tmp = I915_READ(ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -573,24 +594,27 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port;
enum phy phy;
u32 val;
mutex_lock(&dev_priv->dpll_lock);
val = I915_READ(DPCLKA_CFGCR0_ICL);
for_each_dsi_port(port, intel_dsi->ports) {
val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
val = I915_READ(ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys) {
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
}
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
I915_WRITE(ICL_DPCLKA_CFGCR0, val);
for_each_dsi_port(port, intel_dsi->ports) {
val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
for_each_dsi_phy(phy, intel_dsi->phys) {
if (INTEL_GEN(dev_priv) >= 12)
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
else
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
}
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
I915_WRITE(ICL_DPCLKA_CFGCR0, val);
POSTING_READ(DPCLKA_CFGCR0_ICL);
POSTING_READ(ICL_DPCLKA_CFGCR0);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -661,6 +685,11 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
break;
}
if (INTEL_GEN(dev_priv) >= 12) {
if (is_vid_mode(intel_dsi))
tmp |= BLANKING_PACKET_ENABLE;
}
/* program DSI operation mode */
if (is_vid_mode(intel_dsi)) {
tmp &= ~OP_MODE_MASK;
@@ -744,7 +773,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
enum transcoder dsi_trans;
/* horizontal timings */
u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
u16 hfront_porch, hback_porch;
u16 hback_porch;
/* vertical timings */
u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
@@ -753,8 +782,6 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
hsync_start = adjusted_mode->crtc_hsync_start;
hsync_end = adjusted_mode->crtc_hsync_end;
hsync_size = hsync_end - hsync_start;
hfront_porch = (adjusted_mode->crtc_hsync_start -
adjusted_mode->crtc_hdisplay);
hback_porch = (adjusted_mode->crtc_htotal -
adjusted_mode->crtc_hsync_end);
vactive = adjusted_mode->crtc_vdisplay;
@@ -845,6 +872,15 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
}
/* program TRANS_VBLANK register, should be same as vtotal programmed */
if (INTEL_GEN(dev_priv) >= 12) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
I915_WRITE(VBLANK(dsi_trans),
(vactive - 1) | ((vtotal - 1) << 16));
}
}
}
static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
@@ -862,10 +898,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
if (intel_wait_for_register(&dev_priv->uncore,
PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE,
I965_PIPECONF_ACTIVE, 10))
if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 10))
DRM_ERROR("DSI transcoder not enabled\n");
}
}
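
The intel_de_wait_for_set()/intel_de_wait_for_clear() calls showing up
here (and again in the cdclk and CRT hunks below) fold the separate
mask and value arguments of intel_wait_for_register() into one helper
per polarity. Judging only from the call sites being replaced, the new
helpers are roughly equivalent to this sketch (kernel-internal types,
not standalone; the real definitions live elsewhere in the series):

	/* Inferred from the converted call sites: "set" waits for
	 * (reg & mask) == mask, "clear" waits for (reg & mask) == 0. */
	static inline int intel_de_wait_for_set(struct drm_i915_private *i915,
						i915_reg_t reg, u32 mask,
						unsigned int timeout)
	{
		return intel_wait_for_register(&i915->uncore, reg,
					       mask, mask, timeout);
	}

	static inline int intel_de_wait_for_clear(struct drm_i915_private *i915,
						  i915_reg_t reg, u32 mask,
						  unsigned int timeout)
	{
		return intel_wait_for_register(&i915->uncore, reg,
					       mask, 0, timeout);
	}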
@@ -923,6 +957,8 @@ static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
/* step 4a: power up all lanes of the DDI used by DSI */
gen11_dsi_power_up_lanes(encoder);
@@ -945,7 +981,8 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
gen11_dsi_configure_transcoder(encoder, pipe_config);
/* Step 4l: Gate DDI clocks */
gen11_dsi_gate_clocks(encoder);
if (IS_GEN(dev_priv, 11))
gen11_dsi_gate_clocks(encoder);
}
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
@@ -1041,9 +1078,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
if (intel_wait_for_register(&dev_priv->uncore,
PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 0, 50))
if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 50))
DRM_ERROR("DSI trancoder not disabled\n");
}
}
@@ -1487,6 +1523,26 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
intel_dsi_log_params(intel_dsi);
}
static void icl_dsi_add_properties(struct intel_connector *connector)
{
u32 allowed_scalers;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) |
BIT(DRM_MODE_SCALE_FULLSCREEN) |
BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(&connector->base,
allowed_scalers);
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
connector->base.display_info.panel_orientation =
intel_dsi_get_panel_orientation(connector);
drm_connector_init_panel_orientation_property(&connector->base,
connector->panel.fixed_mode->hdisplay,
connector->panel.fixed_mode->vdisplay);
}
void icl_dsi_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
@@ -1580,6 +1636,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
}
icl_dphy_param_init(intel_dsi);
icl_dsi_add_properties(intel_connector);
return;
err:


@@ -35,7 +35,7 @@
#include <drm/drm_plane_helper.h>
#include "intel_atomic.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sprite.h"


@@ -35,8 +35,9 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include "i915_trace.h"
#include "intel_atomic_plane.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sprite.h"
@@ -176,33 +177,49 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->data_rate[plane->id] =
intel_plane_data_rate(new_crtc_state, new_plane_state);
return intel_plane_atomic_calc_changes(old_crtc_state,
&new_crtc_state->base,
old_plane_state,
&new_plane_state->base);
return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
old_plane_state, new_plane_state);
}
static int intel_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *new_plane_state)
static struct intel_crtc *
get_crtc_from_states(const struct intel_plane_state *old_plane_state,
const struct intel_plane_state *new_plane_state)
{
struct drm_atomic_state *state = new_plane_state->state;
const struct drm_plane_state *old_plane_state =
drm_atomic_get_old_plane_state(state, plane);
struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
const struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
if (new_plane_state->base.crtc)
return to_intel_crtc(new_plane_state->base.crtc);
new_plane_state->visible = false;
if (old_plane_state->base.crtc)
return to_intel_crtc(old_plane_state->base.crtc);
return NULL;
}
static int intel_plane_atomic_check(struct drm_plane *_plane,
struct drm_plane_state *_new_plane_state)
{
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_atomic_state *state =
to_intel_atomic_state(_new_plane_state->state);
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
const struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
struct intel_crtc *crtc =
get_crtc_from_states(old_plane_state, new_plane_state);
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
new_plane_state->base.visible = false;
if (!crtc)
return 0;
old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state),
to_intel_crtc_state(new_crtc_state),
to_intel_plane_state(old_plane_state),
to_intel_plane_state(new_plane_state));
return intel_plane_atomic_check_with_state(old_crtc_state,
new_crtc_state,
old_plane_state,
new_plane_state);
}
static struct intel_plane *
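
The lookups above move from the drm_* helpers to their typed intel_*
counterparts. The to_intel_*() downcasts rely on the usual i915 pattern
of embedding the base DRM state as a .base member; roughly (a sketch of
the convention, not the exact header):

	/* Each intel state struct wraps its DRM counterpart, so the
	 * wrapper is recovered with container_of() on the embedded
	 * base, matching the new_plane_state->base.visible usage above. */
	static inline struct intel_plane_state *
	to_intel_plane_state(struct drm_plane_state *state)
	{
		return container_of(state, struct intel_plane_state, base);
	}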


@@ -8,7 +8,6 @@
#include <linux/types.h>
struct drm_crtc_state;
struct drm_plane;
struct drm_property;
struct intel_atomic_state;
@@ -43,8 +42,8 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct drm_crtc_state *crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct drm_plane_state *plane_state);
struct intel_plane_state *plane_state);
#endif /* __INTEL_ATOMIC_PLANE_H__ */


@@ -29,7 +29,7 @@
#include "i915_drv.h"
#include "intel_audio.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_lpe_audio.h"
/**
@@ -72,6 +72,13 @@ struct dp_aud_n_m {
u16 n;
};
struct hdmi_aud_ncts {
int sample_rate;
int clock;
int n;
int cts;
};
/* Values according to DP 1.4 Table 2-104 */
static const struct dp_aud_n_m dp_aud_n_m[] = {
{ 32000, LC_162M, 1024, 10125 },
@@ -148,12 +155,7 @@ static const struct {
#define TMDS_594M 594000
#define TMDS_593M 593407
static const struct {
int sample_rate;
int clock;
int n;
int cts;
} hdmi_aud_ncts[] = {
static const struct hdmi_aud_ncts hdmi_aud_ncts_24bpp[] = {
{ 32000, TMDS_296M, 5824, 421875 },
{ 32000, TMDS_297M, 3072, 222750 },
{ 32000, TMDS_593M, 5824, 843750 },
@@ -184,6 +186,49 @@ static const struct {
{ 192000, TMDS_594M, 24576, 594000 },
};
/* Appendix C - N & CTS values for deep color from HDMI 2.0 spec*/
/* HDMI N/CTS table for 10 bit deep color(30 bpp)*/
#define TMDS_371M 371250
#define TMDS_370M 370878
static const struct hdmi_aud_ncts hdmi_aud_ncts_30bpp[] = {
{ 32000, TMDS_370M, 5824, 527344 },
{ 32000, TMDS_371M, 6144, 556875 },
{ 44100, TMDS_370M, 8918, 585938 },
{ 44100, TMDS_371M, 4704, 309375 },
{ 88200, TMDS_370M, 17836, 585938 },
{ 88200, TMDS_371M, 9408, 309375 },
{ 176400, TMDS_370M, 35672, 585938 },
{ 176400, TMDS_371M, 18816, 309375 },
{ 48000, TMDS_370M, 11648, 703125 },
{ 48000, TMDS_371M, 5120, 309375 },
{ 96000, TMDS_370M, 23296, 703125 },
{ 96000, TMDS_371M, 10240, 309375 },
{ 192000, TMDS_370M, 46592, 703125 },
{ 192000, TMDS_371M, 20480, 309375 },
};
/* HDMI N/CTS table for 12 bit deep color(36 bpp)*/
#define TMDS_445_5M 445500
#define TMDS_445M 445054
static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
{ 32000, TMDS_445M, 5824, 632813 },
{ 32000, TMDS_445_5M, 4096, 445500 },
{ 44100, TMDS_445M, 8918, 703125 },
{ 44100, TMDS_445_5M, 4704, 371250 },
{ 88200, TMDS_445M, 17836, 703125 },
{ 88200, TMDS_445_5M, 9408, 371250 },
{ 176400, TMDS_445M, 35672, 703125 },
{ 176400, TMDS_445_5M, 18816, 371250 },
{ 48000, TMDS_445M, 5824, 421875 },
{ 48000, TMDS_445_5M, 5120, 371250 },
{ 96000, TMDS_445M, 11648, 421875 },
{ 96000, TMDS_445_5M, 10240, 371250 },
{ 192000, TMDS_445M, 23296, 421875 },
{ 192000, TMDS_445_5M, 20480, 371250 },
};
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
@@ -212,14 +257,24 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
int rate)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
int i;
const struct hdmi_aud_ncts *hdmi_ncts_table;
int i, size;
for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
if (rate == hdmi_aud_ncts[i].sample_rate &&
adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
return hdmi_aud_ncts[i].n;
if (crtc_state->pipe_bpp == 36) {
hdmi_ncts_table = hdmi_aud_ncts_36bpp;
size = ARRAY_SIZE(hdmi_aud_ncts_36bpp);
} else if (crtc_state->pipe_bpp == 30) {
hdmi_ncts_table = hdmi_aud_ncts_30bpp;
size = ARRAY_SIZE(hdmi_aud_ncts_30bpp);
} else {
hdmi_ncts_table = hdmi_aud_ncts_24bpp;
size = ARRAY_SIZE(hdmi_aud_ncts_24bpp);
}
for (i = 0; i < size; i++) {
if (rate == hdmi_ncts_table[i].sample_rate &&
crtc_state->port_clock == hdmi_ncts_table[i].clock) {
return hdmi_ncts_table[i].n;
}
}
return 0;
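
As a quick sanity check, these table entries satisfy the HDMI audio
clock regeneration relation 128 * fs = f_TMDS * N / CTS. Taking
{ 48000, TMDS_445_5M, 5120, 371250 } from the 36 bpp table above:

	445500 kHz * 5120 / 371250 = 6144 kHz = 128 * 48 kHz

so a 48 kHz stream is recovered exactly at the 445.5 MHz deep-color
TMDS clock.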


@@ -28,6 +28,7 @@
#include <drm/drm_dp_helper.h>
#include <drm/i915_drm.h>
#include "display/intel_display.h"
#include "display/intel_gmbus.h"
#include "i915_drv.h"
@@ -1342,16 +1343,13 @@ static const u8 cnp_ddc_pin_map[] = {
static const u8 icp_ddc_pin_map[] = {
[ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
[ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
[TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT,
[ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
[ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
[ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
[ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
};
static const u8 mcc_ddc_pin_map[] = {
[MCC_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
[MCC_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
[MCC_DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
[TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP,
[TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
};
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
@@ -1359,10 +1357,7 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
const u8 *ddc_pin_map;
int n_entries;
if (HAS_PCH_MCC(dev_priv)) {
ddc_pin_map = mcc_ddc_pin_map;
n_entries = ARRAY_SIZE(mcc_ddc_pin_map);
} else if (HAS_PCH_ICP(dev_priv)) {
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
} else if (HAS_PCH_CNP(dev_priv)) {
@@ -1668,6 +1663,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (!child->device_type)
continue;
DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n",
child->device_type);
/*
* Copy as much as we know (sizeof) and is available
* (child_dev_size) of the child device. Accessing the data must
@@ -1730,12 +1728,13 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
enum phy phy = intel_port_to_phy(dev_priv, port);
/*
* VBT has the TypeC mode (native,TBT/USB) and we don't want
* to detect it.
*/
if (intel_port_is_tc(dev_priv, port))
if (intel_phy_is_tc(dev_priv, phy))
continue;
info->supports_dvi = (port != PORT_A && port != PORT_E);
@@ -1888,10 +1887,10 @@ out:
}
/**
* intel_bios_cleanup - Free any resources allocated by intel_bios_init()
* intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
* @dev_priv: i915 device instance
*/
void intel_bios_cleanup(struct drm_i915_private *dev_priv)
void intel_bios_driver_remove(struct drm_i915_private *dev_priv)
{
kfree(dev_priv->vbt.child_dev);
dev_priv->vbt.child_dev = NULL;


@@ -42,6 +42,7 @@ enum intel_backlight_type {
INTEL_BACKLIGHT_DISPLAY_DDI,
INTEL_BACKLIGHT_DSI_DCS,
INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
};
struct edp_power_seq {
@@ -227,7 +228,7 @@ struct mipi_pps_data {
} __packed;
void intel_bios_init(struct drm_i915_private *dev_priv);
void intel_bios_cleanup(struct drm_i915_private *dev_priv);
void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);


@@ -6,7 +6,7 @@
#include <drm/drm_atomic_state_helper.h>
#include "intel_bw.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
/* Parameters for Qclk Geyserville (QGV) */
@@ -65,7 +65,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
struct intel_qgv_point *sp,
int point)
{
u32 val = 0, val2;
u32 val = 0, val2 = 0;
int ret;
ret = sandybridge_pcode_read(dev_priv,
@@ -322,6 +322,20 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
return data_rate;
}
static struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct drm_private_state *bw_state;
bw_state = drm_atomic_get_private_obj_state(&state->base,
&dev_priv->bw_obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
return to_intel_bw_state(bw_state);
}
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);


@@ -8,7 +8,6 @@
#include <drm/drm_atomic.h>
#include "i915_drv.h"
#include "intel_display.h"
struct drm_i915_private;
@@ -24,20 +23,6 @@ struct intel_bw_state {
#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
static inline struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct drm_private_state *bw_state;
bw_state = drm_atomic_get_private_obj_state(&state->base,
&dev_priv->bw_obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
return to_intel_bw_state(bw_state);
}
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);


@@ -22,7 +22,7 @@
*/
#include "intel_cdclk.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
/**
@@ -545,10 +545,10 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
/* There are cases where we can end up here with power domains
* off and a CDCLK frequency other than the minimum, like when
* issuing a modeset without actually changing any display after
* a system suspend. So grab the PIPE-A domain, which covers
* a system suspend. So grab the display core domain, which covers
* the HW blocks needed for the following programming.
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
vlv_iosf_sb_get(dev_priv,
BIT(VLV_IOSF_SB_CCK) |
@@ -606,7 +606,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
@@ -631,10 +631,10 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
/* There are cases where we can end up here with power domains
* off and a CDCLK frequency other than the minimum, like when
* issuing a modeset without actually changing any display after
* a system suspend. So grab the PIPE-A domain, which covers
* a system suspend. So grab the display core domain, which covers
* the HW blocks needed for the following programming.
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -653,7 +653,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
static int bdw_calc_cdclk(int min_cdclk)
@@ -969,9 +969,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
if (intel_wait_for_register(&dev_priv->uncore,
LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
5))
if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
DRM_ERROR("DPLL0 not locked\n");
dev_priv->cdclk.hw.vco = vco;
@@ -983,9 +981,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
if (intel_wait_for_register(&dev_priv->uncore,
LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
1))
if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
DRM_ERROR("Couldn't disable DPLL0\n");
dev_priv->cdclk.hw.vco = 0;
@@ -1309,9 +1305,8 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
I915_WRITE(BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
if (intel_wait_for_register(&dev_priv->uncore,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
1))
if (intel_de_wait_for_clear(dev_priv,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
DRM_ERROR("timeout waiting for DE PLL unlock\n");
dev_priv->cdclk.hw.vco = 0;
@@ -1330,11 +1325,8 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
if (intel_wait_for_register(&dev_priv->uncore,
BXT_DE_PLL_ENABLE,
BXT_DE_PLL_LOCK,
BXT_DE_PLL_LOCK,
1))
if (intel_de_wait_for_set(dev_priv,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
DRM_ERROR("timeout waiting for DE PLL lock\n");
dev_priv->cdclk.hw.vco = vco;
@@ -1756,9 +1748,10 @@ sanitize:
static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
{
int ranges_24[] = { 312000, 552000, 648000 };
int ranges_19_38[] = { 307200, 556800, 652800 };
int *ranges;
static const int ranges_24[] = { 180000, 192000, 312000, 552000, 648000 };
static const int ranges_19_38[] = { 172800, 192000, 307200, 556800, 652800 };
const int *ranges;
int len, i;
switch (ref) {
default:
@@ -1766,19 +1759,22 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
/* fall through */
case 24000:
ranges = ranges_24;
len = ARRAY_SIZE(ranges_24);
break;
case 19200:
case 38400:
ranges = ranges_19_38;
len = ARRAY_SIZE(ranges_19_38);
break;
}
if (min_cdclk > ranges[1])
return ranges[2];
else if (min_cdclk > ranges[0])
return ranges[1];
else
return ranges[0];
for (i = 0; i < len; i++) {
if (min_cdclk <= ranges[i])
return ranges[i];
}
WARN_ON(min_cdclk > ranges[len - 1]);
return ranges[len - 1];
}
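
A worked example of the rewritten lookup, using the 24 MHz reference
table { 180000, 192000, 312000, 552000, 648000 }: min_cdclk = 200000
fails the first two entries and returns 312000, the first entry >= the
requirement, while anything above 648000 trips the WARN_ON and clamps
to the last entry. The old three-entry if/else chain could never
return the new 180000/192000 low points.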
static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
@@ -1792,16 +1788,24 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
default:
MISSING_CASE(cdclk);
/* fall through */
case 172800:
case 307200:
case 556800:
case 652800:
WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
dev_priv->cdclk.hw.ref != 38400);
break;
case 180000:
case 312000:
case 552000:
case 648000:
WARN_ON(dev_priv->cdclk.hw.ref != 24000);
break;
case 192000:
WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
dev_priv->cdclk.hw.ref != 38400 &&
dev_priv->cdclk.hw.ref != 24000);
break;
}
ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
@@ -1854,14 +1858,23 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
}
static u8 icl_calc_voltage_level(int cdclk)
static u8 icl_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
{
if (cdclk > 556800)
return 2;
else if (cdclk > 312000)
return 1;
else
return 0;
if (IS_ELKHARTLAKE(dev_priv)) {
if (cdclk > 312000)
return 2;
else if (cdclk > 180000)
return 1;
else
return 0;
} else {
if (cdclk > 556800)
return 2;
else if (cdclk > 312000)
return 1;
else
return 0;
}
}
static void icl_get_cdclk(struct drm_i915_private *dev_priv,
@@ -1912,7 +1925,7 @@ out:
* at least what the CDCLK frequency requires.
*/
cdclk_state->voltage_level =
icl_calc_voltage_level(cdclk_state->cdclk);
icl_calc_voltage_level(dev_priv, cdclk_state->cdclk);
}
static void icl_init_cdclk(struct drm_i915_private *dev_priv)
@@ -1947,7 +1960,8 @@ sanitize:
sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
sanitized_state.cdclk);
sanitized_state.voltage_level =
icl_calc_voltage_level(sanitized_state.cdclk);
icl_calc_voltage_level(dev_priv,
sanitized_state.cdclk);
icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
}
@@ -1958,7 +1972,8 @@ static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
cdclk_state.voltage_level = icl_calc_voltage_level(dev_priv,
cdclk_state.cdclk);
icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
@@ -2560,7 +2575,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.vco = vco;
state->cdclk.logical.cdclk = cdclk;
state->cdclk.logical.voltage_level =
max(icl_calc_voltage_level(cdclk),
max(icl_calc_voltage_level(dev_priv, cdclk),
cnl_compute_min_voltage_level(state));
if (!state->active_crtcs) {
@@ -2570,7 +2585,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.actual.vco = vco;
state->cdclk.actual.cdclk = cdclk;
state->cdclk.actual.voltage_level =
icl_calc_voltage_level(cdclk);
icl_calc_voltage_level(dev_priv, cdclk);
} else {
state->cdclk.actual = state->cdclk.logical;
}
@@ -2605,7 +2620,12 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) >= 11) {
if (IS_ELKHARTLAKE(dev_priv)) {
if (dev_priv->cdclk.hw.ref == 24000)
dev_priv->max_cdclk_freq = 552000;
else
dev_priv->max_cdclk_freq = 556800;
} else if (INTEL_GEN(dev_priv) >= 11) {
if (dev_priv->cdclk.hw.ref == 24000)
dev_priv->max_cdclk_freq = 648000;
else


@@ -23,7 +23,7 @@
*/
#include "intel_color.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#define CTM_COEFF_SIGN (1ULL << 63)


@@ -4,15 +4,15 @@
*/
#include "intel_combo_phy.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#define for_each_combo_port(__dev_priv, __port) \
for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
for_each_if(intel_port_is_combophy(__dev_priv, __port))
#define for_each_combo_phy(__dev_priv, __phy) \
for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
for_each_if(intel_phy_is_combo(__dev_priv, __phy))
#define for_each_combo_port_reverse(__dev_priv, __port) \
for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
for_each_if(intel_port_is_combophy(__dev_priv, __port))
#define for_each_combo_phy_reverse(__dev_priv, __phy) \
for ((__phy) = I915_MAX_PHYS; (__phy)-- > PHY_A;) \
for_each_if(intel_phy_is_combo(__dev_priv, __phy))
enum {
PROCMON_0_85V_DOT_0,
@@ -38,18 +38,17 @@ static const struct cnl_procmon {
};
/*
* CNL has just one set of registers, while ICL has two sets: one for port A and
* the other for port B. The CNL registers are equivalent to the ICL port A
* registers, that's why we call the ICL macros even though the function has CNL
* on its name.
* CNL has just one set of registers, while gen11 has a set for each combo PHY.
* The CNL registers are equivalent to the gen11 PHY A registers, that's why we
* call the ICL macros even though the function has CNL on its name.
*/
static const struct cnl_procmon *
cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
{
const struct cnl_procmon *procmon;
u32 val;
val = I915_READ(ICL_PORT_COMP_DW3(port));
val = I915_READ(ICL_PORT_COMP_DW3(phy));
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
@@ -75,32 +74,32 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
}
static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
enum port port)
enum phy phy)
{
const struct cnl_procmon *procmon;
u32 val;
procmon = cnl_get_procmon_ref_values(dev_priv, port);
procmon = cnl_get_procmon_ref_values(dev_priv, phy);
val = I915_READ(ICL_PORT_COMP_DW1(port));
val = I915_READ(ICL_PORT_COMP_DW1(phy));
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
I915_WRITE(ICL_PORT_COMP_DW1(port), val);
I915_WRITE(ICL_PORT_COMP_DW1(phy), val);
I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
I915_WRITE(ICL_PORT_COMP_DW9(phy), procmon->dw9);
I915_WRITE(ICL_PORT_COMP_DW10(phy), procmon->dw10);
}
static bool check_phy_reg(struct drm_i915_private *dev_priv,
enum port port, i915_reg_t reg, u32 mask,
enum phy phy, i915_reg_t reg, u32 mask,
u32 expected_val)
{
u32 val = I915_READ(reg);
if ((val & mask) != expected_val) {
DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
DRM_DEBUG_DRIVER("Combo PHY %c reg %08x state mismatch: "
"current %08x mask %08x expected %08x\n",
port_name(port),
phy_name(phy),
reg.reg, val, mask, expected_val);
return false;
}
@@ -109,18 +108,18 @@ static bool check_phy_reg(struct drm_i915_private *dev_priv,
}
static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
enum port port)
enum phy phy)
{
const struct cnl_procmon *procmon;
bool ret;
procmon = cnl_get_procmon_ref_values(dev_priv, port);
procmon = cnl_get_procmon_ref_values(dev_priv, phy);
ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
ret = check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW1(phy),
(0xff << 16) | 0xff, procmon->dw1);
ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW9(phy),
-1U, procmon->dw9);
ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW10(phy),
-1U, procmon->dw10);
return ret;
@@ -134,15 +133,15 @@ static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
{
enum port port = PORT_A;
enum phy phy = PHY_A;
bool ret;
if (!cnl_combo_phy_enabled(dev_priv))
return false;
ret = cnl_verify_procmon_ref_values(dev_priv, port);
ret = cnl_verify_procmon_ref_values(dev_priv, phy);
ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
ret &= check_phy_reg(dev_priv, phy, CNL_PORT_CL1CM_DW5,
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
@@ -157,7 +156,7 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
I915_WRITE(CHICKEN_MISC_2, val);
/* Dummy PORT_A to get the correct CNL register from the ICL macro */
cnl_set_procmon_ref_values(dev_priv, PORT_A);
cnl_set_procmon_ref_values(dev_priv, PHY_A);
val = I915_READ(CNL_PORT_COMP_DW0);
val |= COMP_INIT;
@@ -181,35 +180,39 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
}
static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
enum port port)
enum phy phy)
{
return !(I915_READ(ICL_PHY_MISC(port)) &
ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
(I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
/* The PHY C added by EHL has no PHY_MISC register */
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
return I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
else
return !(I915_READ(ICL_PHY_MISC(phy)) &
ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
(I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
}
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
enum port port)
enum phy phy)
{
bool ret;
if (!icl_combo_phy_enabled(dev_priv, port))
if (!icl_combo_phy_enabled(dev_priv, phy))
return false;
ret = cnl_verify_procmon_ref_values(dev_priv, port);
ret = cnl_verify_procmon_ref_values(dev_priv, phy);
if (port == PORT_A)
ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW8(port),
if (phy == PHY_A)
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy),
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
}
void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
enum port port, bool is_dsi,
enum phy phy, bool is_dsi,
int lane_count, bool lane_reversal)
{
u8 lane_mask;
@@ -254,66 +257,120 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
val = I915_READ(ICL_PORT_CL_DW10(port));
val = I915_READ(ICL_PORT_CL_DW10(phy));
val &= ~PWR_DOWN_LN_MASK;
val |= lane_mask << PWR_DOWN_LN_SHIFT;
I915_WRITE(ICL_PORT_CL_DW10(port), val);
I915_WRITE(ICL_PORT_CL_DW10(phy), val);
}
static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val)
{
bool ddi_a_present = i915->vbt.ddi_port_info[PORT_A].child != NULL;
bool ddi_d_present = i915->vbt.ddi_port_info[PORT_D].child != NULL;
bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
/*
* VBT's 'dvo port' field for child devices references the DDI, not
* the PHY. So if combo PHY A is wired up to drive an external
* display, we should see a child device present on PORT_D and
* nothing on PORT_A and no DSI.
*/
if (ddi_d_present && !ddi_a_present && !dsi_present)
return val | ICL_PHY_MISC_MUX_DDID;
/*
* If we encounter a VBT that claims to have an external display on
* DDI-D _and_ an internal display on DDI-A/DSI leave an error message
* in the log and let the internal display win.
*/
if (ddi_d_present)
DRM_ERROR("VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
return val & ~ICL_PHY_MISC_MUX_DDID;
}
static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
{
enum port port;
enum phy phy;
for_each_combo_port(dev_priv, port) {
for_each_combo_phy(dev_priv, phy) {
u32 val;
if (icl_combo_phy_verify_state(dev_priv, port)) {
DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
port_name(port));
if (icl_combo_phy_verify_state(dev_priv, phy)) {
DRM_DEBUG_DRIVER("Combo PHY %c already enabled, won't reprogram it.\n",
phy_name(phy));
continue;
}
val = I915_READ(ICL_PHY_MISC(port));
/*
* Although EHL adds a combo PHY C, there's no PHY_MISC
* register for it and no need to program the
* DE_IO_COMP_PWR_DOWN setting on PHY C.
*/
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
goto skip_phy_misc;
/*
* EHL's combo PHY A can be hooked up to either an external
* display (via DDI-D) or an internal display (via DDI-A or
* the DSI DPHY). This is a motherboard design decision that
* can't be changed on the fly, so initialize the PHY's mux
* based on whether our VBT indicates the presence of any
* "internal" child devices.
*/
val = I915_READ(ICL_PHY_MISC(phy));
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A)
val = ehl_combo_phy_a_mux(dev_priv, val);
val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
I915_WRITE(ICL_PHY_MISC(port), val);
I915_WRITE(ICL_PHY_MISC(phy), val);
cnl_set_procmon_ref_values(dev_priv, port);
skip_phy_misc:
cnl_set_procmon_ref_values(dev_priv, phy);
if (port == PORT_A) {
val = I915_READ(ICL_PORT_COMP_DW8(port));
if (phy == PHY_A) {
val = I915_READ(ICL_PORT_COMP_DW8(phy));
val |= IREFGEN;
I915_WRITE(ICL_PORT_COMP_DW8(port), val);
I915_WRITE(ICL_PORT_COMP_DW8(phy), val);
}
val = I915_READ(ICL_PORT_COMP_DW0(port));
val = I915_READ(ICL_PORT_COMP_DW0(phy));
val |= COMP_INIT;
I915_WRITE(ICL_PORT_COMP_DW0(port), val);
I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
val = I915_READ(ICL_PORT_CL_DW5(port));
val = I915_READ(ICL_PORT_CL_DW5(phy));
val |= CL_POWER_DOWN_ENABLE;
I915_WRITE(ICL_PORT_CL_DW5(port), val);
I915_WRITE(ICL_PORT_CL_DW5(phy), val);
}
}
static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
{
enum port port;
enum phy phy;
for_each_combo_port_reverse(dev_priv, port) {
for_each_combo_phy_reverse(dev_priv, phy) {
u32 val;
if (port == PORT_A &&
!icl_combo_phy_verify_state(dev_priv, port))
DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
port_name(port));
if (phy == PHY_A &&
!icl_combo_phy_verify_state(dev_priv, phy))
DRM_WARN("Combo PHY %c HW state changed unexpectedly\n",
phy_name(phy));
val = I915_READ(ICL_PHY_MISC(port));
/*
* Although EHL adds a combo PHY C, there's no PHY_MISC
* register for it and no need to program the
* DE_IO_COMP_PWR_DOWN setting on PHY C.
*/
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
goto skip_phy_misc;
val = I915_READ(ICL_PHY_MISC(phy));
val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
I915_WRITE(ICL_PHY_MISC(port), val);
I915_WRITE(ICL_PHY_MISC(phy), val);
val = I915_READ(ICL_PORT_COMP_DW0(port));
skip_phy_misc:
val = I915_READ(ICL_PORT_COMP_DW0(phy));
val &= ~COMP_INIT;
I915_WRITE(ICL_PORT_COMP_DW0(port), val);
I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
}
}


@@ -7,14 +7,14 @@
#define __INTEL_COMBO_PHY_H__
#include <linux/types.h>
#include <drm/i915_drm.h>
struct drm_i915_private;
enum phy;
void intel_combo_phy_init(struct drm_i915_private *dev_priv);
void intel_combo_phy_uninit(struct drm_i915_private *dev_priv);
void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
enum port port, bool is_dsi,
enum phy phy, bool is_dsi,
int lane_count, bool lane_reversal);
#endif /* __INTEL_COMBO_PHY_H__ */


@@ -33,7 +33,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
int intel_connector_init(struct intel_connector *connector)
@@ -118,7 +118,7 @@ int intel_connector_register(struct drm_connector *connector)
if (ret)
goto err;
if (i915_inject_load_failure()) {
if (i915_inject_probe_failure(to_i915(connector->dev))) {
ret = -EFAULT;
goto err_backlight;
}


@@ -38,7 +38,7 @@
#include "intel_connector.h"
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
@@ -443,9 +443,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
if (intel_wait_for_register(&dev_priv->uncore,
if (intel_de_wait_for_clear(dev_priv,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
@@ -497,10 +497,8 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
if (intel_wait_for_register(&dev_priv->uncore,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
1000)) {
if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
I915_WRITE(crt->adpa_reg, save_adpa);
}
@@ -550,9 +548,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN,
CRT_HOTPLUG_FORCE_DETECT, 0,
1000))
if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN,
CRT_HOTPLUG_FORCE_DETECT, 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
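The hunks above convert open-coded intel_wait_for_register() polls to the new intel_de_wait_for_set()/intel_de_wait_for_clear() helpers. A minimal sketch of the assumed semantics, written as a hypothetical wrapper rather than the patch's actual implementation:

/* Poll @reg until (read & @mask) == 0, for at most @timeout_ms;
 * returns 0 on success, -ETIMEDOUT otherwise. */
static int de_wait_for_clear_sketch(struct drm_i915_private *i915,
				    i915_reg_t reg, u32 mask,
				    unsigned int timeout_ms)
{
	return __intel_wait_for_register(&i915->uncore, reg, mask, 0,
					 2, timeout_ms, NULL);
}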


@@ -32,10 +32,10 @@
#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
@@ -45,6 +45,7 @@
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
struct ddi_buf_trans {
@@ -846,8 +847,8 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct cnl_ddi_buf_trans *
icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
int type, int rate, int *n_entries)
icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
int *n_entries)
{
if (type == INTEL_OUTPUT_HDMI) {
*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
@@ -867,12 +868,13 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
{
int n_entries, level, default_entry;
enum phy phy = intel_port_to_phy(dev_priv, port);
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
if (INTEL_GEN(dev_priv) >= 11) {
if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
@@ -1486,9 +1488,10 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
int link_clock;
if (intel_port_is_combophy(dev_priv, port)) {
if (intel_phy_is_combo(dev_priv, phy)) {
link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
} else {
enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
@@ -1770,7 +1773,10 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
temp |= TRANS_DDI_SELECT_PORT(port);
if (INTEL_GEN(dev_priv) >= 12)
temp |= TGL_TRANS_DDI_SELECT_PORT(port);
else
temp |= TRANS_DDI_SELECT_PORT(port);
switch (crtc_state->pipe_bpp) {
case 18:
@@ -1850,8 +1856,13 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
u32 val = I915_READ(reg);
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
if (INTEL_GEN(dev_priv) >= 12) {
val &= ~(TRANS_DDI_FUNC_ENABLE | TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
} else {
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK |
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
}
I915_WRITE(reg, val);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
@@ -2003,10 +2014,27 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
mst_pipe_mask = 0;
for_each_pipe(dev_priv, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
unsigned int port_mask, ddi_select;
intel_wakeref_t trans_wakeref;
trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!trans_wakeref)
continue;
if (INTEL_GEN(dev_priv) >= 12) {
port_mask = TGL_TRANS_DDI_PORT_MASK;
ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
} else {
port_mask = TRANS_DDI_PORT_MASK;
ddi_select = TRANS_DDI_SELECT_PORT(port);
}
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
trans_wakeref);
if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
if ((tmp & port_mask) != ddi_select)
continue;
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
@@ -2085,6 +2113,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
/*
* TODO: Add support for MST encoders. Atm, the following should never
@@ -2102,7 +2131,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
* ports.
*/
if (intel_crtc_has_dp_encoder(crtc_state) ||
intel_port_is_tc(dev_priv, encoder->port))
intel_phy_is_tc(dev_priv, phy))
intel_display_power_get(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
@@ -2122,9 +2151,14 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
enum port port = encoder->port;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
TRANS_CLK_SEL_PORT(port));
if (cpu_transcoder != TRANSCODER_EDP) {
if (INTEL_GEN(dev_priv) >= 12)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
TGL_TRANS_CLK_SEL_PORT(port));
else
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
TRANS_CLK_SEL_PORT(port));
}
}
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2132,9 +2166,14 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
TRANS_CLK_SEL_DISABLED);
if (cpu_transcoder != TRANSCODER_EDP) {
if (INTEL_GEN(dev_priv) >= 12)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
TGL_TRANS_CLK_SEL_DISABLED);
else
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
TRANS_CLK_SEL_DISABLED);
}
}
static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
@@ -2227,11 +2266,12 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
if (INTEL_GEN(dev_priv) >= 11) {
if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, encoder->type,
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
@@ -2413,15 +2453,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
}
static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
u32 level, enum port port, int type,
u32 level, enum phy phy, int type,
int rate)
{
const struct cnl_ddi_buf_trans *ddi_translations = NULL;
u32 n_entries, val;
int ln;
ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
rate, &n_entries);
ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
&n_entries);
if (!ddi_translations)
return;
@@ -2431,41 +2471,41 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
}
/* Set PORT_TX_DW5 */
val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
val = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
val |= RCOMP_SCALAR(0x98);
I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), val);
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln <= 3; ln++) {
val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* Program PORT_TX_DW7 */
val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
val = I915_READ(ICL_PORT_TX_DW7_LN0(phy));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
I915_WRITE(ICL_PORT_TX_DW7_GRP(phy), val);
}
static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2473,7 +2513,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
int width = 0;
int rate = 0;
u32 val;
@@ -2494,12 +2534,12 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
val = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
val = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
if (type == INTEL_OUTPUT_HDMI)
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), val);
I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -2509,33 +2549,33 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
val = I915_READ(ICL_PORT_CL_DW5(port));
val = I915_READ(ICL_PORT_CL_DW5(phy));
val |= SUS_CLOCK_CONFIG;
I915_WRITE(ICL_PORT_CL_DW5(port), val);
I915_WRITE(ICL_PORT_CL_DW5(phy), val);
/* 4. Clear training enable to change swing values */
val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
val &= ~TX_TRAINING_EN;
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
/* 6. Set training enable to trigger update */
val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
val |= TX_TRAINING_EN;
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
}
static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2663,9 +2703,9 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (intel_port_is_combophy(dev_priv, port))
if (intel_phy_is_combo(dev_priv, phy))
icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
else
icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
@@ -2728,12 +2768,13 @@ u32 ddi_signal_levels(struct intel_dp *intel_dp)
static inline
u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
enum port port)
enum phy phy)
{
if (intel_port_is_combophy(dev_priv, port)) {
return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port);
} else if (intel_port_is_tc(dev_priv, port)) {
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
if (intel_phy_is_combo(dev_priv, phy)) {
return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
} else if (intel_phy_is_tc(dev_priv, phy)) {
enum tc_port tc_port = intel_port_to_tc(dev_priv,
(enum port)phy);
return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port);
}
@@ -2746,23 +2787,33 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
mutex_lock(&dev_priv->dpll_lock);
val = I915_READ(DPCLKA_CFGCR0_ICL);
WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
val = I915_READ(ICL_DPCLKA_CFGCR0);
WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
if (intel_port_is_combophy(dev_priv, port)) {
val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
POSTING_READ(DPCLKA_CFGCR0_ICL);
if (intel_phy_is_combo(dev_priv, phy)) {
/*
* Even though this register references DDIs, note that we
* want to pass the PHY rather than the port (DDI). For
* ICL, port=phy in all cases so it doesn't matter, but for
* EHL the bspec notes the following:
*
* "DDID clock tied to DDIA clock, so DPCLKA_CFGCR0 DDIA
* Clock Select chooses the PLL for both DDIA and DDID and
* drives port A in all cases."
*/
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
I915_WRITE(ICL_DPCLKA_CFGCR0, val);
POSTING_READ(ICL_DPCLKA_CFGCR0);
}
val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
I915_WRITE(ICL_DPCLKA_CFGCR0, val);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -2770,14 +2821,14 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
mutex_lock(&dev_priv->dpll_lock);
val = I915_READ(DPCLKA_CFGCR0_ICL);
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
val = I915_READ(ICL_DPCLKA_CFGCR0);
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
I915_WRITE(ICL_DPCLKA_CFGCR0, val);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -2835,11 +2886,13 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
ddi_clk_needed = false;
}
val = I915_READ(DPCLKA_CFGCR0_ICL);
val = I915_READ(ICL_DPCLKA_CFGCR0);
for_each_port_masked(port, port_mask) {
enum phy phy = intel_port_to_phy(dev_priv, port);
bool ddi_clk_ungated = !(val &
icl_dpclka_cfgcr0_clk_off(dev_priv,
port));
phy));
if (ddi_clk_needed == ddi_clk_ungated)
continue;
@@ -2851,10 +2904,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
if (WARN_ON(ddi_clk_needed))
continue;
DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
port_name(port));
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
phy_name(port));
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
I915_WRITE(ICL_DPCLKA_CFGCR0, val);
}
}
@@ -2863,6 +2916,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
u32 val;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
@@ -2872,9 +2926,15 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
mutex_lock(&dev_priv->dpll_lock);
if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_port_is_combophy(dev_priv, port))
if (!intel_phy_is_combo(dev_priv, phy))
I915_WRITE(DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
else if (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C)
/*
* MG does not exist but the programming is required
* to ungate DDIC and DDID
*/
I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
} else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -2912,9 +2972,11 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_port_is_combophy(dev_priv, port))
if (!intel_phy_is_combo(dev_priv, phy) ||
(IS_ELKHARTLAKE(dev_priv) && port >= PORT_C))
I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
@@ -2995,25 +3057,22 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
u32 ln0, ln1, lane_info;
u32 ln0, ln1, lane_mask;
if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
return;
ln0 = I915_READ(MG_DP_MODE(0, port));
ln1 = I915_READ(MG_DP_MODE(1, port));
switch (intel_dig_port->tc_type) {
case TC_PORT_TYPEC:
switch (intel_dig_port->tc_mode) {
case TC_PORT_DP_ALT:
ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
DP_LANE_ASSIGNMENT_SHIFT(tc_port);
lane_mask = intel_tc_port_get_lane_mask(intel_dig_port);
switch (lane_info) {
switch (lane_mask) {
case 0x1:
case 0x4:
break;
@@ -3038,7 +3097,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
MG_DP_MODE_CFG_DP_X2_MODE;
break;
default:
MISSING_CASE(lane_info);
MISSING_CASE(lane_mask);
}
break;
@@ -3048,7 +3107,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
break;
default:
MISSING_CASE(intel_dig_port->tc_type);
MISSING_CASE(intel_dig_port->tc_mode);
return;
}
@@ -3080,10 +3139,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
val |= DP_TP_CTL_FEC_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
DP_TP_STATUS_FEC_ENABLE_LIVE,
DP_TP_STATUS_FEC_ENABLE_LIVE,
1))
if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
DRM_ERROR("Timed out waiting for FEC Enable Status\n");
}
@@ -3110,6 +3167,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
@@ -3123,7 +3181,10 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_clk_select(encoder, crtc_state);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
if (!intel_phy_is_tc(dev_priv, phy) ||
dig_port->tc_mode != TC_PORT_TBT_ALT)
intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
@@ -3138,11 +3199,11 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
if (intel_port_is_combophy(dev_priv, port)) {
if (intel_phy_is_combo(dev_priv, phy)) {
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
intel_combo_phy_power_up_lanes(dev_priv, port, false,
intel_combo_phy_power_up_lanes(dev_priv, phy, false,
crtc_state->lane_count,
lane_reversal);
}
@@ -3290,6 +3351,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &dig_port->dp;
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (!is_mst) {
intel_ddi_disable_pipe_clock(old_crtc_state);
@@ -3305,8 +3367,10 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
intel_display_power_put_unchecked(dev_priv,
dig_port->ddi_io_power_domain);
if (!intel_phy_is_tc(dev_priv, phy) ||
dig_port->tc_mode != TC_PORT_TBT_ALT)
intel_display_power_put_unchecked(dev_priv,
dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
}
@@ -3511,7 +3575,8 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
/* Enable hdcp if it's desired */
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
intel_hdcp_enable(to_intel_connector(conn_state->connector));
intel_hdcp_enable(to_intel_connector(conn_state->connector),
(u8)conn_state->hdcp_content_type);
}
static void intel_disable_ddi_dp(struct intel_encoder *encoder,
@@ -3580,44 +3645,65 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_hdcp *hdcp = &connector->hdcp;
bool content_protection_type_changed =
(conn_state->hdcp_content_type != hdcp->content_type &&
conn_state->content_protection !=
DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
/*
* If a Type change is requested during the HDCP encryption session,
* disable HDCP and re-enable it with the new Type value.
*/
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
intel_hdcp_enable(to_intel_connector(conn_state->connector));
else if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
intel_hdcp_disable(to_intel_connector(conn_state->connector));
DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
content_protection_type_changed)
intel_hdcp_disable(connector);
/*
* Mark the hdcp state as DESIRED after the hdcp disable done as part
* of the type change procedure.
*/
if (content_protection_type_changed) {
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
mutex_unlock(&hdcp->mutex);
}
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED ||
content_protection_type_changed)
intel_hdcp_enable(connector, (u8)conn_state->hdcp_content_type);
}
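From userspace the type switch above is driven through the new connector property. A hypothetical libdrm sequence (property IDs must be looked up by name at runtime and are shown symbolically; the enum values 1/1 correspond to "HDCP Type1" and "Desired"):

/* Ask for HDCP Content Type 1; the driver disables HDCP, re-marks the
 * state DESIRED and re-enables it with the new type, as above. */
drmModeObjectSetProperty(fd, connector_id, DRM_MODE_OBJECT_CONNECTOR,
			 hdcp_content_type_prop, 1 /* "HDCP Type1" */);
drmModeObjectSetProperty(fd, connector_id, DRM_MODE_OBJECT_CONNECTOR,
			 content_protection_prop, 1 /* "Desired" */);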
static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
enum port port)
static void
intel_ddi_update_prepare(struct intel_atomic_state *state,
struct intel_encoder *encoder,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
struct intel_crtc_state *crtc_state =
crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
int required_lanes = crtc_state ? crtc_state->lane_count : 1;
val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
switch (pipe_config->lane_count) {
case 1:
val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
DFLEXDPMLE1_DPMLETC_ML0(tc_port);
break;
case 2:
val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
break;
case 4:
val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
break;
default:
MISSING_CASE(pipe_config->lane_count);
}
I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
WARN_ON(crtc && crtc->active);
intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes);
if (crtc_state && crtc_state->base.active)
intel_update_active_dpll(state, crtc, encoder);
}
static void
intel_ddi_update_complete(struct intel_atomic_state *state,
struct intel_encoder *encoder,
struct intel_crtc *crtc)
{
intel_tc_port_put_link(enc_to_dig_port(&encoder->base));
}
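Taken together, the two hooks bracket a full modeset on a TypeC port so the TC link stays held while the PLL and port are reprogrammed. Assumed caller ordering, simplified and not taken from this patch:

if (encoder->update_prepare)
	encoder->update_prepare(state, encoder, crtc);	/* takes TC link */
/* ... PLL enable and port programming happen here ... */
if (encoder->update_complete)
	encoder->update_complete(state, encoder, crtc);	/* drops TC link */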
static void
@@ -3627,26 +3713,25 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
if (intel_crtc_has_dp_encoder(crtc_state) ||
intel_port_is_tc(dev_priv, encoder->port))
if (is_tc_port)
intel_tc_port_get_link(dig_port, crtc_state->lane_count);
if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
intel_display_power_get(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
if (IS_GEN9_LP(dev_priv))
if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT)
/*
* Program the lane count for static/dynamic connections on
* Type-C ports. Skip this step for TBT.
*/
intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_phy_set_lane_optim_mask(encoder,
crtc_state->lane_lat_optim_mask);
/*
* Program the lane count for static/dynamic connections on Type-C ports.
* Skip this step for TBT.
*/
if (dig_port->tc_type == TC_PORT_UNKNOWN ||
dig_port->tc_type == TC_PORT_TBT)
return;
intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
}
static void
@@ -3656,11 +3741,15 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
if (intel_crtc_has_dp_encoder(crtc_state) ||
intel_port_is_tc(dev_priv, encoder->port))
if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
intel_display_power_put_unchecked(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
if (is_tc_port)
intel_tc_port_put_link(dig_port);
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3737,7 +3826,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_digital_port *intel_dig_port;
u32 temp, flags = 0;
/* XXX: DSI transcoder paranoia */
@@ -3776,7 +3864,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
intel_dig_port = enc_to_dig_port(&encoder->base);
pipe_config->infoframes.enable |=
intel_hdmi_infoframes_enabled(encoder, pipe_config);
@@ -3914,49 +4001,18 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
return 0;
}
static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
intel_dp_encoder_suspend(encoder);
/*
* TODO: disconnect also from USB DP alternate mode once we have a
* way to handle the modeset restore in that mode during resume
* even if the sink has disappeared while being suspended.
*/
if (dig_port->tc_legacy_port)
icl_tc_phy_disconnect(i915, dig_port);
}
static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder);
struct drm_i915_private *i915 = to_i915(drm_encoder->dev);
if (intel_port_is_tc(i915, dig_port->base.port))
intel_digital_port_connected(&dig_port->base);
intel_dp_encoder_reset(drm_encoder);
}
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *i915 = to_i915(encoder->dev);
intel_dp_encoder_flush_work(encoder);
if (intel_port_is_tc(i915, dig_port->base.port))
icl_tc_phy_disconnect(i915, dig_port);
drm_encoder_cleanup(encoder);
kfree(dig_port);
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
.reset = intel_ddi_encoder_reset,
.reset = intel_dp_encoder_reset,
.destroy = intel_ddi_encoder_destroy,
};
@@ -4081,14 +4137,17 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
return modeset_pipe(&crtc->base, ctx);
}
static bool intel_ddi_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector)
static enum intel_hotplug_state
intel_ddi_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received)
{
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct drm_modeset_acquire_ctx ctx;
bool changed;
enum intel_hotplug_state state;
int ret;
changed = intel_encoder_hotplug(encoder, connector);
state = intel_encoder_hotplug(encoder, connector, irq_received);
drm_modeset_acquire_init(&ctx, 0);
@@ -4110,7 +4169,27 @@ static bool intel_ddi_hotplug(struct intel_encoder *encoder,
drm_modeset_acquire_fini(&ctx);
WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
return changed;
/*
* Unpowered type-c dongles can take some time to boot and become
* responsive, so give those dongles some time to power up and then
* retry the probe.
*
* On many platforms the HDMI live state signal is known to be
* unreliable, so we can't use it to detect if a sink is connected or
* not. Instead we detect if it's connected based on whether we can
* read the EDID or not. That in turn has a problem during disconnect,
* since the HPD interrupt may be raised before the DDC lines get
* disconnected (due to how the required length of DDC vs. HPD
* connector pins are specified) and so we'll still be able to get a
* valid EDID. To solve this, schedule another detection cycle if this
* time around we didn't detect any change in the sink's connection
* status.
*/
if (state == INTEL_HOTPLUG_UNCHANGED && irq_received &&
!dig_port->dp.is_mst)
state = INTEL_HOTPLUG_RETRY;
return state;
}
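A sketch of how a hotplug worker might consume the tri-state result; retry_pins, hotplug_retry_work and RETRY_DELAY_MS are illustrative stand-ins, not names from this patch:

enum intel_hotplug_state state;

state = encoder->hotplug(encoder, connector, irq_received);
if (state == INTEL_HOTPLUG_RETRY) {
	/* Remember the pin and re-run detection after a delay. */
	retry_pins |= BIT(encoder->hpd_pin);
	schedule_delayed_work(&hotplug_retry_work,
			      msecs_to_jiffies(RETRY_DELAY_MS));
}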
static struct intel_connector *
@@ -4198,6 +4277,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum pipe pipe;
enum phy phy = intel_port_to_phy(dev_priv, port);
init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
init_dp = port_info->supports_dp;
@@ -4242,7 +4322,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->update_pipe = intel_ddi_update_pipe;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
intel_encoder->suspend = intel_ddi_encoder_suspend;
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_encoder->get_power_domains = intel_ddi_get_power_domains;
intel_encoder->type = INTEL_OUTPUT_DDI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -4261,9 +4341,15 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) &&
!port_info->supports_typec_usb &&
!port_info->supports_tbt;
if (intel_phy_is_tc(dev_priv, phy)) {
bool is_legacy = !port_info->supports_typec_usb &&
!port_info->supports_tbt;
intel_tc_port_init(intel_dig_port, is_legacy);
intel_encoder->update_prepare = intel_ddi_update_prepare;
intel_encoder->update_complete = intel_ddi_update_complete;
}
switch (port) {
case PORT_A:
@@ -4290,6 +4376,18 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->ddi_io_power_domain =
POWER_DOMAIN_PORT_DDI_F_IO;
break;
case PORT_G:
intel_dig_port->ddi_io_power_domain =
POWER_DOMAIN_PORT_DDI_G_IO;
break;
case PORT_H:
intel_dig_port->ddi_io_power_domain =
POWER_DOMAIN_PORT_DDI_H_IO;
break;
case PORT_I:
intel_dig_port->ddi_io_power_domain =
POWER_DOMAIN_PORT_DDI_I_IO;
break;
default:
MISSING_CASE(port);
}
@@ -4324,9 +4422,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_infoframe_init(intel_dig_port);
if (intel_port_is_tc(dev_priv, port))
intel_digital_port_connected(intel_encoder);
return;
err:

File diff suppressed because it is too large.


@@ -28,8 +28,30 @@
#include <drm/drm_util.h>
#include <drm/i915_drm.h>
enum link_m_n_set;
struct dpll;
struct drm_connector;
struct drm_device;
struct drm_encoder;
struct drm_file;
struct drm_framebuffer;
struct drm_i915_error_state_buf;
struct drm_i915_gem_object;
struct drm_i915_private;
struct drm_modeset_acquire_ctx;
struct drm_plane;
struct drm_plane_state;
struct i915_ggtt_view;
struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
struct intel_encoder;
struct intel_load_detect_pipe;
struct intel_plane;
struct intel_plane_state;
struct intel_remapped_info;
struct intel_rotation_info;
enum i915_gpio {
GPIOA,
@@ -45,6 +67,8 @@ enum i915_gpio {
GPIOK,
GPIOL,
GPIOM,
GPION,
GPIOO,
};
/*
@@ -58,6 +82,7 @@ enum pipe {
PIPE_A = 0,
PIPE_B,
PIPE_C,
PIPE_D,
_PIPE_EDP,
I915_MAX_PIPES = _PIPE_EDP
@@ -75,6 +100,7 @@ enum transcoder {
TRANSCODER_A = PIPE_A,
TRANSCODER_B = PIPE_B,
TRANSCODER_C = PIPE_C,
TRANSCODER_D = PIPE_D,
/*
* The following transcoders can map to any pipe, their enum value
@@ -98,6 +124,8 @@ static inline const char *transcoder_name(enum transcoder transcoder)
return "B";
case TRANSCODER_C:
return "C";
case TRANSCODER_D:
return "D";
case TRANSCODER_EDP:
return "EDP";
case TRANSCODER_DSI_A:
@@ -173,6 +201,12 @@ static inline const char *port_identifier(enum port port)
return "Port E";
case PORT_F:
return "Port F";
case PORT_G:
return "Port G";
case PORT_H:
return "Port H";
case PORT_I:
return "Port I";
default:
return "<invalid>";
}
@@ -185,14 +219,15 @@ enum tc_port {
PORT_TC2,
PORT_TC3,
PORT_TC4,
PORT_TC5,
PORT_TC6,
I915_MAX_TC_PORTS
};
enum tc_port_type {
TC_PORT_UNKNOWN = 0,
TC_PORT_TYPEC,
TC_PORT_TBT,
enum tc_port_mode {
TC_PORT_TBT_ALT,	/* Thunderbolt alternate mode */
TC_PORT_DP_ALT,		/* DisplayPort alternate mode */
TC_PORT_LEGACY,		/* fixed, non-switchable connection */
};
@@ -229,6 +264,30 @@ struct intel_link_m_n {
u32 link_n;
};
enum phy {
PHY_NONE = -1,
PHY_A = 0,
PHY_B,
PHY_C,
PHY_D,
PHY_E,
PHY_F,
PHY_G,
PHY_H,
PHY_I,
I915_MAX_PHYS
};
#define phy_name(a) ((a) + 'A')
/* FIA (Flex IO Adapter) instances, which mux the TypeC PHY lanes */
enum phy_fia {
FIA1,
FIA2,
FIA3,
};
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
@@ -254,6 +313,10 @@ struct intel_link_m_n {
for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
for_each_if((__ports_mask) & BIT(__port))
#define for_each_phy_masked(__phy, __phys_mask) \
for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
for_each_if((__phys_mask) & BIT(__phy))
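Illustrative use of the new PHY enum and iterator (hypothetical fragment):

enum phy phy;

/* Walk combo PHYs A and C, logging their letter names via phy_name(). */
for_each_phy_masked(phy, BIT(PHY_A) | BIT(PHY_C))
	DRM_DEBUG_KMS("combo PHY %c\n", phy_name(phy));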
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
@@ -357,5 +420,173 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
void intel_plane_destroy(struct drm_plane *plane);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg);
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
int plane);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height);
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
unsigned int expected_mask);
int intel_get_load_detect_pipe(struct drm_connector *connector,
const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
const struct i915_ggtt_view *view,
bool uses_fence,
unsigned long *out_flags);
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
int intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
bool intel_fuzzy_clock_check(int clock1, int clock2);
void intel_prepare_reset(struct drm_i915_private *dev_priv);
void intel_finish_reset(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct intel_crtc *crtc);
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(const struct intel_crtc_state *crtc_state,
u32 pixel_format);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int plane);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_display_error_state *error);
/* modesetting */
void intel_modeset_init_hw(struct drm_device *dev);
int intel_modeset_init(struct drm_device *dev);
void intel_modeset_driver_remove(struct drm_device *dev);
int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state);
void intel_display_resume(struct drm_device *dev);
void i915_redisable_vga(struct drm_i915_private *dev_priv);
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
/* modesetting asserts */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
enum pipe pipe);
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
* which may not necessarily be a user visible problem. This will either
* WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam, to
* enable distros and users to tailor their preferred amount of i915 abrt
* spam.
*/
#define I915_STATE_WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
if (!WARN(i915_modparams.verbose_state_checks, format)) \
DRM_ERROR(format); \
unlikely(__ret_warn_on); \
})
#define I915_STATE_WARN_ON(x) \
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
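For illustration, a hypothetical hardware-state check built on the macros above:

/* Either WARN()s or logs a DRM error, depending on the
 * verbose_state_checks module parameter. */
I915_STATE_WARN(I915_READ(ICL_PORT_COMP_DW0(PHY_A)) & COMP_INIT,
		"combo PHY %c unexpectedly initialized\n", phy_name(PHY_A));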
#endif

File diff suppressed because it is too large.


@@ -18,28 +18,47 @@ enum intel_display_power_domain {
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
POWER_DOMAIN_PIPE_C,
POWER_DOMAIN_PIPE_D,
POWER_DOMAIN_PIPE_A_PANEL_FITTER,
POWER_DOMAIN_PIPE_B_PANEL_FITTER,
POWER_DOMAIN_PIPE_C_PANEL_FITTER,
POWER_DOMAIN_PIPE_D_PANEL_FITTER,
POWER_DOMAIN_TRANSCODER_A,
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_D,
POWER_DOMAIN_TRANSCODER_EDP,
POWER_DOMAIN_TRANSCODER_EDP_VDSC,
/* VDSC/joining for TRANSCODER_EDP (ICL) or TRANSCODER_A (TGL) */
POWER_DOMAIN_TRANSCODER_VDSC_PW2,
POWER_DOMAIN_TRANSCODER_DSI_A,
POWER_DOMAIN_TRANSCODER_DSI_C,
POWER_DOMAIN_PORT_DDI_A_LANES,
POWER_DOMAIN_PORT_DDI_B_LANES,
POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_TC1_LANES = POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_TC2_LANES = POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_F_LANES,
POWER_DOMAIN_PORT_DDI_TC3_LANES = POWER_DOMAIN_PORT_DDI_F_LANES,
POWER_DOMAIN_PORT_DDI_TC4_LANES,
POWER_DOMAIN_PORT_DDI_TC5_LANES,
POWER_DOMAIN_PORT_DDI_TC6_LANES,
POWER_DOMAIN_PORT_DDI_A_IO,
POWER_DOMAIN_PORT_DDI_B_IO,
POWER_DOMAIN_PORT_DDI_C_IO,
POWER_DOMAIN_PORT_DDI_D_IO,
POWER_DOMAIN_PORT_DDI_TC1_IO = POWER_DOMAIN_PORT_DDI_D_IO,
POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DDI_TC2_IO = POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DDI_F_IO,
POWER_DOMAIN_PORT_DDI_TC3_IO = POWER_DOMAIN_PORT_DDI_F_IO,
POWER_DOMAIN_PORT_DDI_G_IO,
POWER_DOMAIN_PORT_DDI_TC4_IO = POWER_DOMAIN_PORT_DDI_G_IO,
POWER_DOMAIN_PORT_DDI_H_IO,
POWER_DOMAIN_PORT_DDI_TC5_IO = POWER_DOMAIN_PORT_DDI_H_IO,
POWER_DOMAIN_PORT_DDI_I_IO,
POWER_DOMAIN_PORT_DDI_TC6_IO = POWER_DOMAIN_PORT_DDI_I_IO,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -49,21 +68,51 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_TC1 = POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_TC2 = POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
POWER_DOMAIN_AUX_TC3 = POWER_DOMAIN_AUX_F,
POWER_DOMAIN_AUX_TC4,
POWER_DOMAIN_AUX_TC5,
POWER_DOMAIN_AUX_TC6,
POWER_DOMAIN_AUX_IO_A,
POWER_DOMAIN_AUX_TBT1,
POWER_DOMAIN_AUX_TBT2,
POWER_DOMAIN_AUX_TBT3,
POWER_DOMAIN_AUX_TBT4,
POWER_DOMAIN_AUX_TBT5,
POWER_DOMAIN_AUX_TBT6,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
POWER_DOMAIN_DPLL_DC_OFF,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
};
/*
* i915_power_well_id:
*
* IDs used to look up power wells. Power wells accessed directly,
* bypassing the power domains framework, must be assigned a unique
* ID. All other power wells must be assigned DISP_PW_ID_NONE.
*/
enum i915_power_well_id {
DISP_PW_ID_NONE,
VLV_DISP_PW_DISP2D,
BXT_DISP_PW_DPIO_CMN_A,
VLV_DISP_PW_DPIO_CMN_BC,
GLK_DISP_PW_DPIO_CMN_C,
CHV_DISP_PW_DPIO_CMN_D,
HSW_DISP_PW_GLOBAL,
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
};
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
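These helpers depend on the pipe and power-domain enums staying in lockstep, which is why PIPE_D and POWER_DOMAIN_PIPE_D were added at matching positions. A build-time check one could add (hypothetical):

BUILD_BUG_ON(POWER_DOMAIN_PIPE(PIPE_D) != POWER_DOMAIN_PIPE_D);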
@@ -204,30 +253,24 @@ struct i915_power_domains {
for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
void skl_enable_dc6(struct drm_i915_private *dev_priv);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
int intel_power_domains_init(struct drm_i915_private *dev_priv);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void icl_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv);
void intel_power_domains_enable(struct drm_i915_private *dev_priv);
void intel_power_domains_disable(struct drm_i915_private *dev_priv);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
enum i915_drm_suspend_mode);
void intel_power_domains_resume(struct drm_i915_private *dev_priv);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_display_power_suspend_late(struct drm_i915_private *i915);
void intel_display_power_resume_early(struct drm_i915_private *i915);
void intel_display_power_suspend(struct drm_i915_private *i915);
void intel_display_power_resume(struct drm_i915_private *i915);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
intel_display_power_domain_str(struct drm_i915_private *i915,
enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);


@@ -22,8 +22,9 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef __INTEL_DRV_H__
#define __INTEL_DRV_H__
#ifndef __INTEL_DISPLAY_TYPES_H__
#define __INTEL_DISPLAY_TYPES_H__
#include <linux/async.h>
#include <linux/i2c.h>
@@ -67,8 +68,23 @@ enum intel_output_type {
INTEL_OUTPUT_DP_MST = 11,
};
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
HDMI_AUDIO_OFF, /* force turn off HDMI audio */
HDMI_AUDIO_AUTO, /* trust EDID */
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
/* "Broadcast RGB" property */
enum intel_broadcast_rgb {
INTEL_BROADCAST_RGB_AUTO,
INTEL_BROADCAST_RGB_FULL,
INTEL_BROADCAST_RGB_LIMITED,
};
struct intel_framebuffer {
struct drm_framebuffer base;
struct intel_frontbuffer *frontbuffer;
struct intel_rotation_info rot_info;
/* for each plane in the normal GTT view */
@@ -101,20 +117,30 @@ struct intel_fbdev {
struct mutex hpd_lock;
};
enum intel_hotplug_state {
INTEL_HOTPLUG_UNCHANGED,	/* connection status did not change */
INTEL_HOTPLUG_CHANGED,		/* status changed, notify userspace */
INTEL_HOTPLUG_RETRY,		/* no change seen, re-run detection later */
};
struct intel_encoder {
struct drm_encoder base;
enum intel_output_type type;
enum port port;
unsigned int cloneable;
bool (*hotplug)(struct intel_encoder *encoder,
struct intel_connector *connector);
enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received);
enum intel_output_type (*compute_output_type)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
int (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
void (*update_prepare)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
void (*pre_pll_enable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
@@ -124,6 +150,9 @@ struct intel_encoder {
void (*enable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
void (*update_complete)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
void (*disable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
@@ -812,6 +841,15 @@ struct intel_crtc_state {
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
/*
* ICL reserved DPLLs for the CRTC/port. The active PLL is selected by
* setting shared_dpll and dpll_hw_state to one of these reserved ones.
*/
struct icl_port_dpll {
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state hw_state;
} icl_port_dplls[ICL_PORT_DPLL_COUNT];
/* DSI PLL registers */
struct {
u32 ctrl, div;
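Selecting the active PLL from the icl_port_dplls array above then amounts to copying one reserved entry into the generic fields; a sketch, assuming an ICL_PORT_DPLL_DEFAULT index as used by the DPLL code:

struct icl_port_dpll *port_dpll =
	&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];

crtc_state->shared_dpll = port_dpll->pll;
crtc_state->dpll_hw_state = port_dpll->hw_state;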
@@ -1224,8 +1262,13 @@ struct intel_digital_port {
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
struct mutex tc_lock; /* protects the TypeC port mode */
intel_wakeref_t tc_lock_wakeref;
int tc_link_refcount;
bool tc_legacy_port:1;
enum tc_port_type tc_type;
char tc_port_name[8];
enum tc_port_mode tc_mode;
enum phy_fia tc_phy_fia;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1446,41 +1489,6 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
}
/* intel_display.c */
void intel_plane_destroy(struct drm_plane *plane);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg);
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
int plane);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height);
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
static inline bool
intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
enum intel_output_type type)
@@ -1509,108 +1517,9 @@ intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
intel_wait_for_vblank(dev_priv, pipe);
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
unsigned int expected_mask);
int intel_get_load_detect_pipe(struct drm_connector *connector,
const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
const struct i915_ggtt_view *view,
bool uses_fence,
unsigned long *out_flags);
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
int intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
bool intel_fuzzy_clock_check(int clock1, int clock2);
/* modesetting asserts */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
enum pipe pipe);
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
void intel_prepare_reset(struct drm_i915_private *dev_priv);
void intel_finish_reset(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct intel_crtc *crtc);
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(const struct intel_crtc_state *crtc_state,
u32 pixel_format);
static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
{
return i915_ggtt_offset(state->vma);
}
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int plane);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
#endif /* __INTEL_DRV_H__ */
#endif /* __INTEL_DISPLAY_TYPES_H__ */


@@ -44,15 +44,16 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
@@ -62,6 +63,7 @@
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#define DP_DPRX_ESI_LEN 14
@@ -211,47 +213,13 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
intel_wakeref_t wakeref;
u32 lane_info;
if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
return 4;
lane_info = 0;
with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
DP_LANE_ASSIGNMENT_SHIFT(tc_port);
switch (lane_info) {
default:
MISSING_CASE(lane_info);
/* fall through */
case 1:
case 2:
case 4:
case 8:
return 1;
case 3:
case 12:
return 2;
case 15:
return 4;
}
}
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
int source_max = intel_dig_port->max_lanes;
int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);
int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
return min3(source_max, sink_max, fia_max);
}
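The open-coded FIA lane-count query above moves behind a helper in the new intel_tc.c; a hedged note on the assumed behavior:

/* Assumed behavior of intel_tc_port_fia_max_lane_count(): decode the
 * same DFLEXDPSP lane-assignment field as the function deleted above
 * (0xf -> 4 lanes, 0x3 or 0xc -> 2, a single set bit -> 1), and keep
 * returning 4 for non-TypeC ports. */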
@@ -330,9 +298,9 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
if (intel_port_is_combophy(dev_priv, port) &&
if (intel_phy_is_combo(dev_priv, phy) &&
!IS_ELKHARTLAKE(dev_priv) &&
!intel_dp_is_edp(intel_dp))
return 540000;
@@ -1209,7 +1177,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
if (intel_dig_port->tc_type == TC_PORT_TBT)
if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
ret |= DP_AUX_CH_CTL_TBT_IO;
return ret;
@@ -1225,6 +1193,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
struct drm_i915_private *i915 =
to_i915(intel_dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain =
@@ -1240,6 +1210,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
if (is_tc_port)
intel_tc_port_lock(intel_dig_port);
aux_wakeref = intel_display_power_get(i915, aux_domain);
pps_wakeref = pps_lock(intel_dp);
@@ -1392,6 +1365,9 @@ out:
pps_unlock(intel_dp, pps_wakeref);
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
if (is_tc_port)
intel_tc_port_unlock(intel_dig_port);
return ret;
}
@@ -1879,8 +1855,10 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
int mode_rate, link_clock, link_avail;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
int output_bpp = intel_dp_output_bpp(pipe_config, bpp);
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
output_bpp);
for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
for (lane_count = limits->min_lane_count;
@@ -2393,9 +2371,8 @@ static void wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
if (intel_wait_for_register(&dev_priv->uncore,
pp_stat_reg, mask, value,
5000))
if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
mask, value, 5000))
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
@@ -3982,10 +3959,8 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (port == PORT_A)
return;
if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
DP_TP_STATUS_IDLE_DONE,
DP_TP_STATUS_IDLE_DONE,
1))
if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
DP_TP_STATUS_IDLE_DONE, 1))
DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
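This and the surrounding hunks convert open-coded intel_wait_for_register() calls to the new intel_de_wait_for_set()/intel_de_wait_for_clear() helpers. A minimal sketch of the assumed wrappers (timeouts in milliseconds):

static inline int
intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
			   u32 mask, u32 value, unsigned int timeout)
{
	/* Assumed thin wrapper over the uncore waiter. */
	return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
}

static inline int
intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
		      u32 mask, unsigned int timeout)
{
	/* Wait until all bits in mask read back as set. */
	return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
}

static inline int
intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
			u32 mask, unsigned int timeout)
{
	/* Wait until all bits in mask read back as zero. */
	return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
}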
@@ -4169,10 +4144,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
/*
* Read the eDP display control registers.
*
@@ -4244,8 +4215,14 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_read_dpcd(intel_dp))
return false;
/* Don't clobber cached eDP rates. */
/*
* Don't clobber cached eDP rates. Also skip re-reading
* the OUI/ID since we know it won't change.
*/
if (!intel_dp_is_edp(intel_dp)) {
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_common_rates(intel_dp);
}
@@ -4254,7 +4231,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* Some eDP panels do not set a valid value for the sink count, which is
* why we don't bother reading it here or in intel_edp_init_dpcd().
*/
if (!intel_dp_is_edp(intel_dp)) {
if (!intel_dp_is_edp(intel_dp) &&
!drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
u8 count;
ssize_t r;
@@ -4879,14 +4857,16 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
* retrain the link to get a picture. That's in case no
* userspace component reacted to intermittent HPD dip.
*/
static bool intel_dp_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector)
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received)
{
struct drm_modeset_acquire_ctx ctx;
bool changed;
enum intel_hotplug_state state;
int ret;
changed = intel_encoder_hotplug(encoder, connector);
state = intel_encoder_hotplug(encoder, connector, irq_received);
drm_modeset_acquire_init(&ctx, 0);
@@ -4905,7 +4885,14 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
drm_modeset_acquire_fini(&ctx);
WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
return changed;
/*
* Keeping it consistent with intel_ddi_hotplug() and
* intel_hdmi_hotplug().
*/
if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
state = INTEL_HOTPLUG_RETRY;
return state;
}
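The tri-state return presumably lets the hotplug work re-probe connectors that were not ready on the first pass; a hypothetical consumer sketch (the retry plumbing shown here is invented for illustration):

/* Hypothetical sketch: give flaky sinks a second detection pass. */
enum intel_hotplug_state state;
u32 retry_mask = 0;

state = encoder->hotplug(encoder, connector, irq_received);
if (state == INTEL_HOTPLUG_RETRY)
	retry_mask |= BIT(encoder->hpd_pin);

if (retry_mask) /* re-run detection later for the flagged pins */
	schedule_delayed_work(&retry_work, msecs_to_jiffies(1000));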
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
@@ -5233,204 +5220,16 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
}
static const char *tc_type_name(enum tc_port_type type)
{
static const char * const names[] = {
[TC_PORT_UNKNOWN] = "unknown",
[TC_PORT_LEGACY] = "legacy",
[TC_PORT_TYPEC] = "typec",
[TC_PORT_TBT] = "tbt",
};
if (WARN_ON(type >= ARRAY_SIZE(names)))
type = TC_PORT_UNKNOWN;
return names[type];
}
static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
struct intel_digital_port *intel_dig_port,
bool is_legacy, bool is_typec, bool is_tbt)
{
enum port port = intel_dig_port->base.port;
enum tc_port_type old_type = intel_dig_port->tc_type;
WARN_ON(is_legacy + is_typec + is_tbt != 1);
if (is_legacy)
intel_dig_port->tc_type = TC_PORT_LEGACY;
else if (is_typec)
intel_dig_port->tc_type = TC_PORT_TYPEC;
else if (is_tbt)
intel_dig_port->tc_type = TC_PORT_TBT;
else
return;
/* Types are not supposed to be changed at runtime. */
WARN_ON(old_type != TC_PORT_UNKNOWN &&
old_type != intel_dig_port->tc_type);
if (old_type != intel_dig_port->tc_type)
DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
tc_type_name(intel_dig_port->tc_type));
}
/*
* This function implements the first part of the Connect Flow described by our
* specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
* lanes, EDID, etc) is done as needed in the typical places.
*
* Unlike the other ports, type-C ports are not available to use as soon as we
* get a hotplug. The type-C PHYs can be shared between multiple controllers:
* display, USB, etc. As a result, handshaking through FIA is required around
* connect and disconnect to cleanly transfer ownership with the controller and
* set the type-C power state.
*
* We could opt to only do the connect flow when we actually try to use the AUX
* channels or do a modeset, then immediately run the disconnect flow after
* usage, but there are some implications on this for a dynamic environment:
* things may go away or change behind our backs. So for now our driver is
* always trying to acquire ownership of the controller as soon as it gets an
* interrupt (or polls state and sees a port is connected) and only gives it
* back when it sees a disconnect. Implementation of a more fine-grained model
* will require a lot of coordination with user space and thorough testing for
* the extra possible cases.
*/
static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port)
{
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
u32 val;
if (dig_port->tc_type != TC_PORT_LEGACY &&
dig_port->tc_type != TC_PORT_TYPEC)
return true;
val = I915_READ(PORT_TX_DFLEXDPPMS);
if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
WARN_ON(dig_port->tc_legacy_port);
return false;
}
/*
* This function may be called many times in a row without an HPD event
* in between, so try to avoid the write when we can.
*/
val = I915_READ(PORT_TX_DFLEXDPCSSS);
if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
}
/*
* Now we have to re-check the live state, in case the port recently
* became disconnected. Not necessary for legacy mode.
*/
if (dig_port->tc_type == TC_PORT_TYPEC &&
!(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
icl_tc_phy_disconnect(dev_priv, dig_port);
return false;
}
return true;
}
/*
* See the comment at the connect function. This implements the Disconnect
* Flow.
*/
void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port)
{
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
if (dig_port->tc_type == TC_PORT_UNKNOWN)
return;
/*
* The TBT disconnection flow only requires reading the live status,
* which the caller has already done.
*/
if (dig_port->tc_type == TC_PORT_TYPEC ||
dig_port->tc_type == TC_PORT_LEGACY) {
u32 val;
val = I915_READ(PORT_TX_DFLEXDPCSSS);
val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
}
DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
port_name(dig_port->base.port),
tc_type_name(dig_port->tc_type));
dig_port->tc_type = TC_PORT_UNKNOWN;
}
/*
* The type-C ports are different because even when they are connected, they may
* not be available/usable by the graphics driver: see the comment on
* icl_tc_phy_connect(). So in our driver, instead of adding the additional
* concept of "usable" and making everything check for "connected and usable", we
* define a port as "connected" when it is not only connected, but also when it
* is usable by the rest of the driver. That maintains the old assumption that
* connected ports are usable, and avoids exposing to the users objects they
* can't really use.
*/
static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *intel_dig_port)
{
enum port port = intel_dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
bool is_legacy, is_typec, is_tbt;
u32 dpsp;
/*
* Complain if we got a legacy port HPD, but VBT didn't mark the port as
* legacy. Treat the port as legacy from now on.
*/
if (!intel_dig_port->tc_legacy_port &&
I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) {
DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n",
port_name(port));
intel_dig_port->tc_legacy_port = true;
}
is_legacy = intel_dig_port->tc_legacy_port;
/*
* The spec says we shouldn't use the ISR bits to distinguish
* between TC and TBT. We should use DFLEXDPSP instead.
*/
dpsp = I915_READ(PORT_TX_DFLEXDPSP);
is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
if (!is_legacy && !is_typec && !is_tbt) {
icl_tc_phy_disconnect(dev_priv, intel_dig_port);
return false;
}
icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
is_tbt);
if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
return false;
return true;
}
static bool icl_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (intel_port_is_combophy(dev_priv, encoder->port))
if (intel_phy_is_combo(dev_priv, phy))
return icl_combo_port_connected(dev_priv, dig_port);
else if (intel_port_is_tc(dev_priv, encoder->port))
return icl_tc_port_connected(dev_priv, dig_port);
else if (intel_phy_is_tc(dev_priv, phy))
return intel_tc_port_connected(dig_port);
else
MISSING_CASE(encoder->hpd_pin);
@@ -5588,9 +5387,6 @@ intel_dp_detect(struct drm_connector *connector,
if (INTEL_GEN(dev_priv) >= 11)
intel_dp_get_dsc_sink_cap(intel_dp);
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
intel_dp_configure_mst(intel_dp);
if (intel_dp->is_mst) {
@@ -6016,47 +5812,49 @@ struct hdcp2_dp_errata_stream_type {
u8 stream_type;
} __packed;
static struct hdcp2_dp_msg_data {
struct hdcp2_dp_msg_data {
u8 msg_id;
u32 offset;
bool msg_detectable;
u32 timeout;
u32 timeout2; /* Added for non_paired situation */
} hdcp2_msg_data[] = {
{HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
{HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
{HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
false, 0, 0},
{HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
false, 0, 0},
{HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
{HDCP_2_2_AKE_SEND_PAIRING_INFO,
DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
{HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
{HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
{HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
0, 0},
{HDCP_2_2_REP_SEND_RECVID_LIST,
DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
{HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
0, 0},
{HDCP_2_2_REP_STREAM_MANAGE,
DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
0, 0},
{HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
};
static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
{ HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
{ HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
{ HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
false, 0, 0 },
{ HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
false, 0, 0 },
{ HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
{ HDCP_2_2_AKE_SEND_PAIRING_INFO,
DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
{ HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
{ HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
{ HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
0, 0 },
{ HDCP_2_2_REP_SEND_RECVID_LIST,
DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
{ HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
0, 0 },
{ HDCP_2_2_REP_STREAM_MANAGE,
DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
0, 0 },
{ HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
/* local define to shovel this through the write_2_2 interface */
#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
{HDCP_2_2_ERRATA_DP_STREAM_TYPE,
DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
0, 0},
};
{ HDCP_2_2_ERRATA_DP_STREAM_TYPE,
DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
0, 0 },
};
static inline
int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
@@ -6110,7 +5908,7 @@ int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
struct hdcp2_dp_msg_data *hdcp2_msg_data)
const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
struct intel_dp *dp = &intel_dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
@@ -6149,13 +5947,13 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
return ret;
}
static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
{
int i;
for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
if (hdcp2_msg_data[i].msg_id == msg_id)
return &hdcp2_msg_data[i];
for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
if (hdcp2_dp_msg_data[i].msg_id == msg_id)
return &hdcp2_dp_msg_data[i];
return NULL;
}
@@ -6169,7 +5967,7 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_write, len;
struct hdcp2_dp_msg_data *hdcp2_msg_data;
const struct hdcp2_dp_msg_data *hdcp2_msg_data;
hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
if (!hdcp2_msg_data)
@@ -6233,7 +6031,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
struct hdcp2_dp_msg_data *hdcp2_msg_data;
const struct hdcp2_dp_msg_data *hdcp2_msg_data;
hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
if (!hdcp2_msg_data)
@@ -6835,8 +6633,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state,
int refresh_rate)
{
struct intel_encoder *encoder;
struct intel_digital_port *dig_port = NULL;
struct intel_dp *intel_dp = dev_priv->drrs.dp;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
@@ -6851,9 +6647,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
dig_port = dp_to_dig_port(intel_dp);
encoder = &dig_port->base;
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
return;
@@ -7333,6 +7126,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
int type;
/* Initialize the work for modeset in case of link train failure */
@@ -7359,7 +7153,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
*/
WARN_ON(intel_port_is_tc(dev_priv, port));
WARN_ON(intel_phy_is_tc(dev_priv, phy));
type = DRM_MODE_CONNECTOR_eDP;
} else {
type = DRM_MODE_CONNECTOR_DisplayPort;


@@ -112,8 +112,6 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_digital_port_connected(struct intel_encoder *encoder);
void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{


@@ -22,8 +22,8 @@
*
*/
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
#include "intel_drv.h"
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
@@ -264,8 +264,11 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev);
if (!i915_modparams.enable_dpcd_backlight)
if (i915_modparams.enable_dpcd_backlight == 0 ||
(i915_modparams.enable_dpcd_backlight == -1 &&
dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE))
return -ENODEV;
if (!intel_dp_aux_display_control_capable(intel_connector))
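A hedged summary of the new tri-state modparam logic above:

/* Assumed i915.enable_dpcd_backlight semantics after this change:
 *  0 = never use the DPCD/AUX backlight,
 * -1 = auto: use it only when the VBT declares a VESA eDP AUX backlight,
 *  anything else = force it on. */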


@@ -21,9 +21,9 @@
* IN THE SOFTWARE.
*/
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_drv.h"
static void
intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])


@@ -32,10 +32,10 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_drv.h"
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
@@ -346,11 +346,8 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
if (intel_wait_for_register(&dev_priv->uncore,
DP_TP_STATUS(port),
DP_TP_STATUS_ACT_SENT,
DP_TP_STATUS_ACT_SENT,
1))
if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
DP_TP_STATUS_ACT_SENT, 1))
DRM_ERROR("Timed out waiting for ACT sent\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
@@ -618,7 +615,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->base.port;
intel_encoder->crtc_mask = 0x7;
intel_encoder->crtc_mask = BIT(pipe);
intel_encoder->cloneable = 0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
@@ -647,6 +644,12 @@ intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
return true;
}
int
intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
{
return intel_dig_port->dp.active_mst_links;
}
int
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
{


@@ -10,5 +10,6 @@ struct intel_digital_port;
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);
#endif /* __INTEL_DP_MST_H__ */


@@ -23,8 +23,8 @@
#include "display/intel_dp.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_sideband.h"
/**
@@ -345,10 +345,8 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
if (intel_wait_for_register(&dev_priv->uncore,
BXT_PORT_REF_DW3(phy),
GRC_DONE, GRC_DONE,
10))
if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
GRC_DONE, 10))
DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}

File diff suppressed because it is too large

@@ -28,6 +28,7 @@
#include <linux/types.h>
#include "intel_display.h"
#include "intel_wakeref.h"
/* FIXME: Move this to a more appropriate place. */
#define abs_diff(a, b) ({ \
@@ -36,9 +37,9 @@
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
struct drm_atomic_state;
struct drm_device;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_encoder;
@@ -110,35 +111,59 @@ enum intel_dpll_id {
/**
* @DPLL_ID_ICL_DPLL0: ICL combo PHY DPLL0
* @DPLL_ID_ICL_DPLL0: ICL/TGL combo PHY DPLL0
*/
DPLL_ID_ICL_DPLL0 = 0,
/**
* @DPLL_ID_ICL_DPLL1: ICL combo PHY DPLL1
* @DPLL_ID_ICL_DPLL1: ICL/TGL combo PHY DPLL1
*/
DPLL_ID_ICL_DPLL1 = 1,
/**
* @DPLL_ID_ICL_TBTPLL: ICL TBT PLL
* @DPLL_ID_EHL_DPLL4: EHL combo PHY DPLL4
*/
DPLL_ID_EHL_DPLL4 = 2,
/**
* @DPLL_ID_ICL_TBTPLL: ICL/TGL TBT PLL
*/
DPLL_ID_ICL_TBTPLL = 2,
/**
* @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
* @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C),
* TGL TC PLL 1 port 1 (TC1)
*/
DPLL_ID_ICL_MGPLL1 = 3,
/**
* @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D)
* TGL TC PLL 1 port 2 (TC2)
*/
DPLL_ID_ICL_MGPLL2 = 4,
/**
* @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E)
* TGL TC PLL 1 port 3 (TC3)
*/
DPLL_ID_ICL_MGPLL3 = 5,
/**
* @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F)
* TGL TC PLL 1 port 4 (TC4)
*/
DPLL_ID_ICL_MGPLL4 = 6,
/**
* @DPLL_ID_TGL_MGPLL5: TGL TC PLL port 5 (TC5)
*/
DPLL_ID_TGL_MGPLL5 = 7,
/**
* @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6)
*/
DPLL_ID_TGL_MGPLL6 = 8,
};
#define I915_NUM_PLLS 9
enum icl_port_dpll_id {
ICL_PORT_DPLL_DEFAULT,
ICL_PORT_DPLL_MG_PHY,
ICL_PORT_DPLL_COUNT,
};
#define I915_NUM_PLLS 7
struct intel_dpll_hw_state {
/* i9xx, pch plls */
@@ -195,7 +220,7 @@ struct intel_dpll_hw_state {
* future state which would be applied by an atomic mode set (stored in
* a struct &intel_atomic_state).
*
* See also intel_get_shared_dpll() and intel_release_shared_dpll().
* See also intel_reserve_shared_dplls() and intel_release_shared_dplls().
*/
struct intel_shared_dpll_state {
/**
@@ -312,6 +337,7 @@ struct intel_shared_dpll {
* @info: platform specific info
*/
const struct dpll_info *info;
intel_wakeref_t wakeref;
};
#define SKL_DPLL0 0
@@ -331,15 +357,20 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state,
struct intel_encoder *encoder);
void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
struct intel_crtc *crtc,
struct drm_atomic_state *state);
bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
void intel_release_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
enum icl_port_dpll_id port_dpll_id);
void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,

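A hedged sketch of the expected reserve/release calling pattern during atomic check (the caller is invented for illustration):

/* Hypothetical caller: reserve DPLLs while validating the state and
 * release them again on the failure path. */
static int example_crtc_check(struct intel_atomic_state *state,
			      struct intel_crtc *crtc,
			      struct intel_encoder *encoder)
{
	if (!intel_reserve_shared_dplls(state, crtc, encoder))
		return -EINVAL; /* no suitable PLL available */

	/* ...further checks; on any failure after this point: */
	/* intel_release_shared_dplls(state, crtc); */
	return 0;
}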

@@ -26,7 +26,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include "intel_drv.h"
#include "intel_display_types.h"
#define INTEL_DSI_VIDEO_MODE 0
#define INTEL_DSI_COMMAND_MODE 1
@@ -49,8 +50,11 @@ struct intel_dsi {
struct intel_connector *attached_connector;
/* bit mask of ports being driven */
u16 ports;
/* bit mask of ports (vlv dsi) or phys (icl dsi) being driven */
union {
u16 ports; /* VLV DSI */
u16 phys; /* ICL DSI */
};
/* if true, use HS mode, otherwise LP */
bool hs;
@@ -132,7 +136,10 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
return container_of(h, struct intel_dsi_host, base);
}
#define for_each_dsi_port(__port, __ports_mask) for_each_port_masked(__port, __ports_mask)
#define for_each_dsi_port(__port, __ports_mask) \
for_each_port_masked(__port, __ports_mask)
#define for_each_dsi_phy(__phy, __phys_mask) \
for_each_phy_masked(__phy, __phys_mask)
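A hedged usage fragment for the new iterator (intel_dsi assumed in scope):

enum phy phy;

/* Walk the ICL DSI PHYs selected by the new phys mask. */
for_each_dsi_phy(phy, intel_dsi->phys) {
	/* per-PHY programming goes here */
}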
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{


@@ -27,7 +27,7 @@
#include <video/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_dcs_backlight.h"


@@ -38,7 +38,7 @@
#include <video/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_sideband.h"


@@ -34,7 +34,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
#include "intel_gmbus.h"


@@ -41,7 +41,7 @@
#include <drm/drm_fourcc.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
@@ -110,9 +110,8 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
if (intel_wait_for_register(&dev_priv->uncore,
FBC_STATUS, FBC_STAT_COMPRESSING, 0,
10)) {
if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
FBC_STAT_COMPRESSING, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}


@@ -43,17 +43,18 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
{
return ifbdev->fb->frontbuffer;
}
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
unsigned int origin =
ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
intel_fb_obj_invalidate(obj, origin);
intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
}
static int intel_fbdev_set_par(struct fb_info *info)
@@ -120,7 +121,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
int size, ret;
int size;
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
@@ -147,24 +148,16 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = PTR_ERR(obj);
goto err;
return PTR_ERR(obj);
}
fb = intel_framebuffer_create(obj, &mode_cmd);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto err_obj;
}
i915_gem_object_put(obj);
if (IS_ERR(fb))
return PTR_ERR(fb);
ifbdev->fb = to_intel_framebuffer(fb);
return 0;
err_obj:
i915_gem_object_put(obj);
err:
return ret;
}
static int intelfb_create(struct drm_fb_helper *helper,
@@ -180,7 +173,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
const struct i915_ggtt_view view = {
.type = I915_GGTT_VIEW_NORMAL,
};
struct drm_framebuffer *fb;
intel_wakeref_t wakeref;
struct fb_info *info;
struct i915_vma *vma;
@@ -226,8 +218,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unlock;
}
fb = &ifbdev->fb->base;
intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
intel_frontbuffer_flush(to_frontbuffer(ifbdev), ORIGIN_DIRTYFB);
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
@@ -236,17 +227,14 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
ifbdev->helper.fb = fb;
ifbdev->helper.fb = &ifbdev->fb->base;
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].base = ggtt->gmadr.start;
info->apertures->ranges[0].size = ggtt->mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
info->fix.smem_len = vma->node.size;
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -256,19 +244,24 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->screen_base = vaddr;
info->screen_size = vma->node.size;
/* Our framebuffer is the entirety of fbdev's system memory */
info->fix.smem_start = (unsigned long)info->screen_base;
info->fix.smem_len = info->screen_size;
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
if (intel_fb_obj(fb)->stolen && !prealloc)
if (vma->obj->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
fb->width, fb->height, i915_ggtt_offset(vma));
ifbdev->fb->base.width, ifbdev->fb->base.height,
i915_ggtt_offset(vma));
ifbdev->vma = vma;
ifbdev->vma_flags = flags;


@@ -26,7 +26,8 @@
*/
#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fifo_underrun.h"


@@ -30,11 +30,11 @@
* Many features require us to track changes to the currently active
* frontbuffer, especially rendering targeted at the frontbuffer.
*
* To be able to do so GEM tracks frontbuffers using a bitmask for all possible
* frontbuffer slots through i915_gem_track_fb(). The function in this file are
* then called when the contents of the frontbuffer are invalidated, when
* frontbuffer rendering has stopped again to flush out all the changes and when
* the frontbuffer is exchanged with a flip. Subsystems interested in
* To be able to do so we track frontbuffers using a bitmask for all possible
* frontbuffer slots through intel_frontbuffer_track(). The functions in this
* file are then called when the contents of the frontbuffer are invalidated,
* when frontbuffer rendering has stopped again to flush out all the changes
* and when the frontbuffer is exchanged with a flip. Subsystems interested in
* frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
* into the relevant places and filter for the frontbuffer slots that they are
* interested in.
@@ -58,33 +58,14 @@
#include "display/intel_dp.h"
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
if (origin == ORIGIN_CS) {
spin_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
spin_unlock(&dev_priv->fb_tracking.lock);
}
might_sleep();
intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
}
/**
* intel_frontbuffer_flush - flush frontbuffer
* @dev_priv: i915 device
* frontbuffer_flush - flush frontbuffer
* @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -94,45 +75,27 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
*
* Can be called without any locks held.
*/
static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin)
static void frontbuffer_flush(struct drm_i915_private *i915,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
/* Delay flushing when rings are still busy. */
spin_lock(&dev_priv->fb_tracking.lock);
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
spin_unlock(&dev_priv->fb_tracking.lock);
spin_lock(&i915->fb_tracking.lock);
frontbuffer_bits &= ~i915->fb_tracking.busy_bits;
spin_unlock(&i915->fb_tracking.lock);
if (!frontbuffer_bits)
return;
might_sleep();
intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
intel_psr_flush(dev_priv, frontbuffer_bits, origin);
intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
}
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
if (origin == ORIGIN_CS) {
spin_lock(&dev_priv->fb_tracking.lock);
/* Filter out new bits since rendering started. */
frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
spin_unlock(&dev_priv->fb_tracking.lock);
}
if (frontbuffer_bits)
intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
intel_edp_drrs_flush(i915, frontbuffer_bits);
intel_psr_flush(i915, frontbuffer_bits, origin);
intel_fbc_flush(i915, frontbuffer_bits, origin);
}
/**
* intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
* @dev_priv: i915 device
* @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. The actual
@@ -142,19 +105,19 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
*
* Can be called without any locks held.
*/
void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
spin_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
spin_lock(&i915->fb_tracking.lock);
i915->fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
spin_unlock(&dev_priv->fb_tracking.lock);
i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
spin_unlock(&i915->fb_tracking.lock);
}
/**
* intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
* @dev_priv: i915 device
* @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after the flip has been latched and will complete
@@ -162,23 +125,22 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
*
* Can be called without any locks held.
*/
void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
spin_lock(&dev_priv->fb_tracking.lock);
spin_lock(&i915->fb_tracking.lock);
/* Mask any cancelled flips. */
frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
spin_unlock(&dev_priv->fb_tracking.lock);
frontbuffer_bits &= i915->fb_tracking.flip_bits;
i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
spin_unlock(&i915->fb_tracking.lock);
if (frontbuffer_bits)
intel_frontbuffer_flush(dev_priv,
frontbuffer_bits, ORIGIN_FLIP);
frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
}
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
* @dev_priv: i915 device
* @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. This is for
@@ -187,13 +149,160 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
*
* Can be called without any locks held.
*/
void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
spin_lock(&dev_priv->fb_tracking.lock);
spin_lock(&i915->fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
spin_unlock(&dev_priv->fb_tracking.lock);
i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
spin_unlock(&i915->fb_tracking.lock);
intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
}
void __intel_fb_invalidate(struct intel_frontbuffer *front,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
if (origin == ORIGIN_CS) {
spin_lock(&i915->fb_tracking.lock);
i915->fb_tracking.busy_bits |= frontbuffer_bits;
i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
spin_unlock(&i915->fb_tracking.lock);
}
might_sleep();
intel_psr_invalidate(i915, frontbuffer_bits, origin);
intel_edp_drrs_invalidate(i915, frontbuffer_bits);
intel_fbc_invalidate(i915, frontbuffer_bits, origin);
}
void __intel_fb_flush(struct intel_frontbuffer *front,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
if (origin == ORIGIN_CS) {
spin_lock(&i915->fb_tracking.lock);
/* Filter out new bits since rendering started. */
frontbuffer_bits &= i915->fb_tracking.busy_bits;
i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
spin_unlock(&i915->fb_tracking.lock);
}
if (frontbuffer_bits)
frontbuffer_flush(i915, frontbuffer_bits, origin);
}
static int frontbuffer_active(struct i915_active *ref)
{
struct intel_frontbuffer *front =
container_of(ref, typeof(*front), write);
kref_get(&front->ref);
return 0;
}
static void frontbuffer_retire(struct i915_active *ref)
{
struct intel_frontbuffer *front =
container_of(ref, typeof(*front), write);
intel_frontbuffer_flush(front, ORIGIN_CS);
intel_frontbuffer_put(front);
}
static void frontbuffer_release(struct kref *ref)
__releases(&to_i915(front->obj->base.dev)->fb_tracking.lock)
{
struct intel_frontbuffer *front =
container_of(ref, typeof(*front), ref);
front->obj->frontbuffer = NULL;
spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock);
i915_gem_object_put(front->obj);
kfree(front);
}
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_frontbuffer *front;
spin_lock(&i915->fb_tracking.lock);
front = obj->frontbuffer;
if (front)
kref_get(&front->ref);
spin_unlock(&i915->fb_tracking.lock);
if (front)
return front;
front = kmalloc(sizeof(*front), GFP_KERNEL);
if (!front)
return NULL;
front->obj = obj;
kref_init(&front->ref);
atomic_set(&front->bits, 0);
i915_active_init(i915, &front->write,
frontbuffer_active, frontbuffer_retire);
spin_lock(&i915->fb_tracking.lock);
if (obj->frontbuffer) {
kfree(front);
front = obj->frontbuffer;
kref_get(&front->ref);
} else {
i915_gem_object_get(obj);
obj->frontbuffer = front;
}
spin_unlock(&i915->fb_tracking.lock);
return front;
}
void intel_frontbuffer_put(struct intel_frontbuffer *front)
{
kref_put_lock(&front->ref,
frontbuffer_release,
&to_i915(front->obj->base.dev)->fb_tracking.lock);
}
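A hedged sketch of the intended get/put pairing over a framebuffer's lifetime (caller context invented):

/* Hypothetical caller: take the frontbuffer reference when wrapping a
 * GEM object in a framebuffer, drop it when the framebuffer dies. */
struct intel_frontbuffer *front;

front = intel_frontbuffer_get(obj);
if (!front)
	return -ENOMEM;

/* ...framebuffer alive; tracking operates on front->bits... */

intel_frontbuffer_put(front);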
/**
* intel_frontbuffer_track - update frontbuffer tracking
* @old: current buffer for the frontbuffer slots
* @new: new buffer for the frontbuffer slots
* @frontbuffer_bits: bitmask of frontbuffer slots
*
* This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
* from @old and setting them in @new. Both @old and @new can be NULL.
*/
void intel_frontbuffer_track(struct intel_frontbuffer *old,
struct intel_frontbuffer *new,
unsigned int frontbuffer_bits)
{
/*
* Control of individual bits within the mask is guarded by
* the owning plane->mutex, i.e. we can never see concurrent
* manipulation of individual bits. But since the bitfield as a whole
* is updated using RMW, we need to use atomics in order to update
* the bits.
*/
BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
BITS_PER_TYPE(atomic_t));
if (old) {
WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
atomic_andnot(frontbuffer_bits, &old->bits);
}
if (new) {
WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
atomic_or(frontbuffer_bits, &new->bits);
}
}
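A hedged sketch of a typical call site, moving a plane's tracking bit between framebuffers during an atomic update (variable names invented; struct intel_framebuffer carries the frontbuffer pointer per the earlier hunk):

/* Hypothetical plane update: old_ifb/new_ifb are intel_framebuffer
 * pointers, either of which may be NULL. */
intel_frontbuffer_track(old_ifb ? old_ifb->frontbuffer : NULL,
			new_ifb ? new_ifb->frontbuffer : NULL,
			plane->frontbuffer_bit);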


@@ -24,7 +24,10 @@
#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__
#include "gem/i915_gem_object.h"
#include <linux/atomic.h>
#include <linux/kref.h>
#include "i915_active.h"
struct drm_i915_private;
struct drm_i915_gem_object;
@@ -37,23 +40,30 @@ enum fb_op_origin {
ORIGIN_DIRTYFB,
};
void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
struct intel_frontbuffer {
struct kref ref;
atomic_t bits;
struct i915_active write;
struct drm_i915_gem_object *obj;
};
void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin,
unsigned int frontbuffer_bits);
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
enum fb_op_origin origin,
unsigned int frontbuffer_bits);
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj);
void __intel_fb_invalidate(struct intel_frontbuffer *front,
enum fb_op_origin origin,
unsigned int frontbuffer_bits);
/**
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
* intel_frontbuffer_invalidate - invalidate frontbuffer object
* @front: GEM object to invalidate
* @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
@@ -62,37 +72,53 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
if (!front)
return false;
frontbuffer_bits = atomic_read(&front->bits);
if (!frontbuffer_bits)
return false;
__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
__intel_fb_invalidate(front, origin, frontbuffer_bits);
return true;
}
void __intel_fb_flush(struct intel_frontbuffer *front,
enum fb_op_origin origin,
unsigned int frontbuffer_bits);
/**
* intel_fb_obj_flush - flush frontbuffer object
* @obj: GEM object to flush
* intel_frontbuffer_flush - flush frontbuffer object
* @front: GEM object to flush
* @origin: which operation caused the flush
*
* This function gets called every time rendering on the given object has
* completed and frontbuffer caching can be started again.
*/
static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
if (!front)
return;
frontbuffer_bits = atomic_read(&front->bits);
if (!frontbuffer_bits)
return;
__intel_fb_obj_flush(obj, origin, frontbuffer_bits);
__intel_fb_flush(front, origin, frontbuffer_bits);
}
void intel_frontbuffer_track(struct intel_frontbuffer *old,
struct intel_frontbuffer *new,
unsigned int frontbuffer_bits);
void intel_frontbuffer_put(struct intel_frontbuffer *front);
#endif /* __INTEL_FRONTBUFFER_H__ */


@@ -35,7 +35,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
struct gmbus_pin {
@@ -82,25 +82,20 @@ static const struct gmbus_pin gmbus_pins_cnp[] = {
static const struct gmbus_pin gmbus_pins_icp[] = {
[GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
[GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
[GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
[GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
[GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
[GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
[GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
};
static const struct gmbus_pin gmbus_pins_mcc[] = {
[GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
[GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
[GMBUS_PIN_9_TC1_ICP] = { "dpc", GPIOJ },
[GMBUS_PIN_13_TC5_TGP] = { "tc5", GPION },
[GMBUS_PIN_14_TC6_TGP] = { "tc6", GPIOO },
};
/* pin is expected to be valid */
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
if (HAS_PCH_MCC(dev_priv))
return &gmbus_pins_mcc[pin];
else if (HAS_PCH_ICP(dev_priv))
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
return &gmbus_pins_icp[pin];
else if (HAS_PCH_CNP(dev_priv))
return &gmbus_pins_cnp[pin];
@@ -119,9 +114,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;
if (HAS_PCH_MCC(dev_priv))
size = ARRAY_SIZE(gmbus_pins_mcc);
else if (HAS_PCH_ICP(dev_priv))
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
size = ARRAY_SIZE(gmbus_pins_icp);
else if (HAS_PCH_CNP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_cnp);


@@ -11,6 +11,28 @@
struct drm_i915_private;
struct i2c_adapter;
#define GMBUS_PIN_DISABLED 0
#define GMBUS_PIN_SSC 1
#define GMBUS_PIN_VGADDC 2
#define GMBUS_PIN_PANEL 3
#define GMBUS_PIN_DPD_CHV 3 /* HDMID_CHV */
#define GMBUS_PIN_DPC 4 /* HDMIC */
#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */
#define GMBUS_PIN_DPD 6 /* HDMID */
#define GMBUS_PIN_RESERVED 7 /* 7 reserved */
#define GMBUS_PIN_1_BXT 1 /* BXT+ (atom) and CNP+ (big core) */
#define GMBUS_PIN_2_BXT 2
#define GMBUS_PIN_3_BXT 3
#define GMBUS_PIN_4_CNP 4
#define GMBUS_PIN_9_TC1_ICP 9
#define GMBUS_PIN_10_TC2_ICP 10
#define GMBUS_PIN_11_TC3_ICP 11
#define GMBUS_PIN_12_TC4_ICP 12
#define GMBUS_PIN_13_TC5_TGP 13
#define GMBUS_PIN_14_TC6_TGP 14
#define GMBUS_NUM_PINS 15 /* including 0 */
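A hedged usage example for the new TGP pins (assuming the existing intel_gmbus_get_adapter() lookup helper):

/* Look up the I2C adapter behind the TGP TC5 GMBUS pin. */
struct i2c_adapter *adap =
	intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_13_TC5_TGP);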
int intel_gmbus_setup(struct drm_i915_private *dev_priv);
void intel_gmbus_teardown(struct drm_i915_private *dev_priv);
bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,


@@ -14,7 +14,8 @@
#include <drm/i915_component.h>
#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"
@@ -244,8 +245,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
I915_WRITE(HDCP_SHA_TEXT, sha_text);
if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
DRM_ERROR("Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
}
@@ -475,9 +475,8 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
/* Tell the HW we're done with the hash and wait for it to ACK */
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE,
HDCP_SHA1_COMPLETE, 1)) {
if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE, 1)) {
DRM_ERROR("Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
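The repeated conversions suggest intel_de_wait_for_set()/intel_de_wait_for_clear() are thin wrappers over intel_wait_for_register() with value == mask and value == 0 respectively. A userspace model of that polling contract; this is an assumption for illustration, not the driver's implementation:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static uint32_t fake_reg = 0x4;	/* stands in for the uncore read */

static uint32_t read_reg(void) { return fake_reg; }

static int wait_for_register(uint32_t mask, uint32_t value, int timeout_ms)
{
    for (int t = 0; t <= timeout_ms; t++) {
        if ((read_reg() & mask) == value)
            return 0;
        /* the real helper sleeps between polls */
    }
    return -ETIMEDOUT;
}

/* "set": every bit in mask must be 1; "clear": every bit must be 0. */
static int de_wait_for_set(uint32_t mask, int timeout_ms)
{
    return wait_for_register(mask, mask, timeout_ms);
}

static int de_wait_for_clear(uint32_t mask, int timeout_ms)
{
    return wait_for_register(mask, 0, timeout_ms);
}

int main(void)
{
    printf("%d %d\n", de_wait_for_set(0x4, 1), de_wait_for_clear(0x8, 1));
    /* 0 0 */
    return 0;
}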
@@ -523,12 +522,16 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
* authentication.
*/
num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
if (num_downstream == 0)
if (num_downstream == 0) {
DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
return -EINVAL;
}
ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
if (!ksv_fifo)
if (!ksv_fifo) {
DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
return -ENOMEM;
}
ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
if (ret)
@@ -616,9 +619,8 @@ static int intel_hdcp_auth(struct intel_connector *connector)
I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
HDCP_STATUS_AN_READY,
HDCP_STATUS_AN_READY, 1)) {
if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
HDCP_STATUS_AN_READY, 1)) {
DRM_ERROR("Timed out waiting for An\n");
return -ETIMEDOUT;
}
@@ -702,9 +704,9 @@ static int intel_hdcp_auth(struct intel_connector *connector)
}
/* Wait for encryption confirmation */
if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
HDCP_STATUS_ENC, HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -734,8 +736,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
hdcp->hdcp_encrypted = false;
I915_WRITE(PORT_HDCP_CONF(port), 0);
if (intel_wait_for_register(&dev_priv->uncore,
PORT_HDCP_STATUS(port), ~0, 0,
if (intel_de_wait_for_clear(dev_priv, PORT_HDCP_STATUS(port), ~0,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@@ -866,7 +867,6 @@ static void intel_hdcp_prop_work(struct work_struct *work)
prop_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
struct drm_device *dev = connector->base.dev;
struct drm_connector_state *state;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
mutex_lock(&hdcp->mutex);
@@ -876,10 +876,9 @@ static void intel_hdcp_prop_work(struct work_struct *work)
* those to UNDESIRED is handled by core. If value == UNDESIRED,
* we're running just after hdcp has been disabled, so just exit
*/
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
state = connector->base.state;
state->content_protection = hdcp->value;
}
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
drm_hdcp_update_content_protection(&connector->base,
hdcp->value);
mutex_unlock(&hdcp->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -1207,8 +1206,10 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (ret < 0)
return ret;
if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
return -EINVAL;
}
hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
@@ -1512,10 +1513,9 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
CTL_LINK_ENCRYPTION_REQ);
}
ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
LINK_ENCRYPTION_STATUS,
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS_DDI(port),
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
return ret;
}
@@ -1533,8 +1533,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
I915_WRITE(HDCP2_CTL_DDI(port),
I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
LINK_ENCRYPTION_STATUS, 0x0,
ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS_DDI(port),
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
DRM_DEBUG_KMS("Disable Encryption Timedout");
@@ -1749,14 +1749,15 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};
static inline int initialize_hdcp_port_data(struct intel_connector *connector)
static inline int initialize_hdcp_port_data(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp_port_data *data = &hdcp->port_data;
data->port = connector->encoder->port;
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)hdcp->shim->protocol;
data->protocol = (u8)shim->protocol;
data->k = 1;
if (!data->streams)
@@ -1806,12 +1807,13 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
}
}
static void intel_hdcp2_init(struct intel_connector *connector)
static void intel_hdcp2_init(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
ret = initialize_hdcp_port_data(connector);
ret = initialize_hdcp_port_data(connector, shim);
if (ret) {
DRM_DEBUG_KMS("Mei hdcp data init failed\n");
return;
@@ -1830,23 +1832,28 @@ int intel_hdcp_init(struct intel_connector *connector,
if (!shim)
return -EINVAL;
ret = drm_connector_attach_content_protection_property(&connector->base);
if (ret)
if (is_hdcp2_supported(dev_priv))
intel_hdcp2_init(connector, shim);
ret =
drm_connector_attach_content_protection_property(&connector->base,
hdcp->hdcp2_supported);
if (ret) {
hdcp->hdcp2_supported = false;
kfree(hdcp->port_data.streams);
return ret;
}
hdcp->shim = shim;
mutex_init(&hdcp->mutex);
INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
if (is_hdcp2_supported(dev_priv))
intel_hdcp2_init(connector);
init_waitqueue_head(&hdcp->cp_irq_queue);
return 0;
}
int intel_hdcp_enable(struct intel_connector *connector)
int intel_hdcp_enable(struct intel_connector *connector, u8 content_type)
{
struct intel_hdcp *hdcp = &connector->hdcp;
unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
@@ -1857,6 +1864,7 @@ int intel_hdcp_enable(struct intel_connector *connector)
mutex_lock(&hdcp->mutex);
WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = content_type;
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -1868,8 +1876,12 @@ int intel_hdcp_enable(struct intel_connector *connector)
check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
}
/* When HDCP2.2 fails, HDCP1.4 will be attempted */
if (ret && intel_hdcp_capable(connector)) {
/*
* When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
* be attempted.
*/
if (ret && intel_hdcp_capable(connector) &&
hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
ret = _intel_hdcp_enable(connector);
}
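The new fallback rule isolated as a pure function: HDCP1.4 only satisfies Type 0 content, so a failed HDCP2.2 attempt may fall back only when the requested content type allows it. Constants are illustrative; the real ones come from drm_hdcp.h:

#include <stdbool.h>
#include <stdio.h>

#define HDCP_CONTENT_TYPE0 0
#define HDCP_CONTENT_TYPE1 1

/* HDCP1.4 is an acceptable fallback only for Type 0 content. */
static bool try_hdcp14_fallback(int hdcp2_ret, bool hdcp14_capable,
                                int content_type)
{
    return hdcp2_ret != 0 && hdcp14_capable &&
           content_type != HDCP_CONTENT_TYPE1;
}

int main(void)
{
    printf("%d\n", try_hdcp14_fallback(-110, true, HDCP_CONTENT_TYPE0)); /* 1 */
    printf("%d\n", try_hdcp14_fallback(-110, true, HDCP_CONTENT_TYPE1)); /* 0 */
    return 0;
}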
@@ -1951,12 +1963,15 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
/*
* Nothing to do if the state didn't change, or HDCP was activated since
* the last commit
* the last commit. And also no change in hdcp content type.
*/
if (old_cp == new_cp ||
(old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
return;
new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
if (old_state->hdcp_content_type ==
new_state->hdcp_content_type)
return;
}
crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
new_state->crtc);
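A sketch of the reworked early-return: the atomic check may only be skipped when the protection request is effectively unchanged and the content type matches too. Enum values are illustrative:

#include <stdbool.h>
#include <stdio.h>

enum { CP_UNDESIRED, CP_DESIRED, CP_ENABLED };

static bool hdcp_recheck_needed(int old_cp, int new_cp,
                                int old_type, int new_type)
{
    bool cp_same = old_cp == new_cp ||
                   (old_cp == CP_DESIRED && new_cp == CP_ENABLED);

    return !(cp_same && old_type == new_type);
}

int main(void)
{
    printf("%d\n", hdcp_recheck_needed(CP_DESIRED, CP_ENABLED, 0, 0)); /* 0 */
    printf("%d\n", hdcp_recheck_needed(CP_DESIRED, CP_ENABLED, 0, 1)); /* 1 */
    return 0;
}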

drivers/gpu/drm/i915/display/intel_hdcp.h

@@ -21,7 +21,7 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *new_state);
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector);
int intel_hdcp_enable(struct intel_connector *connector, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);

drivers/gpu/drm/i915/display/intel_hdmi.c

@@ -45,17 +45,17 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_sdvo.h"
#include "intel_panel.h"
#include "intel_sdvo.h"
#include "intel_sideband.h"
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
@@ -1514,29 +1514,28 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
return true;
}
static struct hdcp2_hdmi_msg_data {
struct hdcp2_hdmi_msg_data {
u8 msg_id;
u32 timeout;
u32 timeout2;
} hdcp2_msg_data[] = {
{HDCP_2_2_AKE_INIT, 0, 0},
{HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0},
{HDCP_2_2_AKE_NO_STORED_KM, 0, 0},
{HDCP_2_2_AKE_STORED_KM, 0, 0},
{HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
{HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS,
0},
{HDCP_2_2_LC_INIT, 0, 0},
{HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0},
{HDCP_2_2_SKE_SEND_EKS, 0, 0},
{HDCP_2_2_REP_SEND_RECVID_LIST,
HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
{HDCP_2_2_REP_SEND_ACK, 0, 0},
{HDCP_2_2_REP_STREAM_MANAGE, 0, 0},
{HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS,
0},
};
};
static const struct hdcp2_hdmi_msg_data hdcp2_msg_data[] = {
{ HDCP_2_2_AKE_INIT, 0, 0 },
{ HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
{ HDCP_2_2_AKE_NO_STORED_KM, 0, 0 },
{ HDCP_2_2_AKE_STORED_KM, 0, 0 },
{ HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
{ HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
{ HDCP_2_2_LC_INIT, 0, 0 },
{ HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0 },
{ HDCP_2_2_SKE_SEND_EKS, 0, 0 },
{ HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
{ HDCP_2_2_REP_SEND_ACK, 0, 0 },
{ HDCP_2_2_REP_STREAM_MANAGE, 0, 0 },
{ HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
};
static
int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
@@ -2930,51 +2929,34 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
u8 ddc_pin;
enum phy phy = intel_port_to_phy(dev_priv, port);
switch (port) {
case PORT_A:
ddc_pin = GMBUS_PIN_1_BXT;
break;
case PORT_B:
ddc_pin = GMBUS_PIN_2_BXT;
break;
case PORT_C:
ddc_pin = GMBUS_PIN_9_TC1_ICP;
break;
case PORT_D:
ddc_pin = GMBUS_PIN_10_TC2_ICP;
break;
case PORT_E:
ddc_pin = GMBUS_PIN_11_TC3_ICP;
break;
case PORT_F:
ddc_pin = GMBUS_PIN_12_TC4_ICP;
break;
default:
MISSING_CASE(port);
ddc_pin = GMBUS_PIN_2_BXT;
break;
}
return ddc_pin;
if (intel_phy_is_combo(dev_priv, phy))
return GMBUS_PIN_1_BXT + port;
else if (intel_phy_is_tc(dev_priv, phy))
return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
WARN(1, "Unknown port:%c\n", port_name(port));
return GMBUS_PIN_2_BXT;
}
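The switch collapses into arithmetic because gen11 GMBUS pins are numbered contiguously per PHY group: combo PHYs start at pin 1, Type-C ports at pin 9. A standalone check of that mapping, with the pin values copied from the defines above:

#include <stdio.h>

#define GMBUS_PIN_1_BXT     1
#define GMBUS_PIN_9_TC1_ICP 9

static int icl_ddc_pin(int port_index, int is_tc, int tc_index)
{
    return is_tc ? GMBUS_PIN_9_TC1_ICP + tc_index
                 : GMBUS_PIN_1_BXT + port_index;
}

int main(void)
{
    printf("%d\n", icl_ddc_pin(0, 0, 0)); /* port A -> pin 1  */
    printf("%d\n", icl_ddc_pin(0, 1, 3)); /* TC4    -> pin 12 */
    return 0;
}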
static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
enum phy phy = intel_port_to_phy(dev_priv, port);
u8 ddc_pin;
switch (port) {
case PORT_A:
switch (phy) {
case PHY_A:
ddc_pin = GMBUS_PIN_1_BXT;
break;
case PORT_B:
case PHY_B:
ddc_pin = GMBUS_PIN_2_BXT;
break;
case PORT_C:
case PHY_C:
ddc_pin = GMBUS_PIN_9_TC1_ICP;
break;
default:
MISSING_CASE(port);
MISSING_CASE(phy);
ddc_pin = GMBUS_PIN_1_BXT;
break;
}
@@ -3019,7 +3001,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_ICP(dev_priv))
else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_ICP(dev_priv))
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
@@ -3143,6 +3125,32 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
DRM_DEBUG_KMS("CEC notifier get failed\n");
}
static enum intel_hotplug_state
intel_hdmi_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector, bool irq_received)
{
enum intel_hotplug_state state;
state = intel_encoder_hotplug(encoder, connector, irq_received);
/*
* On many platforms the HDMI live state signal is known to be
* unreliable, so we can't use it to detect if a sink is connected or
* not. Instead we detect if it's connected based on whether we can
* read the EDID or not. That in turn has a problem during disconnect,
* since the HPD interrupt may be raised before the DDC lines get
* disconnected (due to how the required length of DDC vs. HPD
* connector pins are specified) and so we'll still be able to get a
* valid EDID. To solve this schedule another detection cycle if this
* time around we didn't detect any change in the sink's connection
* status.
*/
if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
state = INTEL_HOTPLUG_RETRY;
return state;
}
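The retry rule added here, isolated as a pure function (state names illustrative): an IRQ that produced no detected change schedules one more probe, since the EDID may still have been readable while the sink was mid-unplug.

#include <stdio.h>

enum hotplug_state { HPD_UNCHANGED, HPD_CHANGED, HPD_RETRY };

static enum hotplug_state hdmi_hotplug(enum hotplug_state probed,
                                       int irq_received)
{
    if (probed == HPD_UNCHANGED && irq_received)
        return HPD_RETRY;
    return probed;
}

int main(void)
{
    printf("%d\n", hdmi_hotplug(HPD_UNCHANGED, 1)); /* 2 == HPD_RETRY */
    return 0;
}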
void intel_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
@@ -3166,7 +3174,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
"HDMI %c", port_name(port));
intel_encoder->hotplug = intel_encoder_hotplug;
intel_encoder->hotplug = intel_hdmi_hotplug;
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_hdmi;

drivers/gpu/drm/i915/display/intel_hotplug.c

@@ -26,7 +26,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
/**
@@ -104,6 +104,12 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
if (IS_CNL_WITH_PORT_F(dev_priv))
return HPD_PORT_E;
return HPD_PORT_F;
case PORT_G:
return HPD_PORT_G;
case PORT_H:
return HPD_PORT_H;
case PORT_I:
return HPD_PORT_I;
default:
MISSING_CASE(port);
return HPD_NONE;
@@ -112,6 +118,7 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
#define HPD_RETRY_DELAY 1000
/**
* intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
@@ -266,8 +273,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
bool intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector)
enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received)
{
struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
@@ -279,7 +288,7 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
drm_helper_probe_detect(&connector->base, NULL, false);
if (old_status == connector->base.status)
return false;
return INTEL_HOTPLUG_UNCHANGED;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.base.id,
@@ -287,7 +296,7 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->base.status));
return true;
return INTEL_HOTPLUG_CHANGED;
}
static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
@@ -339,7 +348,7 @@ static void i915_digport_work_func(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->hotplug.event_bits |= old_bits;
spin_unlock_irq(&dev_priv->irq_lock);
schedule_work(&dev_priv->hotplug.hotplug_work);
queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
}
@@ -349,14 +358,16 @@ static void i915_digport_work_func(struct work_struct *work)
static void i915_hotplug_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, hotplug.hotplug_work);
container_of(work, struct drm_i915_private,
hotplug.hotplug_work.work);
struct drm_device *dev = &dev_priv->drm;
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
bool changed = false;
u32 changed = 0, retry = 0;
u32 hpd_event_bits;
u32 hpd_retry_bits;
mutex_lock(&dev->mode_config.mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
@@ -365,6 +376,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
hpd_event_bits = dev_priv->hotplug.event_bits;
dev_priv->hotplug.event_bits = 0;
hpd_retry_bits = dev_priv->hotplug.retry_bits;
dev_priv->hotplug.retry_bits = 0;
/* Enable polling for connectors which had HPD IRQ storms */
intel_hpd_irq_storm_switch_to_polling(dev_priv);
@@ -373,16 +386,29 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
u32 hpd_bit;
intel_connector = to_intel_connector(connector);
if (!intel_connector->encoder)
continue;
intel_encoder = intel_connector->encoder;
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
hpd_bit = BIT(intel_encoder->hpd_pin);
if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
connector->name, intel_encoder->hpd_pin);
changed |= intel_encoder->hotplug(intel_encoder,
intel_connector);
switch (intel_encoder->hotplug(intel_encoder,
intel_connector,
hpd_event_bits & hpd_bit)) {
case INTEL_HOTPLUG_UNCHANGED:
break;
case INTEL_HOTPLUG_CHANGED:
changed |= hpd_bit;
break;
case INTEL_HOTPLUG_RETRY:
retry |= hpd_bit;
break;
}
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -390,6 +416,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
if (changed)
drm_kms_helper_hotplug_event(dev);
/* Remove shared HPD pins that have changed */
retry &= ~changed;
if (retry) {
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->hotplug.retry_bits |= retry;
spin_unlock_irq(&dev_priv->irq_lock);
mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
}
}
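A quick demo of the shared-pin bookkeeping above: a pin that both changed and requested a retry is considered resolved, so only the remainder is re-queued.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t changed = 0x5; /* pins that did change state   */
    uint32_t retry   = 0x6; /* pins that asked for a retry  */

    /* A confirmed change wins; re-queue only unresolved pins. */
    retry &= ~changed;

    printf("requeue mask: 0x%x\n", (unsigned)retry); /* 0x2 */
    return 0;
}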
@@ -516,7 +553,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (queue_dig)
queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
if (queue_hp)
schedule_work(&dev_priv->hotplug.hotplug_work);
queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
/**
@@ -636,7 +673,8 @@ void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
i915_hotplug_work_func);
INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
@@ -650,11 +688,12 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
dev_priv->hotplug.long_port_mask = 0;
dev_priv->hotplug.short_port_mask = 0;
dev_priv->hotplug.event_bits = 0;
dev_priv->hotplug.retry_bits = 0;
spin_unlock_irq(&dev_priv->irq_lock);
cancel_work_sync(&dev_priv->hotplug.dig_port_work);
cancel_work_sync(&dev_priv->hotplug.hotplug_work);
cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
cancel_work_sync(&dev_priv->hotplug.poll_init_work);
cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}

drivers/gpu/drm/i915/display/intel_hotplug.h

@@ -15,8 +15,9 @@ struct intel_connector;
struct intel_encoder;
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
bool intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);

drivers/gpu/drm/i915/display/intel_lspcon.c

@@ -27,8 +27,8 @@
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_edid.h>
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_lspcon.h"
/* LSPCON OUI Vendor ID(signatures) */

drivers/gpu/drm/i915/display/intel_lvds.c

@@ -42,7 +42,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
#include "intel_lvds.h"
#include "intel_panel.h"
@@ -318,8 +318,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
POSTING_READ(lvds_encoder->reg);
if (intel_wait_for_register(&dev_priv->uncore,
PP_STATUS(0), PP_ON, PP_ON, 5000))
if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
@@ -333,8 +332,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
if (intel_wait_for_register(&dev_priv->uncore,
PP_STATUS(0), PP_ON, 0, 1000))
if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);

drivers/gpu/drm/i915/display/intel_opregion.c

@@ -35,7 +35,7 @@
#include "display/intel_panel.h"
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_opregion.h"
#define OPREGION_HEADER_OFFSET 0

drivers/gpu/drm/i915/display/intel_overlay.c

@@ -33,7 +33,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_overlay.h"
@@ -175,6 +175,7 @@ struct overlay_registers {
struct intel_overlay {
struct drm_i915_private *i915;
struct intel_context *context;
struct intel_crtc *crtc;
struct i915_vma *vma;
struct i915_vma *old_vma;
@@ -190,7 +191,8 @@ struct intel_overlay {
struct overlay_registers __iomem *regs;
u32 flip_addr;
/* flip handling */
struct i915_active_request last_flip;
struct i915_active last_flip;
void (*flip_complete)(struct intel_overlay *ovl);
};
static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
@@ -216,32 +218,25 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
}
-static void intel_overlay_submit_request(struct intel_overlay *overlay,
-                                         struct i915_request *rq,
-                                         i915_active_retire_fn retire)
-{
-        GEM_BUG_ON(i915_active_request_peek(&overlay->last_flip,
-                                            &overlay->i915->drm.struct_mutex));
-        i915_active_request_set_retire_fn(&overlay->last_flip, retire,
-                                          &overlay->i915->drm.struct_mutex);
-        __i915_active_request_set(&overlay->last_flip, rq);
-        i915_request_add(rq);
-}
-
-static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
-                                         struct i915_request *rq,
-                                         i915_active_retire_fn retire)
-{
-        intel_overlay_submit_request(overlay, rq, retire);
-        return i915_active_request_retire(&overlay->last_flip,
-                                          &overlay->i915->drm.struct_mutex);
-}
-
-static struct i915_request *alloc_request(struct intel_overlay *overlay)
-{
-        struct intel_engine_cs *engine = overlay->i915->engine[RCS0];
-
-        return i915_request_create(engine->kernel_context);
-}
+static struct i915_request *
+alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
+{
+        struct i915_request *rq;
+        int err;
+
+        overlay->flip_complete = fn;
+
+        rq = i915_request_create(overlay->context);
+        if (IS_ERR(rq))
+                return rq;
+
+        err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
+        if (err) {
+                i915_request_add(rq);
+                return ERR_PTR(err);
+        }
+
+        return rq;
+}
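Instead of per-request retire callbacks, the wanted completion handler is now parked in overlay->flip_complete and a single retire hook (intel_overlay_last_flip_retire, further down) dispatches it when the tracked request drains. A loose userspace analogue of that pattern, not the i915_active API:

#include <stdio.h>

struct active {
    int busy;
    void (*retire)(struct active *);
};

static void flip_done(struct active *a)
{
    (void)a;
    printf("flip complete\n");
}

/* Track one outstanding request; retire fires when it drains. */
static void active_ref(struct active *a) { a->busy++; }

static void active_retire(struct active *a)
{
    if (--a->busy == 0 && a->retire)
        a->retire(a);
}

int main(void)
{
    struct active last_flip = { 0, flip_done };

    active_ref(&last_flip);    /* request queued                */
    active_retire(&last_flip); /* request retired -> callback   */
    return 0;
}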
/* overlay needs to be disable in OCMD reg */
@@ -253,7 +248,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
WARN_ON(overlay->active);
rq = alloc_request(overlay);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -274,7 +269,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return intel_overlay_do_wait_request(overlay, rq, NULL);
i915_request_add(rq);
return i915_active_wait(&overlay->last_flip);
}
static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
@@ -284,9 +281,9 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
WARN_ON(overlay->old_vma);
i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
vma ? vma->obj : NULL,
INTEL_FRONTBUFFER_OVERLAY(pipe));
intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
vma ? vma->obj->frontbuffer : NULL,
INTEL_FRONTBUFFER_OVERLAY(pipe));
intel_frontbuffer_flip_prepare(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -318,7 +315,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
rq = alloc_request(overlay);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -333,8 +330,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, vma);
intel_overlay_submit_request(overlay, rq, NULL);
i915_request_add(rq);
return 0;
}
@@ -355,20 +351,13 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
}
static void
intel_overlay_release_old_vid_tail(struct i915_active_request *active,
struct i915_request *rq)
intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
intel_overlay_release_old_vma(overlay);
}
static void intel_overlay_off_tail(struct i915_active_request *active,
struct i915_request *rq)
static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
struct drm_i915_private *dev_priv = overlay->i915;
intel_overlay_release_old_vma(overlay);
@@ -381,6 +370,16 @@ static void intel_overlay_off_tail(struct i915_active_request *active,
i830_overlay_clock_gating(dev_priv, true);
}
static void
intel_overlay_last_flip_retire(struct i915_active *active)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
if (overlay->flip_complete)
overlay->flip_complete(overlay);
}
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
@@ -395,7 +394,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
rq = alloc_request(overlay);
rq = alloc_request(overlay, intel_overlay_off_tail);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -418,17 +417,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, NULL);
i915_request_add(rq);
return intel_overlay_do_wait_request(overlay, rq,
intel_overlay_off_tail);
return i915_active_wait(&overlay->last_flip);
}
/* recover from an interruption due to a signal
* We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
return i915_active_request_retire(&overlay->last_flip,
&overlay->i915->drm.struct_mutex);
return i915_active_wait(&overlay->last_flip);
}
/* Wait for pending overlay flip and release old frame.
@@ -438,43 +436,40 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
+        struct i915_request *rq;
         u32 *cs;
-        int ret;
 
         lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-        /* Only wait if there is actually an old frame to release to
+        /*
+         * Only wait if there is actually an old frame to release to
          * guarantee forward progress.
          */
         if (!overlay->old_vma)
                 return 0;
 
-        if (I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
-                /* synchronous slowpath */
-                struct i915_request *rq;
-
-                rq = alloc_request(overlay);
-                if (IS_ERR(rq))
-                        return PTR_ERR(rq);
-
-                cs = intel_ring_begin(rq, 2);
-                if (IS_ERR(cs)) {
-                        i915_request_add(rq);
-                        return PTR_ERR(cs);
-                }
-
-                *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
-                *cs++ = MI_NOOP;
-                intel_ring_advance(rq, cs);
-
-                ret = intel_overlay_do_wait_request(overlay, rq,
-                                                    intel_overlay_release_old_vid_tail);
-                if (ret)
-                        return ret;
-        } else
-                intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
+        if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+                intel_overlay_release_old_vid_tail(overlay);
+                return 0;
+        }
+
+        rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
+        if (IS_ERR(rq))
+                return PTR_ERR(rq);
+
+        cs = intel_ring_begin(rq, 2);
+        if (IS_ERR(cs)) {
+                i915_request_add(rq);
+                return PTR_ERR(cs);
+        }
+
+        *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+        *cs++ = MI_NOOP;
+        intel_ring_advance(rq, cs);
 
-        return 0;
+        i915_request_add(rq);
+        return i915_active_wait(&overlay->last_flip);
}
void intel_overlay_reset(struct drm_i915_private *dev_priv)
@@ -773,11 +768,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
ret = PTR_ERR(vma);
goto out_pin_section;
}
intel_fb_obj_flush(new_bo, ORIGIN_DIRTYFB);
ret = i915_vma_put_fence(vma);
if (ret)
goto out_unpin;
intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
if (!overlay->active) {
u32 oconfig;
@@ -1359,11 +1350,16 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!HAS_OVERLAY(dev_priv))
return;
if (!HAS_ENGINE(dev_priv, RCS0))
return;
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return;
overlay->i915 = dev_priv;
overlay->context = dev_priv->engine[RCS0]->kernel_context;
GEM_BUG_ON(!overlay->context);
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
@@ -1371,7 +1367,9 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
INIT_ACTIVE_REQUEST(&overlay->last_flip);
i915_active_init(dev_priv,
&overlay->last_flip,
NULL, intel_overlay_last_flip_retire);
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
if (ret)
@@ -1405,6 +1403,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
WARN_ON(overlay->active);
i915_gem_object_put(overlay->reg_bo);
i915_active_fini(&overlay->last_flip);
kfree(overlay);
}

drivers/gpu/drm/i915/display/intel_panel.c

@@ -35,8 +35,8 @@
#include <linux/pwm.h>
#include "intel_connector.h"
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
#include "intel_drv.h"
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"

drivers/gpu/drm/i915/display/intel_pipe_crc.c

@@ -30,7 +30,7 @@
#include <linux/seq_file.h>
#include "intel_atomic.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_pipe_crc.h"
static const char * const pipe_crc_sources[] = {
@@ -667,5 +667,5 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
POSTING_READ(PIPE_CRC_CTL(crtc->index));
synchronize_irq(dev_priv->drm.irq);
intel_synchronize_irq(dev_priv);
}

drivers/gpu/drm/i915/display/intel_psr.c

@@ -26,7 +26,7 @@
#include "display/intel_dp.h"
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -825,8 +825,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
}
/* Wait till PSR is idle */
if (intel_wait_for_register(&dev_priv->uncore,
psr_status, psr_status_mask, 0, 2000))
if (intel_de_wait_for_clear(dev_priv, psr_status,
psr_status_mask, 2000))
DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
@@ -988,7 +988,7 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->psr.lock);
err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
if (err)
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

drivers/gpu/drm/i915/display/intel_quirks.c

@@ -5,7 +5,7 @@
#include <linux/dmi.h>
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_quirks.h"
/*

drivers/gpu/drm/i915/display/intel_sdvo.c

@@ -39,7 +39,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdmi.h"
@@ -274,130 +274,145 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
return false;
}
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
#define SDVO_CMD_NAME_ENTRY(cmd_) { .cmd = SDVO_CMD_ ## cmd_, .name = #cmd_ }
/** Mapping of command numbers to names, for debug output */
static const struct _sdvo_cmd_name {
static const struct {
u8 cmd;
const char *name;
} __attribute__ ((packed)) sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
SDVO_CMD_NAME_ENTRY(RESET),
SDVO_CMD_NAME_ENTRY(GET_DEVICE_CAPS),
SDVO_CMD_NAME_ENTRY(GET_FIRMWARE_REV),
SDVO_CMD_NAME_ENTRY(GET_TRAINED_INPUTS),
SDVO_CMD_NAME_ENTRY(GET_ACTIVE_OUTPUTS),
SDVO_CMD_NAME_ENTRY(SET_ACTIVE_OUTPUTS),
SDVO_CMD_NAME_ENTRY(GET_IN_OUT_MAP),
SDVO_CMD_NAME_ENTRY(SET_IN_OUT_MAP),
SDVO_CMD_NAME_ENTRY(GET_ATTACHED_DISPLAYS),
SDVO_CMD_NAME_ENTRY(GET_HOT_PLUG_SUPPORT),
SDVO_CMD_NAME_ENTRY(SET_ACTIVE_HOT_PLUG),
SDVO_CMD_NAME_ENTRY(GET_ACTIVE_HOT_PLUG),
SDVO_CMD_NAME_ENTRY(GET_INTERRUPT_EVENT_SOURCE),
SDVO_CMD_NAME_ENTRY(SET_TARGET_INPUT),
SDVO_CMD_NAME_ENTRY(SET_TARGET_OUTPUT),
SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART2),
SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART2),
SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART2),
SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART2),
SDVO_CMD_NAME_ENTRY(CREATE_PREFERRED_INPUT_TIMING),
SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART1),
SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART2),
SDVO_CMD_NAME_ENTRY(GET_INPUT_PIXEL_CLOCK_RANGE),
SDVO_CMD_NAME_ENTRY(GET_OUTPUT_PIXEL_CLOCK_RANGE),
SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_CLOCK_RATE_MULTS),
SDVO_CMD_NAME_ENTRY(GET_CLOCK_RATE_MULT),
SDVO_CMD_NAME_ENTRY(SET_CLOCK_RATE_MULT),
SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_TV_FORMATS),
SDVO_CMD_NAME_ENTRY(GET_TV_FORMAT),
SDVO_CMD_NAME_ENTRY(SET_TV_FORMAT),
SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_POWER_STATES),
SDVO_CMD_NAME_ENTRY(GET_POWER_STATE),
SDVO_CMD_NAME_ENTRY(SET_ENCODER_POWER_STATE),
SDVO_CMD_NAME_ENTRY(SET_DISPLAY_POWER_STATE),
SDVO_CMD_NAME_ENTRY(SET_CONTROL_BUS_SWITCH),
SDVO_CMD_NAME_ENTRY(GET_SDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(GET_SCALED_HDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_ENHANCEMENTS),
/* Add the op code for SDVO enhancements */
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
SDVO_CMD_NAME_ENTRY(GET_MAX_HPOS),
SDVO_CMD_NAME_ENTRY(GET_HPOS),
SDVO_CMD_NAME_ENTRY(SET_HPOS),
SDVO_CMD_NAME_ENTRY(GET_MAX_VPOS),
SDVO_CMD_NAME_ENTRY(GET_VPOS),
SDVO_CMD_NAME_ENTRY(SET_VPOS),
SDVO_CMD_NAME_ENTRY(GET_MAX_SATURATION),
SDVO_CMD_NAME_ENTRY(GET_SATURATION),
SDVO_CMD_NAME_ENTRY(SET_SATURATION),
SDVO_CMD_NAME_ENTRY(GET_MAX_HUE),
SDVO_CMD_NAME_ENTRY(GET_HUE),
SDVO_CMD_NAME_ENTRY(SET_HUE),
SDVO_CMD_NAME_ENTRY(GET_MAX_CONTRAST),
SDVO_CMD_NAME_ENTRY(GET_CONTRAST),
SDVO_CMD_NAME_ENTRY(SET_CONTRAST),
SDVO_CMD_NAME_ENTRY(GET_MAX_BRIGHTNESS),
SDVO_CMD_NAME_ENTRY(GET_BRIGHTNESS),
SDVO_CMD_NAME_ENTRY(SET_BRIGHTNESS),
SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_H),
SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_H),
SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_H),
SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER),
SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER),
SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER),
SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_ADAPTIVE),
SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_ADAPTIVE),
SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_ADAPTIVE),
SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_2D),
SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_2D),
SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_2D),
SDVO_CMD_NAME_ENTRY(GET_MAX_SHARPNESS),
SDVO_CMD_NAME_ENTRY(GET_SHARPNESS),
SDVO_CMD_NAME_ENTRY(SET_SHARPNESS),
SDVO_CMD_NAME_ENTRY(GET_DOT_CRAWL),
SDVO_CMD_NAME_ENTRY(SET_DOT_CRAWL),
SDVO_CMD_NAME_ENTRY(GET_MAX_TV_CHROMA_FILTER),
SDVO_CMD_NAME_ENTRY(GET_TV_CHROMA_FILTER),
SDVO_CMD_NAME_ENTRY(SET_TV_CHROMA_FILTER),
SDVO_CMD_NAME_ENTRY(GET_MAX_TV_LUMA_FILTER),
SDVO_CMD_NAME_ENTRY(GET_TV_LUMA_FILTER),
SDVO_CMD_NAME_ENTRY(SET_TV_LUMA_FILTER),
/* HDMI op code */
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
SDVO_CMD_NAME_ENTRY(GET_SUPP_ENCODE),
SDVO_CMD_NAME_ENTRY(GET_ENCODE),
SDVO_CMD_NAME_ENTRY(SET_ENCODE),
SDVO_CMD_NAME_ENTRY(SET_PIXEL_REPLI),
SDVO_CMD_NAME_ENTRY(GET_PIXEL_REPLI),
SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY_CAP),
SDVO_CMD_NAME_ENTRY(SET_COLORIMETRY),
SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY),
SDVO_CMD_NAME_ENTRY(GET_AUDIO_ENCRYPT_PREFER),
SDVO_CMD_NAME_ENTRY(SET_AUDIO_STAT),
SDVO_CMD_NAME_ENTRY(GET_AUDIO_STAT),
SDVO_CMD_NAME_ENTRY(GET_HBUF_INDEX),
SDVO_CMD_NAME_ENTRY(SET_HBUF_INDEX),
SDVO_CMD_NAME_ENTRY(GET_HBUF_INFO),
SDVO_CMD_NAME_ENTRY(GET_HBUF_AV_SPLIT),
SDVO_CMD_NAME_ENTRY(SET_HBUF_AV_SPLIT),
SDVO_CMD_NAME_ENTRY(GET_HBUF_TXRATE),
SDVO_CMD_NAME_ENTRY(SET_HBUF_TXRATE),
SDVO_CMD_NAME_ENTRY(SET_HBUF_DATA),
SDVO_CMD_NAME_ENTRY(GET_HBUF_DATA),
};
#undef SDVO_CMD_NAME_ENTRY
static const char *sdvo_cmd_name(u8 cmd)
{
int i;
for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
if (cmd == sdvo_cmd_names[i].cmd)
return sdvo_cmd_names[i].name;
}
return NULL;
}
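The ## paste plus stringize in SDVO_CMD_NAME_ENTRY keeps each opcode and its printable name in lockstep, and factoring the linear scan into sdvo_cmd_name() lets the read path reuse it. A tiny standalone version of the same trick:

#include <stdio.h>

#define SDVO_CMD_RESET 0x01
#define SDVO_CMD_NAME_ENTRY(cmd_) { .cmd = SDVO_CMD_ ## cmd_, .name = #cmd_ }

static const struct { unsigned char cmd; const char *name; } names[] = {
    SDVO_CMD_NAME_ENTRY(RESET),
};

static const char *cmd_name(unsigned char cmd)
{
    for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
        if (names[i].cmd == cmd)
            return names[i].name;
    return NULL;
}

int main(void)
{
    printf("%s\n", cmd_name(SDVO_CMD_RESET)); /* RESET */
    return 0;
}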
#define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC")
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
const char *cmd_name;
int i, pos = 0;
#define BUF_LEN 256
char buffer[BUF_LEN];
@@ -412,15 +427,12 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
for (; i < 8; i++) {
BUF_PRINT(" ");
}
-        for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
-                if (cmd == sdvo_cmd_names[i].cmd) {
-                        BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
-                        break;
-                }
-        }
-        if (i == ARRAY_SIZE(sdvo_cmd_names)) {
+        cmd_name = sdvo_cmd_name(cmd);
+        if (cmd_name)
+                BUF_PRINT("(%s)", cmd_name);
+        else
                 BUF_PRINT("(%02X)", cmd);
-        }
BUG_ON(pos >= BUF_LEN - 1);
#undef BUF_PRINT
#undef BUF_LEN
@@ -429,15 +441,23 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
}
static const char * const cmd_status_names[] = {
"Power on",
"Success",
"Not supported",
"Invalid arg",
"Pending",
"Target not specified",
"Scaling not supported"
[SDVO_CMD_STATUS_POWER_ON] = "Power on",
[SDVO_CMD_STATUS_SUCCESS] = "Success",
[SDVO_CMD_STATUS_NOTSUPP] = "Not supported",
[SDVO_CMD_STATUS_INVALID_ARG] = "Invalid arg",
[SDVO_CMD_STATUS_PENDING] = "Pending",
[SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED] = "Target not specified",
[SDVO_CMD_STATUS_SCALING_NOT_SUPP] = "Scaling not supported",
};
static const char *sdvo_cmd_status(u8 status)
{
if (status < ARRAY_SIZE(cmd_status_names))
return cmd_status_names[status];
else
return NULL;
}
static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len,
bool unlocked)
@@ -516,6 +536,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
const char *cmd_status;
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i, pos = 0;
@@ -562,8 +583,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
#define BUF_PRINT(args...) \
pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
BUF_PRINT("(%s)", cmd_status_names[status]);
cmd_status = sdvo_cmd_status(status);
if (cmd_status)
BUF_PRINT("(%s)", cmd_status);
else
BUF_PRINT("(??? %d)", status);
@@ -929,6 +951,20 @@ static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
&audio_state, 1);
}
static bool intel_sdvo_get_hbuf_size(struct intel_sdvo *intel_sdvo,
u8 *hbuf_size)
{
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
hbuf_size, 1))
return false;
/* Buffer size is 0 based, hooray! However zero means zero. */
if (*hbuf_size)
(*hbuf_size)++;
return true;
}
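A quick demo of the quirky HBUF_INFO encoding the helper hides, straight from the comment above: sizes are zero-based (N means N+1 bytes) except that 0 genuinely means no buffer.

#include <stdio.h>

static unsigned int decode_hbuf_size(unsigned char raw)
{
    return raw ? raw + 1u : 0u;
}

int main(void)
{
    printf("%u\n", decode_hbuf_size(0));  /* 0  */
    printf("%u\n", decode_hbuf_size(31)); /* 32 */
    return 0;
}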
#if 0
static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
@@ -972,14 +1008,10 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
set_buf_index, 2))
return false;
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
&hbuf_size, 1))
if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
return false;
/* Buffer size is 0 based, hooray! */
hbuf_size++;
DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
if (hbuf_size < length)
@@ -1030,14 +1062,10 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
if (tx_rate == SDVO_HBUF_TX_DISABLED)
return 0;
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
&hbuf_size, 1))
return -ENXIO;
if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
return false;
/* Buffer size is 0 based, hooray! */
hbuf_size++;
DRM_DEBUG_KMS("reading sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
hbuf_size = min_t(unsigned int, length, hbuf_size);
@@ -1893,12 +1921,14 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
&intel_sdvo->hotplug_active, 2);
}
static bool intel_sdvo_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector)
static enum intel_hotplug_state
intel_sdvo_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received)
{
intel_sdvo_enable_hotplug(encoder);
return intel_encoder_hotplug(encoder, connector);
return intel_encoder_hotplug(encoder, connector, irq_received);
}
static bool

drivers/gpu/drm/i915/display/intel_sprite.c

@@ -40,8 +40,9 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic_plane.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_pm.h"
#include "intel_psr.h"
@@ -330,6 +331,12 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
return 0;
}
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
{
return INTEL_GEN(dev_priv) >= 11 &&
icl_hdr_plane_mask() & BIT(plane_id);
}
static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -441,9 +448,21 @@ icl_program_input_csc(struct intel_plane *plane,
*/
[DRM_COLOR_YCBCR_BT709] = {
0x7C98, 0x7800, 0x0,
0x9EF8, 0x7800, 0xABF8,
0x9EF8, 0x7800, 0xAC00,
0x0, 0x7800, 0x7ED8,
},
/*
* BT.2020 full range YCbCr -> full range RGB
* The matrix required is :
* [1.000, 0.000, 1.474,
* 1.000, -0.1645, -0.5713,
* 1.000, 1.8814, 0.0000]
*/
[DRM_COLOR_YCBCR_BT2020] = {
0x7BC8, 0x7800, 0x0,
0x8928, 0x7800, 0xAA88,
0x0, 0x7800, 0x7F10,
},
};
/* Matrix for Limited Range to Full Range Conversion */
@@ -451,26 +470,38 @@ icl_program_input_csc(struct intel_plane *plane,
/*
* BT.601 Limited range YCbCr -> full range RGB
* The matrix required is :
* [1.164384, 0.000, 1.596370,
* 1.138393, -0.382500, -0.794598,
* 1.138393, 1.971696, 0.0000]
* [1.164384, 0.000, 1.596027,
* 1.164384, -0.39175, -0.812813,
* 1.164384, 2.017232, 0.0000]
*/
[DRM_COLOR_YCBCR_BT601] = {
0x7CC8, 0x7950, 0x0,
0x8CB8, 0x7918, 0x9C40,
0x0, 0x7918, 0x7FC8,
0x8D00, 0x7950, 0x9C88,
0x0, 0x7950, 0x6810,
},
/*
* BT.709 Limited range YCbCr -> full range RGB
* The matrix required is :
* [1.164, 0.000, 1.833671,
* 1.138393, -0.213249, -0.532909,
* 1.138393, 2.112402, 0.0000]
* [1.164384, 0.000, 1.792741,
* 1.164384, -0.213249, -0.532909,
* 1.164384, 2.112402, 0.0000]
*/
[DRM_COLOR_YCBCR_BT709] = {
0x7EA8, 0x7950, 0x0,
0x8888, 0x7918, 0xADA8,
0x0, 0x7918, 0x6870,
0x7E58, 0x7950, 0x0,
0x8888, 0x7950, 0xADA8,
0x0, 0x7950, 0x6870,
},
/*
* BT.2020 Limited range YCbCr -> full range RGB
* The matrix required is :
* [1.164, 0.000, 1.678,
* 1.164, -0.1873, -0.6504,
* 1.164, 2.1417, 0.0000]
*/
[DRM_COLOR_YCBCR_BT2020] = {
0x7D70, 0x7950, 0x0,
0x8A68, 0x7950, 0xAC00,
0x0, 0x7950, 0x6890,
},
};
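A check on the corrected comment values: the limited-range gains fall straight out of the 8-bit video ranges (Y 16-235, chroma 16-240) and the standard BT coefficients. For example, with BT.709 Kr = 0.2126 the Cr-to-R term is 2(1-Kr) = 1.5748 rescaled by the chroma excursion:

#include <stdio.h>

int main(void)
{
    printf("Y gain      = %.6f\n", 255.0 / 219.0);          /* 1.164384 */
    printf("BT.709 Cr-R = %.6f\n", 255.0 / 224.0 * 1.5748); /* 1.792741 */
    printf("BT.601 Cr-R = %.6f\n", 255.0 / 224.0 * 1.402);  /* 1.596027 */
    return 0;
}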
const u16 *csc;
@@ -492,8 +523,11 @@ icl_program_input_csc(struct intel_plane *plane,
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
PREOFF_YUV_TO_RGB_ME);
if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0);
else
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
PREOFF_YUV_TO_RGB_ME);
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
PREOFF_YUV_TO_RGB_LO);
I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
@@ -683,6 +717,16 @@ skl_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
/* The points are not evenly spaced. */
static const u8 in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 };
int i;
for (i = 0; i < 8; i++)
gamma[i] = (in[i] << 8) / 32;
}
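For reference, the ramp this produces: with hardware points at x = 0, 1, 2, 4, 8, 16, 24, 32 (out of 32), a linear curve in 8.8 fixed point comes out as below.

#include <stdio.h>

int main(void)
{
    static const unsigned char in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 };
    unsigned short gamma[8];

    for (int i = 0; i < 8; i++) {
        gamma[i] = (in[i] << 8) / 32; /* y = x/32 in 8.8 fixed point */
        printf("%d -> %d\n", in[i], gamma[i]);
    }
    /* prints 0, 8, 16, 32, 64, 128, 192, 256 */
    return 0;
}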
static void
chv_update_csc(const struct intel_plane_state *plane_state)
{
@@ -858,6 +902,31 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
static void vlv_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
u16 gamma[8];
int i;
/* Seems RGB data bypasses the gamma always */
if (!fb->format->is_yuv)
return;
i9xx_plane_linear_gamma(gamma);
/* FIXME these registers are single buffered :( */
/* The two end points are implicit (0.0 and 1.0) */
for (i = 1; i < 8 - 1; i++)
I915_WRITE_FW(SPGAMC(pipe, plane_id, i - 1),
gamma[i] << 16 |
gamma[i] << 8 |
gamma[i]);
}
static void
vlv_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -916,6 +985,7 @@ vlv_update_plane(struct intel_plane *plane,
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
vlv_update_clrc(plane_state);
vlv_update_gamma(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1013,6 +1083,8 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
sprctl |= SPRITE_INT_GAMMA_DISABLE;
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
@@ -1033,6 +1105,45 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
static void ivb_sprite_linear_gamma(u16 gamma[18])
{
int i;
for (i = 0; i < 17; i++)
gamma[i] = (i << 10) / 16;
gamma[i] = 3 << 10;
i++;
}
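The IVB ramp in numbers: 17 evenly spaced points spanning 0..1.0 in 6.10 fixed point, plus one extended entry at 3.0 for out-of-range pixel values.

#include <stdio.h>

int main(void)
{
    unsigned short gamma[18];
    int i;

    for (i = 0; i < 17; i++)
        gamma[i] = (i << 10) / 16; /* 0..1024 in steps of 64 */
    gamma[i] = 3 << 10;            /* extended point at 3.0  */

    printf("step=%d last=%d ext=%d\n", gamma[1], gamma[16], gamma[17]);
    /* step=64 last=1024 ext=3072 */
    return 0;
}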
static void ivb_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
u16 gamma[18];
int i;
ivb_sprite_linear_gamma(gamma);
/* FIXME these registers are single buffered :( */
for (i = 0; i < 16; i++)
I915_WRITE_FW(SPRGAMC(pipe, i),
gamma[i] << 20 |
gamma[i] << 10 |
gamma[i]);
I915_WRITE_FW(SPRGAMC16(pipe, 0), gamma[i]);
I915_WRITE_FW(SPRGAMC16(pipe, 1), gamma[i]);
I915_WRITE_FW(SPRGAMC16(pipe, 2), gamma[i]);
i++;
I915_WRITE_FW(SPRGAMC17(pipe, 0), gamma[i]);
I915_WRITE_FW(SPRGAMC17(pipe, 1), gamma[i]);
I915_WRITE_FW(SPRGAMC17(pipe, 2), gamma[i]);
i++;
}
static void
ivb_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1099,6 +1210,8 @@ ivb_update_plane(struct intel_plane *plane,
I915_WRITE_FW(SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
ivb_update_gamma(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1224,6 +1337,66 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
return dvscntr;
}
static void g4x_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
u16 gamma[8];
int i;
/* Seems RGB data bypasses the gamma always */
if (!fb->format->is_yuv)
return;
i9xx_plane_linear_gamma(gamma);
/* FIXME these registers are single buffered :( */
/* The two end points are implicit (0.0 and 1.0) */
for (i = 1; i < 8 - 1; i++)
I915_WRITE_FW(DVSGAMC_G4X(pipe, i - 1),
gamma[i] << 16 |
gamma[i] << 8 |
gamma[i]);
}
static void ilk_sprite_linear_gamma(u16 gamma[17])
{
int i;
for (i = 0; i < 17; i++)
gamma[i] = (i << 10) / 16;
}
static void ilk_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
u16 gamma[17];
int i;
/* Seems RGB data bypasses the gamma always */
if (!fb->format->is_yuv)
return;
ilk_sprite_linear_gamma(gamma);
/* FIXME these registers are single buffered :( */
for (i = 0; i < 16; i++)
I915_WRITE_FW(DVSGAMC_ILK(pipe, i),
gamma[i] << 20 |
gamma[i] << 10 |
gamma[i]);
I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
i++;
}
static void
g4x_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1283,6 +1456,11 @@ g4x_update_plane(struct intel_plane *plane,
I915_WRITE_FW(DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
if (IS_G4X(dev_priv))
g4x_update_gamma(plane_state);
else
ilk_update_gamma(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1347,7 +1525,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
const struct drm_framebuffer *fb = plane_state->base.fb;
const struct drm_rect *src = &plane_state->base.src;
const struct drm_rect *dst = &plane_state->base.dst;
int src_x, src_y, src_w, src_h, crtc_w, crtc_h;
int src_x, src_w, src_h, crtc_w, crtc_h;
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
unsigned int cpp = fb->format->cpp[0];
@@ -1358,7 +1536,6 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
crtc_h = drm_rect_height(dst);
src_x = src->x1 >> 16;
src_y = src->y1 >> 16;
src_w = drm_rect_width(src) >> 16;
src_h = drm_rect_height(src) >> 16;
@@ -1852,52 +2029,6 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
static const u32 icl_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
};
static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_XRGB16161616F,
DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_ARGB16161616F,
DRM_FORMAT_ABGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
};
static const u32 skl_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
@@ -1933,7 +2064,28 @@ static const u32 glk_planar_formats[] = {
DRM_FORMAT_P016,
};
static const u32 icl_planar_formats[] = {
static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
};
static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1958,7 +2110,7 @@ static const u32 icl_planar_formats[] = {
DRM_FORMAT_XVYU16161616,
};
static const u32 icl_hdr_planar_formats[] = {
static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -2201,9 +2353,6 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
if (INTEL_GEN(dev_priv) >= 11)
return plane_id <= PLANE_SPRITE3;
/* Display WA #0870: skl, bxt */
if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
return false;
@@ -2217,6 +2366,48 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
return true;
}
static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id,
int *num_formats)
{
if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
*num_formats = ARRAY_SIZE(skl_planar_formats);
return skl_planar_formats;
} else {
*num_formats = ARRAY_SIZE(skl_plane_formats);
return skl_plane_formats;
}
}
static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id,
int *num_formats)
{
if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
*num_formats = ARRAY_SIZE(glk_planar_formats);
return glk_planar_formats;
} else {
*num_formats = ARRAY_SIZE(skl_plane_formats);
return skl_plane_formats;
}
}
static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id,
int *num_formats)
{
if (icl_is_hdr_plane(dev_priv, plane_id)) {
*num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
return icl_hdr_plane_formats;
} else if (icl_is_nv12_y_plane(plane_id)) {
*num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
return icl_sdr_y_plane_formats;
} else {
*num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats);
return icl_sdr_uv_plane_formats;
}
}
static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2270,30 +2461,15 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
if (icl_is_nv12_y_plane(plane_id))
plane->update_slave = icl_update_slave;
if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
if (icl_is_hdr_plane(dev_priv, plane_id)) {
formats = icl_hdr_planar_formats;
num_formats = ARRAY_SIZE(icl_hdr_planar_formats);
} else if (INTEL_GEN(dev_priv) >= 11) {
formats = icl_planar_formats;
num_formats = ARRAY_SIZE(icl_planar_formats);
} else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) {
formats = glk_planar_formats;
num_formats = ARRAY_SIZE(glk_planar_formats);
} else {
formats = skl_planar_formats;
num_formats = ARRAY_SIZE(skl_planar_formats);
}
} else if (icl_is_hdr_plane(dev_priv, plane_id)) {
formats = icl_hdr_plane_formats;
num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
} else if (INTEL_GEN(dev_priv) >= 11) {
formats = icl_plane_formats;
num_formats = ARRAY_SIZE(icl_plane_formats);
} else {
formats = skl_plane_formats;
num_formats = ARRAY_SIZE(skl_plane_formats);
}
if (INTEL_GEN(dev_priv) >= 11)
formats = icl_get_plane_formats(dev_priv, pipe,
plane_id, &num_formats);
else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
formats = glk_get_plane_formats(dev_priv, pipe,
plane_id, &num_formats);
else
formats = skl_get_plane_formats(dev_priv, pipe,
plane_id, &num_formats);
plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (plane->has_ccs)


@@ -8,7 +8,6 @@
#include <linux/types.h>
#include "i915_drv.h"
#include "intel_display.h"
struct drm_device;
@@ -49,11 +48,6 @@ static inline u8 icl_hdr_plane_mask(void)
BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
}
static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
enum plane_id plane_id)
{
return INTEL_GEN(dev_priv) >= 11 &&
icl_hdr_plane_mask() & BIT(plane_id);
}
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
#endif /* __INTEL_SPRITE_H__ */


@@ -0,0 +1,544 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"
static const char *tc_port_mode_name(enum tc_port_mode mode)
{
static const char * const names[] = {
[TC_PORT_TBT_ALT] = "tbt-alt",
[TC_PORT_DP_ALT] = "dp-alt",
[TC_PORT_LEGACY] = "legacy",
};
if (WARN_ON(mode >= ARRAY_SIZE(names)))
mode = TC_PORT_TBT_ALT;
return names[mode];
}
static bool has_modular_fia(struct drm_i915_private *i915)
{
if (!INTEL_INFO(i915)->display.has_modular_fia)
return false;
return intel_uncore_read(&i915->uncore,
PORT_TX_DFLEXDPSP(FIA1)) & MODULAR_FIA_MASK;
}
static enum phy_fia tc_port_to_fia(struct drm_i915_private *i915,
enum tc_port tc_port)
{
if (!has_modular_fia(i915))
return FIA1;
/*
* Each Modular FIA instance houses 2 TC ports. On SoCs that have more
* than two TC ports there are multiple Modular FIA instances.
*/
return tc_port / 2;
}
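A worked example of the division above, assuming enum phy_fia numbers FIA1 as 0:

/*
 * tc_port 0 or 1 -> FIA1, tc_port 2 or 3 -> FIA2, tc_port 4 or 5 -> FIA3;
 * without a modular FIA, every TC port stays on FIA1.
 */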
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 lane_mask;
lane_mask = intel_uncore_read(uncore,
PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
WARN_ON(lane_mask == 0xffffffff);
return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
DP_LANE_ASSIGNMENT_SHIFT(tc_port);
}
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
intel_wakeref_t wakeref;
u32 lane_mask;
if (dig_port->tc_mode != TC_PORT_DP_ALT)
return 4;
lane_mask = 0;
with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
lane_mask = intel_tc_port_get_lane_mask(dig_port);
switch (lane_mask) {
default:
MISSING_CASE(lane_mask);
/* fall-through */
case 0x1:
case 0x2:
case 0x4:
case 0x8:
return 1;
case 0x3:
case 0xc:
return 2;
case 0xf:
return 4;
}
}
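A worked decoding of the lane-assignment switch above (mask values taken from the cases; interpretation hedged):

/*
 * 0x1/0x2/0x4/0x8: exactly one lane assigned       -> 1 lane
 * 0x3 (lower lane pair) or 0xc (upper lane pair)   -> 2 lanes
 * 0xf: all four lanes assigned                     -> 4 lanes
 * Anything else trips MISSING_CASE() and falls back to 1 lane.
 */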
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
struct intel_uncore *uncore = &i915->uncore;
u32 val;
WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
switch (required_lanes) {
case 1:
val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
DFLEXDPMLE1_DPMLETC_ML0(tc_port);
break;
case 2:
val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
break;
case 4:
val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
break;
default:
MISSING_CASE(required_lanes);
}
intel_uncore_write(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
u32 live_status_mask)
{
u32 valid_hpd_mask;
if (dig_port->tc_legacy_port)
valid_hpd_mask = BIT(TC_PORT_LEGACY);
else
valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
BIT(TC_PORT_TBT_ALT);
if (!(live_status_mask & ~valid_hpd_mask))
return;
/* If live status mismatches the VBT flag, trust the live status. */
DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
dig_port->tc_port_name, live_status_mask);
dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}
static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 mask = 0;
u32 val;
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n",
dig_port->tc_port_name);
return mask;
}
if (val & TC_LIVE_STATE_TBT(tc_port))
mask |= BIT(TC_PORT_TBT_ALT);
if (val & TC_LIVE_STATE_TC(tc_port))
mask |= BIT(TC_PORT_DP_ALT);
if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
mask |= BIT(TC_PORT_LEGACY);
/* The sink can be connected only in a single mode. */
if (!WARN_ON(hweight32(mask) > 1))
tc_port_fixup_legacy_flag(dig_port, mask);
return mask;
}
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n",
dig_port->tc_port_name);
return false;
}
return val & DP_PHY_MODE_STATUS_COMPLETED(tc_port);
}
static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
bool enable)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
dig_port->tc_port_name,
enableddisabled(enable));
return false;
}
val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
if (!enable)
val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
intel_uncore_write(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n",
dig_port->tc_port_name);
return true;
}
static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n",
dig_port->tc_port_name);
return true;
}
return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port));
}
/*
* This function implements the first part of the Connect Flow described by our
* specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
* lanes, EDID, etc.) is done as needed in the typical places.
*
* Unlike the other ports, type-C ports are not available to use as soon as we
* get a hotplug. The type-C PHYs can be shared between multiple controllers:
* display, USB, etc. As a result, handshaking through FIA is required around
* connect and disconnect to cleanly transfer ownership with the controller and
* set the type-C power state.
*/
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
int required_lanes)
{
int max_lanes;
if (!icl_tc_phy_status_complete(dig_port)) {
DRM_DEBUG_KMS("Port %s: PHY not ready\n",
dig_port->tc_port_name);
goto out_set_tbt_alt_mode;
}
if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
!WARN_ON(dig_port->tc_legacy_port))
goto out_set_tbt_alt_mode;
max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
if (dig_port->tc_legacy_port) {
WARN_ON(max_lanes != 4);
dig_port->tc_mode = TC_PORT_LEGACY;
return;
}
/*
* Now we have to re-check the live state, in case the port recently
* became disconnected. Not necessary for legacy mode.
*/
if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
dig_port->tc_port_name);
goto out_set_safe_mode;
}
if (max_lanes < required_lanes) {
DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
dig_port->tc_port_name,
max_lanes, required_lanes);
goto out_set_safe_mode;
}
dig_port->tc_mode = TC_PORT_DP_ALT;
return;
out_set_safe_mode:
icl_tc_phy_set_safe_mode(dig_port, true);
out_set_tbt_alt_mode:
dig_port->tc_mode = TC_PORT_TBT_ALT;
}
/*
* See the comment at the connect function. This implements the Disconnect
* Flow.
*/
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
switch (dig_port->tc_mode) {
case TC_PORT_LEGACY:
/* Nothing to do, we never disconnect from legacy mode */
break;
case TC_PORT_DP_ALT:
icl_tc_phy_set_safe_mode(dig_port, true);
dig_port->tc_mode = TC_PORT_TBT_ALT;
break;
case TC_PORT_TBT_ALT:
/* Nothing to do, we stay in TBT-alt mode */
break;
default:
MISSING_CASE(dig_port->tc_mode);
}
}
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
if (!icl_tc_phy_status_complete(dig_port)) {
DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
dig_port->tc_port_name);
return dig_port->tc_mode == TC_PORT_TBT_ALT;
}
if (icl_tc_phy_is_in_safe_mode(dig_port)) {
DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
dig_port->tc_port_name);
return false;
}
return dig_port->tc_mode == TC_PORT_DP_ALT ||
dig_port->tc_mode == TC_PORT_LEGACY;
}
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
u32 live_status_mask = tc_port_live_status_mask(dig_port);
bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
enum tc_port_mode mode;
if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port)))
return TC_PORT_TBT_ALT;
mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
if (live_status_mask) {
enum tc_port_mode live_mode = fls(live_status_mask) - 1;
if (!WARN_ON(live_mode == TC_PORT_TBT_ALT))
mode = live_mode;
}
return mode;
}
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
u32 live_status_mask = tc_port_live_status_mask(dig_port);
if (live_status_mask)
return fls(live_status_mask) - 1;
return icl_tc_phy_status_complete(dig_port) &&
dig_port->tc_legacy_port ? TC_PORT_LEGACY :
TC_PORT_TBT_ALT;
}
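The fls() - 1 idiom above depends on the enum ordering; assuming TC_PORT_TBT_ALT < TC_PORT_DP_ALT < TC_PORT_LEGACY, as the names[] table suggests:

/*
 * mask = BIT(TC_PORT_DP_ALT)                       -> fls(0x2) - 1 == TC_PORT_DP_ALT
 * mask = BIT(TC_PORT_LEGACY) | BIT(TC_PORT_DP_ALT) -> fls(0x6) - 1 == TC_PORT_LEGACY
 * i.e. the highest (most specific) live mode wins.
 */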
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
int required_lanes)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port_mode old_tc_mode = dig_port->tc_mode;
intel_display_power_flush_work(i915);
WARN_ON(intel_display_power_is_enabled(i915,
intel_aux_power_domain(dig_port)));
icl_tc_phy_disconnect(dig_port);
icl_tc_phy_connect(dig_port, required_lanes);
DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n",
dig_port->tc_port_name,
tc_port_mode_name(old_tc_mode),
tc_port_mode_name(dig_port->tc_mode));
}
static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
int refcount)
{
WARN_ON(dig_port->tc_link_refcount);
dig_port->tc_link_refcount = refcount;
}
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
struct intel_encoder *encoder = &dig_port->base;
int active_links = 0;
mutex_lock(&dig_port->tc_lock);
dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
if (dig_port->dp.is_mst)
active_links = intel_dp_mst_encoder_active_links(dig_port);
else if (encoder->base.crtc)
active_links = to_intel_crtc(encoder->base.crtc)->active;
if (active_links) {
if (!icl_tc_phy_is_connected(dig_port))
DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
dig_port->tc_port_name, active_links);
intel_tc_port_link_init_refcount(dig_port, active_links);
goto out;
}
if (dig_port->tc_legacy_port)
icl_tc_phy_connect(dig_port, 1);
out:
DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
dig_port->tc_port_name,
tc_port_mode_name(dig_port->tc_mode));
mutex_unlock(&dig_port->tc_lock);
}
static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
}
/*
* The type-C ports are different because even when they are connected, they may
* not be available/usable by the graphics driver: see the comment on
* icl_tc_phy_connect(). So, instead of adding the additional concept of
* "usable" and making everything check for "connected and usable", we
* define a port as "connected" when it is not only connected, but also when it
* is usable by the rest of the driver. That maintains the old assumption that
* connected ports are usable, and avoids exposing to the users objects they
* can't really use.
*/
bool intel_tc_port_connected(struct intel_digital_port *dig_port)
{
bool is_connected;
intel_tc_port_lock(dig_port);
is_connected = tc_port_live_status_mask(dig_port) &
BIT(dig_port->tc_mode);
intel_tc_port_unlock(dig_port);
return is_connected;
}
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
int required_lanes)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
intel_wakeref_t wakeref;
wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);
mutex_lock(&dig_port->tc_lock);
if (!dig_port->tc_link_refcount &&
intel_tc_port_needs_reset(dig_port))
intel_tc_port_reset_mode(dig_port, required_lanes);
WARN_ON(dig_port->tc_lock_wakeref);
dig_port->tc_lock_wakeref = wakeref;
}
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
__intel_tc_port_lock(dig_port, 1);
}
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);
mutex_unlock(&dig_port->tc_lock);
intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
wakeref);
}
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
return mutex_is_locked(&dig_port->tc_lock) ||
dig_port->tc_link_refcount;
}
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes)
{
__intel_tc_port_lock(dig_port, required_lanes);
dig_port->tc_link_refcount++;
intel_tc_port_unlock(dig_port);
}
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
mutex_lock(&dig_port->tc_lock);
dig_port->tc_link_refcount--;
mutex_unlock(&dig_port->tc_lock);
}
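A sketch of the intended get/put pairing (call sites hypothetical):

/* on link activation, e.g. from an encoder pre-enable hook: */
intel_tc_port_get_link(dig_port, crtc_state->lane_count);
/* ... while tc_link_refcount is elevated, intel_tc_port_lock()
 * will not reset the port mode ... */
/* on link teardown: */
intel_tc_port_put_link(dig_port);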
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(i915, port);
if (WARN_ON(tc_port == PORT_TC_NONE))
return;
snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
"%c/TC#%d", port_name(port), tc_port + 1);
mutex_init(&dig_port->tc_lock);
dig_port->tc_legacy_port = is_legacy;
dig_port->tc_link_refcount = 0;
dig_port->tc_phy_fia = tc_port_to_fia(i915, tc_port);
}
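And a hypothetical call site for the constructor (both helper names assumed; in practice the legacy flag comes from the VBT):

if (intel_port_is_tc(dev_priv, port))
	intel_tc_port_init(dig_port, vbt_port_is_tc_legacy);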


@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_TC_H__
#define __INTEL_TC_H__
#include <linux/mutex.h>
#include <linux/types.h>
struct intel_digital_port;
bool intel_tc_port_connected(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);
void intel_tc_port_sanitize(struct intel_digital_port *dig_port);
void intel_tc_port_lock(struct intel_digital_port *dig_port);
void intel_tc_port_unlock(struct intel_digital_port *dig_port);
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes);
void intel_tc_port_put_link(struct intel_digital_port *dig_port);
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port);
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy);
#endif /* __INTEL_TC_H__ */


@@ -37,7 +37,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
#include "intel_tv.h"


@@ -310,13 +310,13 @@ enum vbt_gmbus_ddi {
DDC_BUS_DDI_F,
ICL_DDC_BUS_DDI_A = 0x1,
ICL_DDC_BUS_DDI_B,
TGL_DDC_BUS_DDI_C,
ICL_DDC_BUS_PORT_1 = 0x4,
ICL_DDC_BUS_PORT_2,
ICL_DDC_BUS_PORT_3,
ICL_DDC_BUS_PORT_4,
MCC_DDC_BUS_DDI_A = 0x1,
MCC_DDC_BUS_DDI_B,
MCC_DDC_BUS_DDI_C = 0x4,
TGL_DDC_BUS_PORT_5,
TGL_DDC_BUS_PORT_6,
};
#define DP_AUX_A 0x40


@@ -9,7 +9,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_vdsc.h"
enum ROW_INDEX_BPP {
@@ -459,17 +459,23 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/*
* On ICL VDSC/joining for eDP transcoder uses a separate power well PW2
* This requires POWER_DOMAIN_TRANSCODER_EDP_VDSC power domain.
* On ICL, VDSC/joining for the eDP transcoder uses a separate power
* well, PW2. This requires the POWER_DOMAIN_TRANSCODER_VDSC_PW2 power
* domain.
* For any other transcoder, VDSC/joining uses the power well associated
* with the pipe/transcoder in use. Hence another reference on the
* transcoder power domain will suffice.
*
* On TGL we have the same mapping, but for transcoder A (the special
* TRANSCODER_EDP is gone).
*/
if (cpu_transcoder == TRANSCODER_EDP)
return POWER_DOMAIN_TRANSCODER_EDP_VDSC;
if (INTEL_GEN(i915) >= 12 && cpu_transcoder == TRANSCODER_A)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
else if (cpu_transcoder == TRANSCODER_EDP)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
else
return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
}
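Concrete instances of the mapping above:

/*
 * TGL, transcoder A (eDP lives here now) -> POWER_DOMAIN_TRANSCODER_VDSC_PW2
 * ICL, TRANSCODER_EDP                    -> POWER_DOMAIN_TRANSCODER_VDSC_PW2
 * any gen, TRANSCODER_B                  -> POWER_DOMAIN_TRANSCODER(TRANSCODER_B)
 */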


@@ -34,7 +34,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_panel.h"
@@ -84,9 +84,8 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port), mask, mask,
100))
if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
mask, 100))
DRM_ERROR("DPI FIFOs are not empty\n");
}
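The conversions in this file all follow the same pattern; the wrappers they rely on are introduced elsewhere in the series, but their assumed semantics are roughly (a sketch, not the authoritative definitions):

static inline int intel_de_wait_for_set(struct drm_i915_private *i915,
					i915_reg_t reg, u32 mask,
					unsigned int timeout)
{
	/* wait until all bits in mask read back as set */
	return intel_wait_for_register(&i915->uncore, reg, mask, mask, timeout);
}

static inline int intel_de_wait_for_clear(struct drm_i915_private *i915,
					  i915_reg_t reg, u32 mask,
					  unsigned int timeout)
{
	/* wait until all bits in mask read back as clear */
	return intel_wait_for_register(&i915->uncore, reg, mask, 0, timeout);
}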
@@ -154,10 +153,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
if (packet.payload_length) {
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port),
data_mask, 0,
50))
if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
data_mask, 50))
DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
write_data(dev_priv, data_reg, packet.payload,
@@ -168,10 +165,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
}
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port),
ctrl_mask, 0,
50)) {
if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
ctrl_mask, 50)) {
DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
}
@@ -180,10 +175,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_INTR_STAT(port),
data_mask, data_mask,
50))
if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
data_mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
@@ -240,9 +233,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_INTR_STAT(port), mask, mask,
100))
if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
return 0;
@@ -359,11 +350,8 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED,
GLK_MIPIIO_PORT_POWERED,
20))
if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 20))
DRM_ERROR("MIPIO port is powergated\n");
}
@@ -385,11 +373,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY,
GLK_PHY_STATUS_PORT_READY,
20))
if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not ON\n");
}
@@ -413,11 +398,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_ULPS_NOT_ACTIVE,
0,
20))
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_ULPS_NOT_ACTIVE, 20))
DRM_ERROR("ULPS not active\n");
/* Exit ULPS */
@@ -440,21 +422,15 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_DATA_LANE_STOP_STATE,
GLK_DATA_LANE_STOP_STATE,
20))
if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
GLK_DATA_LANE_STOP_STATE, 20))
DRM_ERROR("Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(&dev_priv->uncore,
BXT_MIPI_PORT_CTRL(port),
AFE_LATCHOUT,
AFE_LATCHOUT,
20))
if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
AFE_LATCHOUT, 20))
DRM_ERROR("D-PHY not entering LP-11 state\n");
}
}
@@ -554,17 +530,15 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 0, 20))
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 0, 20))
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 20))
DRM_ERROR("MIPI IO Port is not powergated\n");
}
}
@@ -583,9 +557,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 0, 20))
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not turning OFF\n");
}
@@ -633,9 +606,8 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
intel_wait_for_register(&dev_priv->uncore,
port_ctrl, AFE_LATCHOUT, 0,
30))
intel_de_wait_for_clear(dev_priv, port_ctrl,
AFE_LATCHOUT, 30))
DRM_ERROR("DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
@@ -1644,7 +1616,7 @@ vlv_dsi_get_panel_orientation(struct intel_connector *connector)
return intel_dsi_get_panel_orientation(connector);
}
static void intel_dsi_add_properties(struct intel_connector *connector)
static void vlv_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1983,7 +1955,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
intel_dsi_add_properties(intel_connector);
vlv_dsi_add_properties(intel_connector);
return;


@@ -28,7 +28,7 @@
#include <linux/kernel.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_sideband.h"
@@ -246,11 +246,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
if (intel_wait_for_register(&dev_priv->uncore,
BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED,
0,
1))
if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1))
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}
@@ -530,11 +527,8 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
if (intel_wait_for_register(&dev_priv->uncore,
BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED,
BXT_DSI_PLL_LOCKED,
1)) {
if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1)) {
DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
return;
}


@@ -1 +1,5 @@
include $(src)/Makefile.header-test # Extra header tests
# For building individual subdir files on the command line
subdir-ccflags-y += -I$(srctree)/$(src)/..
# Extra header tests
header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h


@@ -1,16 +0,0 @@
# SPDX-License-Identifier: MIT
# Copyright © 2019 Intel Corporation
# Test the headers are compilable as standalone units
header_test := $(notdir $(wildcard $(src)/*.h))
quiet_cmd_header_test = HDRTEST $@
cmd_header_test = echo "\#include \"$(<F)\"" > $@
header_test_%.c: %.h
$(call cmd,header_test)
extra-$(CONFIG_DRM_I915_WERROR) += \
$(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))


@@ -82,7 +82,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
struct reservation_object_list *list;
struct dma_resv_list *list;
unsigned int seq;
int err;
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
* !reservation_object_test_signaled_rcu(obj->resv, true);
* !dma_resv_test_signaled_rcu(obj->resv, true);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/


@@ -8,87 +8,67 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
static DEFINE_SPINLOCK(clflush_lock);
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
struct clflush {
struct dma_fence dma; /* Must be first for dma_fence_free() */
struct i915_sw_fence wait;
struct work_struct work;
struct dma_fence_work base;
struct drm_i915_gem_object *obj;
};
static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
{
return DRIVER_NAME;
}
static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
{
return "clflush";
}
static void i915_clflush_release(struct dma_fence *fence)
{
struct clflush *clflush = container_of(fence, typeof(*clflush), dma);
i915_sw_fence_fini(&clflush->wait);
BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
dma_fence_free(&clflush->dma);
}
static const struct dma_fence_ops i915_clflush_ops = {
.get_driver_name = i915_clflush_get_driver_name,
.get_timeline_name = i915_clflush_get_timeline_name,
.release = i915_clflush_release,
};
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
static void __do_clflush(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages);
intel_fb_obj_flush(obj, ORIGIN_CPU);
intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
}
static void i915_clflush_work(struct work_struct *work)
static int clflush_work(struct dma_fence_work *base)
{
struct clflush *clflush = container_of(work, typeof(*clflush), work);
struct drm_i915_gem_object *obj = clflush->obj;
struct clflush *clflush = container_of(base, typeof(*clflush), base);
struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
int err;
if (i915_gem_object_pin_pages(obj)) {
DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
goto out;
}
__i915_do_clflush(obj);
err = i915_gem_object_pin_pages(obj);
if (err)
goto put;
__do_clflush(obj);
i915_gem_object_unpin_pages(obj);
out:
put:
i915_gem_object_put(obj);
dma_fence_signal(&clflush->dma);
dma_fence_put(&clflush->dma);
return err;
}
static int __i915_sw_fence_call
i915_clflush_notify(struct i915_sw_fence *fence,
enum i915_sw_fence_notify state)
static void clflush_release(struct dma_fence_work *base)
{
struct clflush *clflush = container_of(fence, typeof(*clflush), wait);
struct clflush *clflush = container_of(base, typeof(*clflush), base);
switch (state) {
case FENCE_COMPLETE:
schedule_work(&clflush->work);
break;
if (clflush->obj)
i915_gem_object_put(clflush->obj);
}
case FENCE_FREE:
dma_fence_put(&clflush->dma);
break;
}
static const struct dma_fence_work_ops clflush_ops = {
.name = "clflush",
.work = clflush_work,
.release = clflush_release,
};
return NOTIFY_DONE;
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
struct clflush *clflush;
GEM_BUG_ON(!obj->cache_dirty);
clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
if (!clflush)
return NULL;
dma_fence_work_init(&clflush->base, &clflush_ops);
clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */
return clflush;
}
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
@@ -126,33 +106,16 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
clflush = NULL;
if (!(flags & I915_CLFLUSH_SYNC))
clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
clflush = clflush_work_create(obj);
if (clflush) {
GEM_BUG_ON(!obj->cache_dirty);
dma_fence_init(&clflush->dma,
&i915_clflush_ops,
&clflush_lock,
to_i915(obj->base.dev)->mm.unordered_timeline,
0);
i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
clflush->obj = i915_gem_object_get(obj);
INIT_WORK(&clflush->work, i915_clflush_work);
dma_fence_get(&clflush->dma);
i915_sw_fence_await_reservation(&clflush->wait,
obj->base.resv, NULL,
true, I915_FENCE_TIMEOUT,
i915_sw_fence_await_reservation(&clflush->base.chain,
obj->base.resv, NULL, true,
I915_FENCE_TIMEOUT,
I915_FENCE_GFP);
reservation_object_add_excl_fence(obj->base.resv,
&clflush->dma);
i915_sw_fence_commit(&clflush->wait);
dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
dma_fence_work_commit(&clflush->base);
} else if (obj->mm.pages) {
__i915_do_clflush(obj);
__do_clflush(obj);
} else {
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
}
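For orientation, the dma_fence_work lifecycle this conversion relies on, pieced together from the hunks above (a sketch of the assumed flow, not the full API):

/*
 * clflush = clflush_work_create(obj);               <- dma_fence_work_init()
 * i915_sw_fence_await_reservation(&clflush->base.chain, obj->base.resv, ...);
 * dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
 * dma_fence_work_commit(&clflush->base);            <- queues clflush_work()
 *                                                      once awaited fences signal
 */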


@@ -2,10 +2,13 @@
/*
* Copyright © 2019 Intel Corporation
*/
#include "i915_gem_client_blt.h"
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_pool.h"
#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"
#include "intel_drv.h"
struct i915_sleeve {
struct i915_vma *vma;
@@ -72,7 +75,6 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
vma->ops = &proxy_vma_ops;
sleeve->vma = vma;
sleeve->obj = i915_gem_object_get(obj);
sleeve->pages = pages;
sleeve->page_sizes = *page_sizes;
@@ -85,7 +87,6 @@ err_free:
static void destroy_sleeve(struct i915_sleeve *sleeve)
{
i915_gem_object_put(sleeve->obj);
kfree(sleeve);
}
@@ -154,21 +155,23 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
struct drm_i915_private *i915 = w->ce->gem_context->i915;
struct drm_i915_gem_object *obj = w->sleeve->obj;
struct drm_i915_private *i915 = w->ce->engine->i915;
struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
struct i915_vma *batch;
int err = w->dma.error;
if (unlikely(err))
goto out_signal;
if (obj->cache_dirty) {
obj->write_domain = 0;
if (i915_gem_object_has_struct_page(obj))
drm_clflush_sg(w->sleeve->pages);
obj->cache_dirty = false;
}
obj->read_domains = I915_GEM_GPU_DOMAINS;
obj->write_domain = 0;
/* XXX: we need to kill this */
mutex_lock(&i915->drm.struct_mutex);
@@ -176,10 +179,16 @@ static void clear_pages_worker(struct work_struct *work)
if (unlikely(err))
goto out_unlock;
rq = i915_request_create(w->ce);
batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_unpin;
}
rq = intel_context_create_request(w->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_unpin;
goto out_batch;
}
/* There's no way the fence has signalled */
@@ -187,20 +196,28 @@ static void clear_pages_worker(struct work_struct *work)
clear_pages_dma_fence_cb))
GEM_BUG_ON(1);
err = intel_emit_vma_mark_active(batch, rq);
if (unlikely(err))
goto out_request;
if (w->ce->engine->emit_init_breadcrumb) {
err = w->ce->engine->emit_init_breadcrumb(rq);
if (unlikely(err))
goto out_request;
}
/* XXX: more feverish nightmares await */
i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
/*
* w->dma is already exported via (vma|obj)->resv we need only
* keep track of the GPU activity within this vma/request, and
* propagate the signal from the request to w->dma.
*/
err = i915_active_ref(&vma->active, rq->timeline, rq);
if (err)
goto out_request;
err = intel_emit_vma_fill_blt(rq, vma, w->value);
err = w->ce->engine->emit_bb_start(rq,
batch->node.start, batch->node.size,
0);
out_request:
if (unlikely(err)) {
i915_request_skip(rq, err);
@@ -208,6 +225,8 @@ out_request:
}
i915_request_add(rq);
out_batch:
intel_emit_vma_release(w->ce, batch);
out_unpin:
i915_vma_unpin(vma);
out_unlock:
@@ -248,14 +267,11 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
struct i915_page_sizes *page_sizes,
u32 value)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_gem_context *ctx = ce->gem_context;
struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct clear_pages_work *work;
struct i915_sleeve *sleeve;
int err;
sleeve = create_sleeve(vm, obj, pages, page_sizes);
sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
if (IS_ERR(sleeve))
return PTR_ERR(sleeve);
@@ -273,11 +289,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
dma_fence_init(&work->dma,
&clear_pages_work_ops,
&fence_lock,
i915->mm.unordered_timeline,
0);
dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
i915_sw_fence_init(&work->wait, clear_pages_work_notify);
i915_gem_object_lock(obj);
@@ -288,7 +300,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
if (err < 0) {
dma_fence_set_error(&work->dma, err);
} else {
reservation_object_add_excl_fence(obj->base.resv, &work->dma);
dma_resv_add_excl_fence(obj->base.resv, &work->dma);
err = 0;
}
i915_gem_object_unlock(obj);


@@ -70,6 +70,7 @@
#include <drm/i915_drm.h>
#include "gt/intel_lrc_reg.h"
#include "gt/intel_engine_user.h"
#include "i915_gem_context.h"
#include "i915_globals.h"
@@ -158,7 +159,7 @@ lookup_user_engine(struct i915_gem_context *ctx,
if (!engine)
return ERR_PTR(-EINVAL);
idx = engine->id;
idx = engine->legacy_idx;
} else {
idx = ci->engine_instance;
}
@@ -172,7 +173,9 @@ static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
lockdep_assert_held(&i915->contexts.mutex);
if (INTEL_GEN(i915) >= 11)
if (INTEL_GEN(i915) >= 12)
max = GEN12_MAX_CONTEXT_HW_ID;
else if (INTEL_GEN(i915) >= 11)
max = GEN11_MAX_CONTEXT_HW_ID;
else if (USES_GUC_SUBMISSION(i915))
/*
@@ -278,6 +281,7 @@ static void free_engines_rcu(struct rcu_head *rcu)
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
const struct intel_gt *gt = &ctx->i915->gt;
struct intel_engine_cs *engine;
struct i915_gem_engines *e;
enum intel_engine_id id;
@@ -287,7 +291,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
return ERR_PTR(-ENOMEM);
init_rcu_head(&e->rcu);
for_each_engine(engine, ctx->i915, id) {
for_each_engine(engine, gt, id) {
struct intel_context *ce;
ce = intel_context_create(ctx, engine);
@@ -297,8 +301,8 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
}
e->engines[id] = ce;
e->num_engines = id + 1;
}
e->num_engines = id;
return e;
}
@@ -316,7 +320,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
mutex_destroy(&ctx->engines_mutex);
if (ctx->timeline)
i915_timeline_put(ctx->timeline);
intel_timeline_put(ctx->timeline);
kfree(ctx->name);
put_pid(ctx->pid);
@@ -397,30 +401,6 @@ static void context_close(struct i915_gem_context *ctx)
i915_gem_context_put(ctx);
}
static u32 default_desc_template(const struct drm_i915_private *i915,
const struct i915_address_space *vm)
{
u32 address_mode;
u32 desc;
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
if (vm && i915_vm_is_4lvl(vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (IS_GEN(i915, 8))
desc |= GEN8_CTX_L3LLC_COHERENT;
/* TODO: WaDisableLiteRestore when we start using semaphore
* signalling between Command Streamers
* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
*/
return desc;
}
static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
@@ -458,10 +438,6 @@ __create_context(struct drm_i915_private *i915)
i915_gem_context_set_bannable(ctx);
i915_gem_context_set_recoverable(ctx);
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template =
default_desc_template(i915, &i915->mm.aliasing_ppgtt->vm);
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -472,13 +448,34 @@ err_free:
return ERR_PTR(err);
}
static void
context_apply_all(struct i915_gem_context *ctx,
void (*fn)(struct intel_context *ce, void *data),
void *data)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
fn(ce, data);
i915_gem_context_unlock_engines(ctx);
}
static void __apply_ppgtt(struct intel_context *ce, void *vm)
{
i915_vm_put(ce->vm);
ce->vm = i915_vm_get(vm);
}
static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
struct i915_address_space *old = ctx->vm;
GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
ctx->vm = i915_vm_get(vm);
ctx->desc_template = default_desc_template(ctx->i915, vm);
context_apply_all(ctx, __apply_ppgtt, vm);
return old;
}
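As pieced together from the error path in set_ppgtt() further down, __set_ppgtt() hands its caller a reference on the old address space (a hedged reading of the hunks, not a documented contract):

/*
 * old = __set_ppgtt(ctx, vm);   <- caller now owns the old vm reference
 * ... on failure, roll back and drop both:
 * i915_vm_put(__set_ppgtt(ctx, old));
 * i915_vm_put(old);
 */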
@@ -494,6 +491,29 @@ static void __assign_ppgtt(struct i915_gem_context *ctx,
i915_vm_put(vm);
}
static void __set_timeline(struct intel_timeline **dst,
struct intel_timeline *src)
{
struct intel_timeline *old = *dst;
*dst = src ? intel_timeline_get(src) : NULL;
if (old)
intel_timeline_put(old);
}
static void __apply_timeline(struct intel_context *ce, void *timeline)
{
__set_timeline(&ce->timeline, timeline);
}
static void __assign_timeline(struct i915_gem_context *ctx,
struct intel_timeline *timeline)
{
__set_timeline(&ctx->timeline, timeline);
context_apply_all(ctx, __apply_timeline, timeline);
}
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
{
@@ -528,15 +548,16 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
}
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
struct i915_timeline *timeline;
struct intel_timeline *timeline;
timeline = i915_timeline_create(dev_priv, NULL);
timeline = intel_timeline_create(&dev_priv->gt, NULL);
if (IS_ERR(timeline)) {
context_close(ctx);
return ERR_CAST(timeline);
}
ctx->timeline = timeline;
__assign_timeline(ctx, timeline);
intel_timeline_put(timeline);
}
trace_i915_context_create(ctx);
@@ -544,53 +565,6 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ctx;
}
/**
* i915_gem_context_create_gvt - create a GVT GEM context
* @dev: drm device *
*
* This function is used to create a GVT specific GEM context.
*
* Returns:
* pointer to i915_gem_context on success, error pointer if failed
*
*/
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
struct i915_gem_context *ctx;
int ret;
if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
return ERR_PTR(-ENODEV);
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ERR_PTR(ret);
ctx = i915_gem_create_context(to_i915(dev), 0);
if (IS_ERR(ctx))
goto out;
ret = i915_gem_context_pin_hw_id(ctx);
if (ret) {
context_close(ctx);
ctx = ERR_PTR(ret);
goto out;
}
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_force_single_submission(ctx);
if (!USES_GUC_SUBMISSION(to_i915(dev)))
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
mutex_unlock(&dev->struct_mutex);
return ctx;
}
static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
@@ -622,7 +596,6 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
i915_gem_context_clear_bannable(ctx);
ctx->sched.priority = I915_USER_PRIORITY(prio);
ctx->ring_size = PAGE_SIZE;
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -644,20 +617,13 @@ static void init_contexts(struct drm_i915_private *i915)
init_llist_head(&i915->contexts.free_list);
}
static bool needs_preempt_context(struct drm_i915_private *i915)
{
return HAS_EXECLISTS(i915);
}
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
/* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
init_contexts(dev_priv);
/* lowest priority; idle task */
@@ -677,15 +643,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
dev_priv->kernel_context = ctx;
/* highest priority; preempting task */
if (needs_preempt_context(dev_priv)) {
ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
if (!IS_ERR(ctx))
dev_priv->preempt_context = ctx;
else
DRM_ERROR("Failed to create preempt context; disabling preemption\n");
}
DRM_DEBUG_DRIVER("%s context support initialized\n",
DRIVER_CAPS(dev_priv)->has_logical_contexts ?
"logical" : "fake");
@@ -696,8 +653,6 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->drm.struct_mutex);
if (i915->preempt_context)
destroy_kernel_context(&i915->preempt_context);
destroy_kernel_context(&i915->kernel_context);
/* Must free all deferred contexts (via flush_workqueue) first */
@@ -923,8 +878,12 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (!cb)
return -ENOMEM;
i915_active_init(i915, &cb->base, cb_retire);
i915_active_acquire(&cb->base);
i915_active_init(i915, &cb->base, NULL, cb_retire);
err = i915_active_acquire(&cb->base);
if (err) {
kfree(cb);
return err;
}
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq;
@@ -951,7 +910,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (emit)
err = emit(rq, data);
if (err == 0)
err = i915_active_ref(&cb->base, rq->fence.context, rq);
err = i915_active_ref(&cb->base, rq->timeline, rq);
i915_request_add(rq);
if (err)
@@ -1019,7 +978,7 @@ static void set_ppgtt_barrier(void *data)
static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
struct i915_address_space *vm = rq->gem_context->vm;
struct i915_address_space *vm = rq->hw_context->vm;
struct intel_engine_cs *engine = rq->engine;
u32 base = engine->mmio_base;
u32 *cs;
@@ -1128,9 +1087,8 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
set_ppgtt_barrier,
old);
if (err) {
ctx->vm = old;
ctx->desc_template = default_desc_template(ctx->i915, old);
i915_vm_put(vm);
i915_vm_put(__set_ppgtt(ctx, old));
i915_vm_put(old);
}
unlock:
@@ -1187,26 +1145,11 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
if (IS_ERR(rq))
return PTR_ERR(rq);
/* Queue this switch after all other activity by this context. */
ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
if (ret)
goto out_add;
/* Serialise with the remote context */
ret = intel_context_prepare_remote_request(ce, rq);
if (ret == 0)
ret = gen8_emit_rpcs_config(rq, ce, sseu);
/*
* Guarantee context image and the timeline remains pinned until the
* modifying request is retired by setting the ce activity tracker.
*
* But we only need to take one pin on the account of it. Or in other
* words transfer the pinned ce object to tracked active request.
*/
GEM_BUG_ON(i915_active_is_idle(&ce->active));
ret = i915_active_ref(&ce->active, rq->fence.context, rq);
if (ret)
goto out_add;
ret = gen8_emit_rpcs_config(rq, ce, sseu);
out_add:
i915_request_add(rq);
return ret;
}
@@ -1217,7 +1160,7 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
{
int ret;
GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
ret = intel_context_lock_pinned(ce);
if (ret)
@@ -1239,7 +1182,7 @@ unlock:
static int
intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
struct drm_i915_private *i915 = ce->gem_context->i915;
struct drm_i915_private *i915 = ce->engine->i915;
int ret;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
@@ -1636,6 +1579,7 @@ set_engines(struct i915_gem_context *ctx,
for (n = 0; n < num_engines; n++) {
struct i915_engine_class_instance ci;
struct intel_engine_cs *engine;
struct intel_context *ce;
if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
__free_engines(set.engines, n);
@@ -1658,11 +1602,13 @@ set_engines(struct i915_gem_context *ctx,
return -ENOENT;
}
set.engines->engines[n] = intel_context_create(ctx, engine);
if (!set.engines->engines[n]) {
ce = intel_context_create(ctx, engine);
if (IS_ERR(ce)) {
__free_engines(set.engines, n);
return -ENOMEM;
return PTR_ERR(ce);
}
set.engines->engines[n] = ce;
}
set.engines->num_engines = num_engines;
@@ -1776,7 +1722,7 @@ get_engines(struct i915_gem_context *ctx,
if (e->engines[n]) {
ci.engine_class = e->engines[n]->engine->uabi_class;
ci.engine_instance = e->engines[n]->engine->instance;
ci.engine_instance = e->engines[n]->engine->uabi_instance;
}
if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
@@ -2011,13 +1957,8 @@ unlock:
static int clone_timeline(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
if (src->timeline) {
GEM_BUG_ON(src->timeline == dst->timeline);
if (dst->timeline)
i915_timeline_put(dst->timeline);
dst->timeline = i915_timeline_get(src->timeline);
}
if (src->timeline)
__assign_timeline(dst, src->timeline);
return 0;
}
@@ -2141,7 +2082,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
return -EINVAL;
ret = i915_terminally_wedged(i915);
ret = intel_gt_terminally_wedged(&i915->gt);
if (ret)
return ret;
@@ -2287,8 +2228,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->size = 0;
if (ctx->vm)
args->value = ctx->vm->total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
else if (to_i915(dev)->ggtt.alias)
args->value = to_i915(dev)->ggtt.alias->vm.total;
else
args->value = to_i915(dev)->ggtt.vm.total;
break;


@@ -141,8 +141,6 @@ int i915_gem_context_open(struct drm_i915_private *i915,
void i915_gem_context_close(struct drm_file *file);
void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -197,12 +195,6 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
mutex_unlock(&ctx->engines_mutex);
}
static inline struct intel_context *
i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx)
{
return i915_gem_context_engines(ctx)->engines[idx];
}
static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{


@@ -26,7 +26,7 @@ struct pid;
struct drm_i915_private;
struct drm_i915_file_private;
struct i915_address_space;
struct i915_timeline;
struct intel_timeline;
struct intel_ring;
struct i915_gem_engines {
@@ -77,7 +77,7 @@ struct i915_gem_context {
struct i915_gem_engines __rcu *engines;
struct mutex engines_mutex; /* guards writes to engines */
struct i915_timeline *timeline;
struct intel_timeline *timeline;
/**
* @vm: unique address space (GTT)
@@ -169,11 +169,6 @@ struct i915_gem_context {
struct i915_sched_attr sched;
/** ring_size: size for allocating the per-engine ring buffer */
u32 ring_size;
/** desc_template: invariant fields for the HW context descriptor */
u32 desc_template;
/** guilty_count: How many times this context has caused a GPU hang. */
atomic_t guilty_count;
/**


@@ -6,7 +6,7 @@
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/reservation.h>
#include <linux/dma-resv.h>
#include "i915_drv.h"
#include "i915_gem_object.h"
@@ -204,8 +204,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -222,7 +221,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
return drm_gem_dmabuf_export(dev, &exp_info);
return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)


@@ -221,6 +221,8 @@ restart:
* state and so involves less work.
*/
if (atomic_read(&obj->bind_count)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
/* Before we change the PTE, the GPU must not be accessing it.
* If we wait upon the object, we know that all the bound
* VMA are no longer active.
@@ -232,18 +234,30 @@ restart:
if (ret)
return ret;
if (!HAS_LLC(to_i915(obj->base.dev)) &&
cache_level != I915_CACHE_NONE) {
/* Access to snoopable pages through the GTT is
if (!HAS_LLC(i915) && cache_level != I915_CACHE_NONE) {
intel_wakeref_t wakeref =
intel_runtime_pm_get(&i915->runtime_pm);
/*
* Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
* lockup. Relinquish the CPU mmapping to force
* userspace to refault in the pages and we can
* then double check if the GTT mapping is still
* valid for that pointer access.
*/
i915_gem_object_release_mmap(obj);
ret = mutex_lock_interruptible(&i915->ggtt.vm.mutex);
if (ret) {
intel_runtime_pm_put(&i915->runtime_pm,
wakeref);
return ret;
}
/* As we no longer need a fence for GTT access,
if (obj->userfault_count)
__i915_gem_object_release_mmap(obj);
/*
* As we no longer need a fence for GTT access,
* we can relinquish it now (and so prevent having
* to steal a fence from someone else on the next
* fence request). Note GPU activity would have
@@ -251,12 +265,17 @@ restart:
* supposed to be linear.
*/
for_each_ggtt_vma(vma, obj) {
ret = i915_vma_put_fence(vma);
ret = i915_vma_revoke_fence(vma);
if (ret)
return ret;
break;
}
mutex_unlock(&i915->ggtt.vm.mutex);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
if (ret)
return ret;
} else {
/* We either have incoherent backing store and
/*
* We either have incoherent backing store and
* so no GTT access or the architecture is fully
* coherent. In such cases, existing GTT mmaps
* ignore the cache bit in the PTE and we can
@@ -551,13 +570,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
return (domain == I915_GEM_DOMAIN_GTT ?
obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
@@ -661,9 +673,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
i915_gem_object_unlock(obj);
if (write_domain != 0)
intel_fb_obj_invalidate(obj,
fb_write_origin(obj, write_domain));
if (write_domain)
intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -783,7 +794,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
}
out:
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
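
Aside: a pattern worth noting in the set-cache-level hunk above: a runtime-PM wakeref is taken before the GGTT vm.mutex, and both are released in reverse order on both the error and success paths. A rough userspace analogue of that acquire/unwind shape, with a hypothetical counter standing in for the wakeref:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t vm_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int wakeref_count;

    static int  wakeref_get(void)   { return ++wakeref_count; }
    static void wakeref_put(int w)  { (void)w; --wakeref_count; }

    static int revoke_mappings(void)
    {
        int wakeref = wakeref_get();
        int ret = pthread_mutex_lock(&vm_mutex);

        if (ret) {
            wakeref_put(wakeref);   /* unwind in reverse order */
            return ret;
        }

        printf("mappings revoked under lock\n");

        pthread_mutex_unlock(&vm_mutex);
        wakeref_put(wakeref);
        return 0;
    }

    int main(void) { return revoke_mappings(); }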


@@ -5,7 +5,7 @@
*/
#include <linux/intel-iommu.h>
#include <linux/reservation.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -16,13 +16,15 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "i915_gem_ioctls.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_trace.h"
#include "intel_drv.h"
enum {
FORCE_CPU_RELOC = 1,
@@ -222,7 +224,6 @@ struct i915_execbuffer {
struct intel_engine_cs *engine; /** engine to queue the request to */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
struct i915_address_space *vm; /** GTT and vma for the request */
struct i915_request *request; /** our request to build */
struct i915_vma *batch; /** identity of the batch obj/vma */
@@ -696,7 +697,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
case 1:
/* Too fragmented, unbind everything and retry */
err = i915_gem_evict_vm(eb->vm);
err = i915_gem_evict_vm(eb->context->vm);
if (err)
return err;
break;
@@ -724,12 +725,8 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->gem_context = ctx;
if (ctx->vm) {
eb->vm = ctx->vm;
if (ctx->vm)
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
} else {
eb->vm = &eb->i915->ggtt.vm;
}
eb->context_flags = 0;
if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
@@ -738,63 +735,6 @@ static int eb_select_context(struct i915_execbuffer *eb)
return 0;
}
static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
{
struct i915_request *rq;
/*
* Completely unscientific finger-in-the-air estimates for suitable
* maximum user request size (to avoid blocking) and then backoff.
*/
if (intel_ring_update_space(ring) >= PAGE_SIZE)
return NULL;
/*
* Find a request that after waiting upon, there will be at least half
* the ring available. The hysteresis allows us to compete for the
* shared ring and should mean that we sleep less often prior to
* claiming our resources, but not so long that the ring completely
* drains before we can submit our next request.
*/
list_for_each_entry(rq, &ring->request_list, ring_link) {
if (__intel_ring_space(rq->postfix,
ring->emit, ring->size) > ring->size / 2)
break;
}
if (&rq->ring_link == &ring->request_list)
return NULL; /* weird, we will check again later for real */
return i915_request_get(rq);
}
static int eb_wait_for_ring(const struct i915_execbuffer *eb)
{
struct i915_request *rq;
int ret = 0;
/*
* Apply a light amount of backpressure to prevent excessive hogs
* from blocking waiting for space whilst holding struct_mutex and
* keeping all of their resources pinned.
*/
rq = __eb_wait_for_ring(eb->context->ring);
if (rq) {
mutex_unlock(&eb->i915->drm.struct_mutex);
if (i915_request_wait(rq,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT) < 0)
ret = -EINTR;
i915_request_put(rq);
mutex_lock(&eb->i915->drm.struct_mutex);
}
return ret;
}
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
@@ -831,7 +771,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err_vma;
}
vma = i915_vma_instance(obj, eb->vm, NULL);
vma = i915_vma_instance(obj, eb->context->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -994,7 +934,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
i915_gem_object_unpin_map(cache->rq->batch->obj);
i915_gem_chipset_flush(cache->rq->i915);
intel_gt_chipset_flush(cache->rq->engine->gt);
i915_request_add(cache->rq);
cache->rq = NULL;
@@ -1018,11 +958,12 @@ static void reloc_cache_reset(struct reloc_cache *cache)
kunmap_atomic(vaddr);
i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
} else {
wmb();
io_mapping_unmap_atomic((void __iomem *)vaddr);
if (cache->node.allocated) {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
if (cache->node.allocated) {
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
@@ -1077,11 +1018,15 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
void *vaddr;
if (cache->vaddr) {
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
struct i915_vma *vma;
int err;
if (i915_gem_object_is_tiled(obj))
return ERR_PTR(-EINVAL);
if (use_cpu_reloc(cache, obj))
return NULL;
@@ -1093,8 +1038,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONBLOCK |
PIN_NONFAULT);
PIN_NONBLOCK /* NOWARN */ |
PIN_NOEVICT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
err = drm_mm_insert_node_in_range
@@ -1105,12 +1050,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
err = i915_vma_put_fence(vma);
if (err) {
i915_vma_unpin(vma);
return ERR_PTR(err);
}
cache->node.start = vma->node.start;
cache->node.mm = (void *)vma;
}
@@ -1118,7 +1057,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
wmb();
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@@ -1201,25 +1139,26 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
unsigned int len)
{
struct reloc_cache *cache = &eb->reloc_cache;
struct drm_i915_gem_object *obj;
struct intel_engine_pool_node *pool;
struct i915_request *rq;
struct i915_vma *batch;
u32 *cmd;
int err;
obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
if (IS_ERR(pool))
return PTR_ERR(pool);
cmd = i915_gem_object_pin_map(obj,
cmd = i915_gem_object_pin_map(pool->obj,
cache->has_llc ?
I915_MAP_FORCE_WB :
I915_MAP_FORCE_WC);
i915_gem_object_unpin_pages(obj);
if (IS_ERR(cmd))
return PTR_ERR(cmd);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_pool;
}
batch = i915_vma_instance(obj, vma->vm, NULL);
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_unmap;
@@ -1235,6 +1174,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_unpin;
}
err = intel_engine_pool_mark_active(pool, rq);
if (err)
goto err_request;
err = reloc_move_to_gpu(rq, vma);
if (err)
goto err_request;
@@ -1246,8 +1189,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto skip_request;
i915_vma_lock(batch);
GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
err = i915_vma_move_to_active(batch, rq, 0);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err)
goto skip_request;
@@ -1260,7 +1204,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
cache->rq_size = 0;
/* Return with batch mapping (cmd) still pinned */
return 0;
goto out_pool;
skip_request:
i915_request_skip(rq, err);
@@ -1269,7 +1213,9 @@ err_request:
err_unpin:
i915_vma_unpin(batch);
err_unmap:
i915_gem_object_unpin_map(obj);
i915_gem_object_unpin_map(pool->obj);
out_pool:
intel_engine_pool_put(pool);
return err;
}
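
Aside: the relocation batch now comes from the per-engine buffer pool rather than the old batch_pool: acquire a node, mark it active against the request so it survives until retirement, and drop the acquisition reference on every exit path. A toy sketch of that lifecycle, with stand-in functions rather than the driver's:

    #include <stdio.h>

    struct pool_node { int refs; };

    static struct pool_node *pool_get(void)
    {
        static struct pool_node node;
        node.refs++;
        return &node;
    }

    static void pool_put(struct pool_node *n)         { n->refs--; }
    static int  pool_mark_active(struct pool_node *n) { (void)n; return 0; }

    static int build_batch(void)
    {
        struct pool_node *pool = pool_get();
        int err = pool_mark_active(pool);  /* keep alive until the request retires */

        if (!err)
            printf("batch emitted from pooled object\n");

        pool_put(pool);                    /* drop our acquisition reference */
        return err;
    }

    int main(void) { return build_batch(); }
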
@@ -1317,7 +1263,7 @@ relocate_entry(struct i915_vma *vma,
if (!eb->reloc_cache.vaddr &&
(DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
!reservation_object_test_signaled_rcu(vma->resv, true))) {
!dma_resv_test_signaled_rcu(vma->resv, true))) {
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
u32 *batch;
@@ -1952,7 +1898,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
eb->exec = NULL;
/* Unconditionally flush any chipset caches (for streaming writes). */
i915_gem_chipset_flush(eb->i915);
intel_gt_chipset_flush(eb->engine->gt);
return 0;
err_skip:
@@ -2011,18 +1957,17 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
struct drm_i915_gem_object *shadow_batch_obj;
struct intel_engine_pool_node *pool;
struct i915_vma *vma;
int err;
shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
PAGE_ALIGN(eb->batch_len));
if (IS_ERR(shadow_batch_obj))
return ERR_CAST(shadow_batch_obj);
pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
if (IS_ERR(pool))
return ERR_CAST(pool);
err = intel_engine_cmd_parser(eb->engine,
eb->batch->obj,
shadow_batch_obj,
pool->obj,
eb->batch_start_offset,
eb->batch_len,
is_master);
@@ -2031,12 +1976,12 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
vma = NULL;
else
vma = ERR_PTR(err);
goto out;
goto err;
}
vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
if (IS_ERR(vma))
goto out;
goto err;
eb->vma[eb->buffer_count] = i915_vma_get(vma);
eb->flags[eb->buffer_count] =
@@ -2044,16 +1989,24 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
vma->exec_flags = &eb->flags[eb->buffer_count];
eb->buffer_count++;
out:
i915_gem_object_unpin_pages(shadow_batch_obj);
vma->private = pool;
return vma;
err:
intel_engine_pool_put(pool);
return vma;
}
static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
rq->file_priv = file->driver_priv;
list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
struct drm_i915_file_private *file_priv = file->driver_priv;
rq->file_priv = file_priv;
spin_lock(&file_priv->mm.lock);
list_add_tail(&rq->client_link, &file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);
}
static int eb_submit(struct i915_execbuffer *eb)
@@ -2093,6 +2046,12 @@ static int eb_submit(struct i915_execbuffer *eb)
return 0;
}
static int num_vcs_engines(const struct drm_i915_private *i915)
{
return hweight64(INTEL_INFO(i915)->engine_mask &
GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
}
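
Aside: num_vcs_engines() above is just a population count over the VCS slice of the engine mask. A userspace rendering of the same arithmetic; the constants are illustrative stand-ins, the real VCS0/I915_MAX_VCS values live in the driver's headers:

    #include <stdio.h>

    #define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        unsigned long long engine_mask = 0x35; /* made-up engine bitmap */
        int vcs0 = 2, max_vcs = 4;
        unsigned long long vcs_mask =
            engine_mask & GENMASK_ULL(vcs0 + max_vcs - 1, vcs0);

        /* __builtin_popcountll: GCC/Clang builtin, hweight64() analogue */
        printf("VCS engines: %d\n", __builtin_popcountll(vcs_mask));
        return 0;
    }
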
/*
* Find one BSD ring to dispatch the corresponding BSD command.
* The engine index is returned.
@@ -2105,8 +2064,8 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
file_priv->bsd_engine = atomic_fetch_xor(1,
&dev_priv->mm.bsd_engine_dispatch_index);
file_priv->bsd_engine =
get_random_int() % num_vcs_engines(dev_priv);
return file_priv->bsd_engine;
}
@@ -2119,15 +2078,80 @@ static const enum intel_engine_id user_ring_map[] = {
[I915_EXEC_VEBOX] = VECS0
};
static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
static struct i915_request *eb_throttle(struct intel_context *ce)
{
struct intel_ring *ring = ce->ring;
struct intel_timeline *tl = ce->timeline;
struct i915_request *rq;
/*
* Completely unscientific finger-in-the-air estimates for suitable
* maximum user request size (to avoid blocking) and then backoff.
*/
if (intel_ring_update_space(ring) >= PAGE_SIZE)
return NULL;
/*
* Find a request that after waiting upon, there will be at least half
* the ring available. The hysteresis allows us to compete for the
* shared ring and should mean that we sleep less often prior to
* claiming our resources, but not so long that the ring completely
* drains before we can submit our next request.
*/
list_for_each_entry(rq, &tl->requests, link) {
if (rq->ring != ring)
continue;
if (__intel_ring_space(rq->postfix,
ring->emit, ring->size) > ring->size / 2)
break;
}
if (&rq->link == &tl->requests)
return NULL; /* weird, we will check again later for real */
return i915_request_get(rq);
}
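
Aside: eb_throttle() keeps the old heuristic but walks the context's timeline instead of the ring's request list: find the first request whose completion would leave at least half the ring free, and wait on that one. A small model of the space calculation with invented sizes; the real __intel_ring_space() also reserves some headroom:

    #include <stdio.h>

    #define RING_SIZE 4096

    /* space available once everything up to 'postfix' has retired */
    static int space_after(int postfix, int emit)
    {
        int space = postfix - emit;
        if (space <= 0)
            space += RING_SIZE;
        return space;
    }

    int main(void)
    {
        int emit = 3900;                     /* current write offset */
        int postfix[] = { 512, 1400, 2600 }; /* per-request end offsets */

        for (unsigned i = 0; i < sizeof(postfix) / sizeof(postfix[0]); i++) {
            if (space_after(postfix[i], emit) > RING_SIZE / 2) {
                printf("wait on request %u\n", i);
                return 0;
            }
        }
        printf("no single wait frees half the ring\n");
        return 0;
    }
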
static int
__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
{
int err;
if (likely(atomic_inc_not_zero(&ce->pin_count)))
return 0;
err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
if (err)
return err;
err = __intel_context_do_pin(ce);
mutex_unlock(&eb->i915->drm.struct_mutex);
return err;
}
static void
__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
{
if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
return;
mutex_lock(&eb->i915->drm.struct_mutex);
intel_context_unpin(ce);
mutex_unlock(&eb->i915->drm.struct_mutex);
}
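
Aside: __eb_pin_context() introduces a classic lockless fast path: atomic_inc_not_zero() takes an extra pin only if the context is already pinned, so only the 0 -> 1 transition falls back to struct_mutex. A userspace approximation with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int pin_count;

    static bool inc_not_zero(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old != 0) {
            if (atomic_compare_exchange_weak(v, &old, old + 1))
                return true;  /* fast path: already pinned */
        }
        return false;         /* caller must take the lock */
    }

    int main(void)
    {
        printf("fast path: %d\n", inc_not_zero(&pin_count)); /* 0: slow path */
        atomic_store(&pin_count, 1);                         /* first pin done */
        printf("fast path: %d\n", inc_not_zero(&pin_count)); /* 1: fast path */
        return 0;
    }
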
static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
struct intel_timeline *tl;
struct i915_request *rq;
int err;
/*
* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
err = i915_terminally_wedged(eb->i915);
err = intel_gt_terminally_wedged(ce->engine->gt);
if (err)
return err;
@@ -2136,18 +2160,64 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
err = intel_context_pin(ce);
err = __eb_pin_context(eb, ce);
if (err)
return err;
/*
* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
* process, and require the engine to be kept awake between accesses.
* Upon dispatch, we acquire another prolonged wakeref that we hold
* until the timeline is idle, which in turn releases the wakeref
* taken on the engine, and the parent device.
*/
tl = intel_context_timeline_lock(ce);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto err_unpin;
}
intel_context_enter(ce);
rq = eb_throttle(ce);
intel_context_timeline_unlock(tl);
if (rq) {
if (i915_request_wait(rq,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT) < 0) {
i915_request_put(rq);
err = -EINTR;
goto err_exit;
}
i915_request_put(rq);
}
eb->engine = ce->engine;
eb->context = ce;
return 0;
err_exit:
mutex_lock(&tl->mutex);
intel_context_exit(ce);
intel_context_timeline_unlock(tl);
err_unpin:
__eb_unpin_context(eb, ce);
return err;
}
static void eb_unpin_context(struct i915_execbuffer *eb)
static void eb_unpin_engine(struct i915_execbuffer *eb)
{
intel_context_unpin(eb->context);
struct intel_context *ce = eb->context;
struct intel_timeline *tl = ce->timeline;
mutex_lock(&tl->mutex);
intel_context_exit(ce);
mutex_unlock(&tl->mutex);
__eb_unpin_context(eb, ce);
}
static unsigned int
@@ -2165,7 +2235,7 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
return -1;
}
if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) {
if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
@@ -2192,9 +2262,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
}
static int
eb_select_engine(struct i915_execbuffer *eb,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args)
eb_pin_engine(struct i915_execbuffer *eb,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args)
{
struct intel_context *ce;
unsigned int idx;
@@ -2209,7 +2279,7 @@ eb_select_engine(struct i915_execbuffer *eb,
if (IS_ERR(ce))
return PTR_ERR(ce);
err = eb_pin_context(eb, ce);
err = __eb_pin_engine(eb, ce);
intel_context_put(ce);
return err;
@@ -2427,25 +2497,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_destroy;
/*
* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
* process. Upon first dispatch, we acquire another prolonged
* wakeref that we hold until the GPU has been idle for at least
* 100ms.
*/
intel_gt_pm_get(eb.i915);
err = eb_pin_engine(&eb, file, args);
if (unlikely(err))
goto err_context;
err = i915_mutex_lock_interruptible(dev);
if (err)
goto err_rpm;
err = eb_select_engine(&eb, file, args);
if (unlikely(err))
goto err_unlock;
err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
if (unlikely(err))
goto err_engine;
err = eb_relocate(&eb);
@@ -2572,6 +2629,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* to explicitly hold another reference here.
*/
eb.request->batch = eb.batch;
if (eb.batch->private)
intel_engine_pool_mark_active(eb.batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
@@ -2596,15 +2655,15 @@ err_request:
err_batch_unpin:
if (eb.batch_flags & I915_DISPATCH_SECURE)
i915_vma_unpin(eb.batch);
if (eb.batch->private)
intel_engine_pool_put(eb.batch->private);
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
err_engine:
eb_unpin_context(&eb);
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_gt_pm_put(eb.i915);
err_engine:
eb_unpin_engine(&eb);
err_context:
i915_gem_context_put(eb.gem_context);
err_destroy:
eb_destroy(&eb);


@@ -69,8 +69,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
i915_sw_fence_init(&stub->chain, stub_notify);
dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
to_i915(obj->base.dev)->mm.unordered_timeline,
0);
0, 0);
if (i915_sw_fence_await_reservation(&stub->chain,
obj->base.resv, NULL,
@@ -78,7 +77,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
I915_FENCE_GFP) < 0)
goto err;
reservation_object_add_excl_fence(obj->base.resv, &stub->dma);
dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
return &stub->dma;


@@ -7,12 +7,14 @@
#include <linux/mman.h>
#include <linux/sizes.h>
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_trace.h"
#include "i915_vma.h"
#include "intel_drv.h"
static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
@@ -99,9 +101,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
up_write(&mm->mmap_sem);
if (IS_ERR_VALUE(addr))
goto err;
/* This may race, but that's ok, it only gets set */
WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
}
i915_gem_object_put(obj);
@@ -246,7 +245,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
wakeref = intel_runtime_pm_get(rpm);
srcu = i915_reset_trylock(i915);
srcu = intel_gt_reset_trylock(ggtt->vm.gt);
if (srcu < 0) {
ret = srcu;
goto err_rpm;
@@ -265,15 +264,15 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONBLOCK |
PIN_NONFAULT);
PIN_NONBLOCK /* NOWARN */ |
PIN_NOEVICT);
if (IS_ERR(vma)) {
/* Use a partial view if it is bigger than available space */
struct i915_ggtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
unsigned int flags;
flags = PIN_MAPPABLE;
flags = PIN_MAPPABLE | PIN_NOSEARCH;
if (view.type == I915_GGTT_VIEW_NORMAL)
flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
@@ -281,10 +280,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
* Userspace is now writing through an untracked VMA, abandon
* all hope that the hardware is able to track future writes.
*/
obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
if (IS_ERR(vma) && !view.type) {
if (IS_ERR(vma)) {
flags = PIN_MAPPABLE;
view.type = I915_GGTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
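
Aside: the fault path above now degrades in stages: first a full-object, no-wait, no-evict pin, then a partial view, and only then a pin that may block and evict. A sketch of that tiered fallback; the flag values and the failure rule are illustrative, not the driver's:

    #include <stdio.h>

    #define PIN_MAPPABLE 0x1
    #define PIN_NONBLOCK 0x2
    #define PIN_NOEVICT  0x4

    static int try_pin(unsigned flags)
    {
        /* pretend only a blocking, evicting pin can succeed */
        return (flags & (PIN_NONBLOCK | PIN_NOEVICT)) ? -1 : 0;
    }

    int main(void)
    {
        unsigned tiers[] = {
            PIN_MAPPABLE | PIN_NONBLOCK | PIN_NOEVICT, /* full view, no waiting */
            PIN_MAPPABLE | PIN_NONBLOCK,               /* partial view */
            PIN_MAPPABLE,                              /* last resort: may evict */
        };

        for (unsigned i = 0; i < sizeof(tiers) / sizeof(tiers[0]); i++) {
            if (!try_pin(tiers[i])) {
                printf("pinned at tier %u\n", i);
                return 0;
            }
        }
        return 1;
    }
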
@@ -308,14 +306,17 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err_fence;
/* Mark as being mmapped into userspace for later revocation */
assert_rpm_wakelock_held(rpm);
/* Mark as being mmapped into userspace for later revocation */
mutex_lock(&i915->ggtt.vm.mutex);
if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
mutex_unlock(&i915->ggtt.vm.mutex);
if (IS_ENABLED(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
GEM_BUG_ON(!obj->userfault_count);
i915_vma_set_ggtt_write(vma);
@@ -326,7 +327,7 @@ err_unpin:
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_reset:
i915_reset_unlock(i915, srcu);
intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
intel_runtime_pm_put(rpm, wakeref);
i915_gem_object_unpin_pages(obj);
@@ -339,7 +340,7 @@ err:
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
if (!i915_terminally_wedged(i915))
if (!intel_gt_is_wedged(ggtt->vm.gt))
return VM_FAULT_SIGBUS;
/* else, fall through */
case -EAGAIN:
@@ -410,8 +411,8 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
* requirement that operations to the GGTT be made holding the RPM
* wakeref.
*/
lockdep_assert_held(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
mutex_lock(&i915->ggtt.vm.mutex);
if (!obj->userfault_count)
goto out;
@@ -428,6 +429,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
wmb();
out:
mutex_unlock(&i915->ggtt.vm.mutex);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}


@@ -23,12 +23,13 @@
*/
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
#include "i915_trace.h"
static struct i915_global_object {
struct i915_global base;
@@ -45,16 +46,6 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
return kmem_cache_free(global.slab_objects, obj);
}
static void
frontbuffer_retire(struct i915_active_request *active,
struct i915_request *request)
{
struct drm_i915_gem_object *obj =
container_of(active, typeof(*obj), frontbuffer_write);
intel_fb_obj_flush(obj, ORIGIN_CS);
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
@@ -63,17 +54,14 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
INIT_LIST_HEAD(&obj->mm.link);
INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
init_rcu_head(&obj->rcu);
obj->ops = ops;
obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
i915_active_request_init(&obj->frontbuffer_write,
NULL, frontbuffer_retire);
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
@@ -146,6 +134,19 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
}
}
static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
struct drm_i915_gem_object *obj =
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
dma_resv_fini(&obj->base._resv);
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
atomic_dec(&i915->mm.free_count);
}
static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
@@ -160,7 +161,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
GEM_BUG_ON(i915_gem_object_is_active(obj));
list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
GEM_BUG_ON(i915_vma_is_active(vma));
vma->flags &= ~I915_VMA_PIN_MASK;
@@ -169,110 +169,70 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(!list_empty(&obj->vma.list));
GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
/*
* This serializes freeing with the shrinker. Since the free
* is delayed, first by RCU then by the workqueue, we want the
* shrinker to be able to free pages of unreferenced objects,
* or else we may oom whilst there are plenty of deferred
* freed objects.
*/
if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_shrinkable(obj)) {
unsigned long flags;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
list_del_init(&obj->mm.link);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
GEM_BUG_ON(!list_empty(&obj->lut_list));
if (obj->ops->release)
obj->ops->release(obj);
atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
bitmap_free(obj->bit_17);
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
drm_gem_object_release(&obj->base);
drm_gem_free_mmap_offset(&obj->base);
bitmap_free(obj->bit_17);
i915_gem_object_free(obj);
if (obj->ops->release)
obj->ops->release(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
atomic_dec(&i915->mm.free_count);
cond_resched();
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
struct llist_node *freed;
struct llist_node *freed = llist_del_all(&i915->mm.free_list);
/* Free the oldest, most stale object to keep the free_list short */
freed = NULL;
if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
/* Only one consumer of llist_del_first() allowed */
spin_lock(&i915->mm.free_lock);
freed = llist_del_first(&i915->mm.free_list);
spin_unlock(&i915->mm.free_lock);
}
if (unlikely(freed)) {
freed->next = NULL;
if (unlikely(freed))
__i915_gem_free_objects(i915, freed);
}
}
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, struct drm_i915_private, mm.free_work);
struct llist_node *freed;
/*
* All file-owned VMA should have been released by this point through
* i915_gem_close_object(), or earlier by i915_gem_context_close().
* However, the object may also be bound into the global GTT (e.g.
* older GPUs without per-process support, or for direct access through
* the GTT either for the user or for scanout). Those VMA still need to
* be unbound now.
*/
spin_lock(&i915->mm.free_lock);
while ((freed = llist_del_all(&i915->mm.free_list))) {
spin_unlock(&i915->mm.free_lock);
__i915_gem_free_objects(i915, freed);
if (need_resched())
return;
spin_lock(&i915->mm.free_lock);
}
spin_unlock(&i915->mm.free_lock);
i915_gem_flush_free_objects(i915);
}
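
Aside: the free-list consumer above is simplified to a single llist_del_all(): producers push freed objects lock-free, and the worker detaches the whole list with one atomic exchange. The same shape in portable C11, with a toy Treiber-style stack standing in for llist:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; int id; };

    static _Atomic(struct node *) free_list;

    static void defer_free(struct node *n)   /* llist_add() analogue */
    {
        n->next = atomic_load(&free_list);
        while (!atomic_compare_exchange_weak(&free_list, &n->next, n))
            ;
    }

    static void flush_free(void)             /* llist_del_all() analogue */
    {
        struct node *n = atomic_exchange(&free_list, NULL);

        while (n) {
            struct node *next = n->next;
            printf("freeing %d\n", n->id);
            free(n);
            n = next;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->id = i;
            defer_free(n);
        }
        flush_free();
        return 0;
    }
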
static void __i915_gem_free_object_rcu(struct rcu_head *head)
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj =
container_of(head, typeof(*obj), rcu);
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
/*
* We reuse obj->rcu for the freed list, so we had better not treat
* it like a rcu_head from this point forwards. And we expect all
* objects to be freed via this path.
* Before we free the object, make sure any pure RCU-only
* read-side critical sections are complete, e.g.
* i915_gem_busy_ioctl(). For the corresponding synchronized
* lookup see i915_gem_object_lookup_rcu().
*/
destroy_rcu_head(&obj->rcu);
atomic_inc(&i915->mm.free_count);
/*
* This serializes freeing with the shrinker. Since the free
* is delayed, first by RCU then by the workqueue, we want the
* shrinker to be able to free pages of unreferenced objects,
* or else we may oom whilst there are plenty of deferred
* freed objects.
*/
i915_gem_object_make_unshrinkable(obj);
/*
* Since we require blocking on struct_mutex to unbind the freed
@@ -288,27 +248,6 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
queue_work(i915->wq, &i915->mm.free_work);
}
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
/*
* Before we free the object, make sure any pure RCU-only
* read-side critical sections are complete, e.g.
* i915_gem_busy_ioctl(). For the corresponding synchronized
* lookup see i915_gem_object_lookup_rcu().
*/
atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
return (domain == I915_GEM_DOMAIN_GTT ?
obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
return !(obj->cache_level == I915_CACHE_NONE ||
@@ -319,7 +258,6 @@ void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
unsigned int flush_domains)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
assert_object_held(obj);
@@ -329,10 +267,10 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
i915_gem_flush_ggtt_writes(dev_priv);
for_each_ggtt_vma(vma, obj)
intel_gt_flush_ggtt_writes(vma->vm->gt);
intel_fb_obj_flush(obj,
fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
for_each_ggtt_vma(vma, obj) {
if (vma->iomap)
@@ -340,6 +278,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
i915_vma_unset_ggtt_write(vma);
}
break;
case I915_GEM_DOMAIN_WC:


@@ -81,7 +81,7 @@ i915_gem_object_lookup(struct drm_file *file, u32 handle)
}
__deprecated
extern struct drm_gem_object *
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);
__attribute__((nonnull))
@@ -99,22 +99,22 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
__drm_gem_object_put(&obj->base);
}
#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)
#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
reservation_object_lock(obj->base.resv, NULL);
dma_resv_lock(obj->base.resv, NULL);
}
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
return reservation_object_lock_interruptible(obj->base.resv, NULL);
return dma_resv_lock_interruptible(obj->base.resv, NULL);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
reservation_object_unlock(obj->base.resv);
dma_resv_unlock(obj->base.resv);
}
struct dma_fence *
@@ -158,16 +158,10 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
}
static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
return READ_ONCE(obj->active_count);
}
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
return READ_ONCE(obj->framebuffer_references);
return READ_ONCE(obj->frontbuffer);
}
static inline unsigned int
@@ -373,7 +367,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
struct dma_fence *fence;
rcu_read_lock();
fence = reservation_object_get_excl_rcu(obj->base.resv);
fence = dma_resv_get_excl_rcu(obj->base.resv);
rcu_read_unlock();
if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
@@ -400,6 +394,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
if (obj->cache_dirty)
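
Aside: the reservation_object -> dma_resv churn in this header is part of the tree-wide rename that landed with the dma-buf rework this cycle; the substitution is mechanical:

    reservation_object                    -> dma_resv
    reservation_object_lock               -> dma_resv_lock
    reservation_object_lock_interruptible -> dma_resv_lock_interruptible
    reservation_object_unlock             -> dma_resv_unlock
    reservation_object_assert_held        -> dma_resv_assert_held
    reservation_object_add_excl_fence     -> dma_resv_add_excl_fence
    reservation_object_get_excl_rcu       -> dma_resv_get_excl_rcu
    reservation_object_test_signaled_rcu  -> dma_resv_test_signaled_rcu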


@@ -3,59 +3,136 @@
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "i915_gem_clflush.h"
#include "i915_gem_object_blt.h"
#include "i915_gem_clflush.h"
#include "intel_drv.h"
int intel_emit_vma_fill_blt(struct i915_request *rq,
struct i915_vma *vma,
u32 value)
struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
struct i915_vma *vma,
u32 value)
{
u32 *cs;
struct drm_i915_private *i915 = ce->vm->i915;
const u32 block_size = S16_MAX * PAGE_SIZE;
struct intel_engine_pool_node *pool;
struct i915_vma *batch;
u64 offset;
u64 count;
u64 rem;
u32 size;
u32 *cmd;
int err;
cs = intel_ring_begin(rq, 8);
if (IS_ERR(cs))
return PTR_ERR(cs);
GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
intel_engine_pm_get(ce->engine);
if (INTEL_GEN(rq->i915) >= 8) {
*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
*cs++ = 0;
*cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
*cs++ = lower_32_bits(vma->node.start);
*cs++ = upper_32_bits(vma->node.start);
*cs++ = value;
*cs++ = MI_NOOP;
} else {
*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
*cs++ = 0;
*cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
*cs++ = vma->node.start;
*cs++ = value;
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
count = div_u64(vma->size, block_size);
size = (1 + 8 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
pool = intel_engine_pool_get(&ce->engine->pool, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
}
intel_ring_advance(rq, cs);
cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_put;
}
return 0;
rem = vma->size;
offset = vma->node.start;
do {
u32 size = min_t(u64, rem, block_size);
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
if (INTEL_GEN(i915) >= 8) {
*cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
*cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
*cmd++ = 0;
*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
*cmd++ = lower_32_bits(offset);
*cmd++ = upper_32_bits(offset);
*cmd++ = value;
} else {
*cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
*cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
*cmd++ = 0;
*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
*cmd++ = offset;
*cmd++ = value;
}
/* Allow ourselves to be preempted in between blocks. */
*cmd++ = MI_ARB_CHECK;
offset += size;
rem -= size;
} while (rem);
*cmd = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(ce->vm->gt);
i915_gem_object_unpin_map(pool->obj);
batch = i915_vma_instance(pool->obj, ce->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_put;
}
err = i915_vma_pin(batch, 0, 0, PIN_USER);
if (unlikely(err))
goto out_put;
batch->private = pool;
return batch;
out_put:
intel_engine_pool_put(pool);
out_pm:
intel_engine_pm_put(ce->engine);
return ERR_PTR(err);
}
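
Aside: sizing note for the new fill path above: each S16_MAX-page block costs at most 8 dwords (7 for the gen8+ blit plus the MI_ARB_CHECK), with one MI_BATCH_BUFFER_END dword at the tail, and the total is rounded up to a whole page, which also absorbs any partial trailing block. A worked example of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE  4096ULL
    #define S16_MAX    0x7fff
    #define BLOCK_SIZE (S16_MAX * PAGE_SIZE)

    int main(void)
    {
        uint64_t vma_size = 1ULL << 32;         /* 4 GiB object, for example */
        uint64_t count = vma_size / BLOCK_SIZE; /* full blocks */
        uint64_t size = (1 + 8 * count) * sizeof(uint32_t);

        size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); /* round_up() */
        printf("blocks=%llu batch=%llu bytes\n",
               (unsigned long long)count, (unsigned long long)size);
        return 0;
    }
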
int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
{
int err;
i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, false);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, 0);
i915_vma_unlock(vma);
if (unlikely(err))
return err;
return intel_engine_pool_mark_active(vma->private, rq);
}
void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
{
i915_vma_unpin(vma);
intel_engine_pool_put(vma->private);
intel_engine_pm_put(ce->engine);
}
int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
struct intel_context *ce,
u32 value)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_gem_context *ctx = ce->gem_context;
struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct i915_request *rq;
struct i915_vma *batch;
struct i915_vma *vma;
int err;
/* XXX: ce->vm please */
vma = i915_vma_instance(obj, vm, NULL);
vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -69,12 +146,22 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
i915_gem_object_unlock(obj);
}
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
batch = intel_emit_vma_fill_blt(ce, vma, value);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_unpin;
}
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
}
err = intel_emit_vma_mark_active(batch, rq);
if (unlikely(err))
goto out_request;
err = i915_request_await_object(rq, obj, true);
if (unlikely(err))
goto out_request;
@@ -86,22 +173,229 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
}
i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (unlikely(err))
goto out_request;
err = intel_emit_vma_fill_blt(rq, vma, value);
err = ce->engine->emit_bb_start(rq,
batch->node.start, batch->node.size,
0);
out_request:
if (unlikely(err))
i915_request_skip(rq, err);
i915_request_add(rq);
out_batch:
intel_emit_vma_release(ce, batch);
out_unpin:
i915_vma_unpin(vma);
return err;
}
struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
struct i915_vma *src,
struct i915_vma *dst)
{
struct drm_i915_private *i915 = ce->vm->i915;
const u32 block_size = S16_MAX * PAGE_SIZE;
struct intel_engine_pool_node *pool;
struct i915_vma *batch;
u64 src_offset, dst_offset;
u64 count, rem;
u32 size, *cmd;
int err;
GEM_BUG_ON(src->size != dst->size);
GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
intel_engine_pm_get(ce->engine);
count = div_u64(dst->size, block_size);
size = (1 + 11 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
pool = intel_engine_pool_get(&ce->engine->pool, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
}
cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_put;
}
rem = src->size;
src_offset = src->node.start;
dst_offset = dst->node.start;
do {
size = min_t(u64, rem, block_size);
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
if (INTEL_GEN(i915) >= 9) {
*cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
*cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
*cmd++ = 0;
*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
*cmd++ = lower_32_bits(dst_offset);
*cmd++ = upper_32_bits(dst_offset);
*cmd++ = 0;
*cmd++ = PAGE_SIZE;
*cmd++ = lower_32_bits(src_offset);
*cmd++ = upper_32_bits(src_offset);
} else if (INTEL_GEN(i915) >= 8) {
*cmd++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
*cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
*cmd++ = 0;
*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
*cmd++ = lower_32_bits(dst_offset);
*cmd++ = upper_32_bits(dst_offset);
*cmd++ = 0;
*cmd++ = PAGE_SIZE;
*cmd++ = lower_32_bits(src_offset);
*cmd++ = upper_32_bits(src_offset);
} else {
*cmd++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
*cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
*cmd++ = dst_offset;
*cmd++ = PAGE_SIZE;
*cmd++ = src_offset;
}
/* Allow ourselves to be preempted in between blocks. */
*cmd++ = MI_ARB_CHECK;
src_offset += size;
dst_offset += size;
rem -= size;
} while (rem);
*cmd = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(ce->vm->gt);
i915_gem_object_unpin_map(pool->obj);
batch = i915_vma_instance(pool->obj, ce->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_put;
}
err = i915_vma_pin(batch, 0, 0, PIN_USER);
if (unlikely(err))
goto out_put;
batch->private = pool;
return batch;
out_put:
intel_engine_pool_put(pool);
out_pm:
intel_engine_pm_put(ce->engine);
return ERR_PTR(err);
}
static int move_to_gpu(struct i915_vma *vma, struct i915_request *rq, bool write)
{
struct drm_i915_gem_object *obj = vma->obj;
if (obj->cache_dirty & ~obj->cache_coherent)
i915_gem_clflush_object(obj, 0);
return i915_request_await_object(rq, obj, write);
}
int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
struct drm_i915_gem_object *dst,
struct intel_context *ce)
{
struct drm_gem_object *objs[] = { &src->base, &dst->base };
struct i915_address_space *vm = ce->vm;
struct i915_vma *vma[2], *batch;
struct ww_acquire_ctx acquire;
struct i915_request *rq;
int err, i;
vma[0] = i915_vma_instance(src, vm, NULL);
if (IS_ERR(vma[0]))
return PTR_ERR(vma[0]);
err = i915_vma_pin(vma[0], 0, 0, PIN_USER);
if (unlikely(err))
return err;
vma[1] = i915_vma_instance(dst, vm, NULL);
if (IS_ERR(vma[1]))
goto out_unpin_src;
err = i915_vma_pin(vma[1], 0, 0, PIN_USER);
if (unlikely(err))
goto out_unpin_src;
batch = intel_emit_vma_copy_blt(ce, vma[0], vma[1]);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_unpin_dst;
}
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
}
err = intel_emit_vma_mark_active(batch, rq);
if (unlikely(err))
goto out_request;
err = drm_gem_lock_reservations(objs, ARRAY_SIZE(objs), &acquire);
if (unlikely(err))
goto out_request;
for (i = 0; i < ARRAY_SIZE(vma); i++) {
err = move_to_gpu(vma[i], rq, i);
if (unlikely(err))
goto out_unlock;
}
for (i = 0; i < ARRAY_SIZE(vma); i++) {
unsigned int flags = i ? EXEC_OBJECT_WRITE : 0;
err = i915_vma_move_to_active(vma[i], rq, flags);
if (unlikely(err))
goto out_unlock;
}
if (rq->engine->emit_init_breadcrumb) {
err = rq->engine->emit_init_breadcrumb(rq);
if (unlikely(err))
goto out_unlock;
}
err = rq->engine->emit_bb_start(rq,
batch->node.start, batch->node.size,
0);
out_unlock:
drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
out_request:
if (unlikely(err))
i915_request_skip(rq, err);
i915_request_add(rq);
out_batch:
intel_emit_vma_release(ce, batch);
out_unpin_dst:
i915_vma_unpin(vma[1]);
out_unpin_src:
i915_vma_unpin(vma[0]);
return err;
}
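
Aside: i915_gem_object_copy_blt() locks src and dst through a single ww acquire context (drm_gem_lock_reservations()) so that two concurrent copies in opposite directions cannot deadlock against each other. The essence, in a userspace sketch that uses address ordering in place of the ww machinery:

    #include <pthread.h>
    #include <stdio.h>

    struct buf { pthread_mutex_t lock; int id; };

    static void lock_pair(struct buf *a, struct buf *b)
    {
        /* order by address, a poor man's ww_mutex class ordering */
        if (a > b) { struct buf *t = a; a = b; b = t; }
        pthread_mutex_lock(&a->lock);
        pthread_mutex_lock(&b->lock);
    }

    static void unlock_pair(struct buf *a, struct buf *b)
    {
        pthread_mutex_unlock(&a->lock);
        pthread_mutex_unlock(&b->lock);
    }

    int main(void)
    {
        struct buf src = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct buf dst = { PTHREAD_MUTEX_INITIALIZER, 1 };

        lock_pair(&src, &dst);
        printf("both reservations held, safe to queue the copy\n");
        unlock_pair(&src, &dst);
        return 0;
    }
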
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_object_blt.c"
#endif


@@ -8,17 +8,30 @@
#include <linux/types.h>
struct drm_i915_gem_object;
struct intel_context;
struct i915_request;
struct i915_vma;
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_pool.h"
#include "i915_vma.h"
int intel_emit_vma_fill_blt(struct i915_request *rq,
struct i915_vma *vma,
u32 value);
struct drm_i915_gem_object;
struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
struct i915_vma *vma,
u32 value);
struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
struct i915_vma *src,
struct i915_vma *dst);
int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq);
void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma);
int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
struct intel_context *ce,
u32 value);
int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
struct drm_i915_gem_object *dst,
struct intel_context *ce);
#endif


@@ -13,6 +13,7 @@
#include "i915_selftest.h"
struct drm_i915_gem_object;
struct intel_frontbuffer;
/*
* struct i915_lut_handle tracks the fast lookups from handle to vma used
@@ -114,7 +115,6 @@ struct drm_i915_gem_object {
unsigned int userfault_count;
struct list_head userfault_link;
struct list_head batch_pool_link;
I915_SELFTEST_DECLARE(struct list_head st_link);
/*
@@ -142,9 +142,7 @@ struct drm_i915_gem_object {
*/
u16 write_domain;
atomic_t frontbuffer_bits;
unsigned int frontbuffer_ggtt_origin; /* write once */
struct i915_active_request frontbuffer_write;
struct intel_frontbuffer *frontbuffer;
/** Current tiling stride for the object, if it's tiled. */
unsigned int tiling_and_stride;
@@ -154,7 +152,6 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
atomic_t bind_count;
unsigned int active_count;
/** Count of how many global VMA are currently pinned for use by HW */
unsigned int pin_global;
@@ -226,9 +223,6 @@ struct drm_i915_gem_object {
bool quirked:1;
} mm;
/** References from framebuffers, locks out tiling changes. */
unsigned int framebuffer_references;
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;


@@ -153,24 +153,13 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
pages = fetch_and_zero(&obj->mm.pages);
if (IS_ERR_OR_NULL(pages))
return pages;
if (i915_gem_object_is_shrinkable(obj)) {
unsigned long flags;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
list_del(&obj->mm.link);
i915->mm.shrink_count--;
i915->mm.shrink_memory -= obj->base.size;
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
i915_gem_object_make_unshrinkable(obj);
if (obj->mm.mapping) {
void *ptr;


@@ -13,6 +13,7 @@
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
@@ -60,7 +61,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
vaddr += PAGE_SIZE;
}
i915_gem_chipset_flush(to_i915(obj->base.dev));
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st) {
@@ -132,16 +133,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
drm_pci_free(obj->base.dev, obj->phys_handle);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
static void phys_release(struct drm_i915_gem_object *obj)
{
i915_gem_object_unpin_pages(obj);
fput(obj->base.filp);
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,
.release = i915_gem_object_release_phys,
.release = phys_release,
};
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
@@ -158,7 +159,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (obj->ops != &i915_gem_shmem_ops)
return -EINVAL;
err = i915_gem_object_unbind(obj);
err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
if (err)
return err;


@@ -5,6 +5,7 @@
*/
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "i915_drv.h"
@@ -33,12 +34,9 @@ static void i915_gem_park(struct drm_i915_private *i915)
lockdep_assert_held(&i915->drm.struct_mutex);
for_each_engine(engine, i915, id) {
for_each_engine(engine, i915, id)
call_idle_barriers(engine); /* cleanup after wedging */
i915_gem_batch_pool_fini(&engine->batch_pool);
}
i915_timelines_park(i915);
i915_vma_parked(i915);
i915_globals_park();
@@ -54,7 +52,8 @@ static void idle_work_handler(struct work_struct *work)
mutex_lock(&i915->drm.struct_mutex);
intel_wakeref_lock(&i915->gt.wakeref);
park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work);
park = (!intel_wakeref_is_active(&i915->gt.wakeref) &&
!work_pending(work));
intel_wakeref_unlock(&i915->gt.wakeref);
if (park)
i915_gem_park(i915);
@@ -105,18 +104,18 @@ static int pm_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
bool result = !i915_terminally_wedged(i915);
bool result = !intel_gt_is_wedged(gt);
do {
if (i915_gem_wait_for_idle(i915,
if (i915_gem_wait_for_idle(gt->i915,
I915_WAIT_LOCKED |
I915_WAIT_FOR_IDLE_BOOST,
I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/* XXX hide warning from gem_eio */
if (i915_modparams.reset) {
dev_err(i915->drm.dev,
dev_err(gt->i915->drm.dev,
"Failed to idle engines, declaring wedged!\n");
GEM_TRACE_DUMP();
}
@@ -125,18 +124,20 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
* Forcibly cancel outstanding work and leave
* the gpu quiet.
*/
i915_gem_set_wedged(i915);
intel_gt_set_wedged(gt);
result = false;
}
} while (i915_retire_requests(i915) && result);
} while (i915_retire_requests(gt->i915) && result);
if (intel_gt_pm_wait_for_idle(gt))
result = false;
GEM_BUG_ON(i915->gt.awake);
return result;
}
bool i915_gem_load_power_context(struct drm_i915_private *i915)
{
return switch_to_kernel_context_sync(i915);
return switch_to_kernel_context_sync(&i915->gt);
}
void i915_gem_suspend(struct drm_i915_private *i915)
@@ -157,22 +158,15 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
switch_to_kernel_context_sync(i915);
switch_to_kernel_context_sync(&i915->gt);
mutex_unlock(&i915->drm.struct_mutex);
/*
* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
GEM_BUG_ON(i915->gt.awake);
flush_work(&i915->gem.idle_work);
cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&i915->gt.hangcheck.work);
i915_gem_drain_freed_objects(i915);
intel_uc_suspend(i915);
intel_uc_suspend(&i915->gt.uc);
}
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
@@ -237,7 +231,6 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
intel_uc_sanitize(i915);
i915_gem_sanitize(i915);
}
@@ -245,8 +238,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
{
GEM_TRACE("\n");
WARN_ON(i915->gt.awake);
mutex_lock(&i915->drm.struct_mutex);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
@@ -261,10 +252,10 @@ void i915_gem_resume(struct drm_i915_private *i915)
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
if (intel_gt_resume(i915))
if (intel_gt_resume(&i915->gt))
goto err_wedged;
intel_uc_resume(i915);
intel_uc_resume(&i915->gt.uc);
/* Always reload a context for powersaving. */
if (!i915_gem_load_power_context(i915))
@@ -276,10 +267,10 @@ out_unlock:
return;
err_wedged:
if (!i915_reset_failed(i915)) {
if (!intel_gt_is_wedged(&i915->gt)) {
dev_err(i915->drm.dev,
"Failed to re-initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(i915);
intel_gt_set_wedged(&i915->gt);
}
goto out_unlock;
}


@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
/*
* Move pages to appropriate lru and release the pagevec, decrementing the
@@ -414,6 +415,11 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
static void shmem_release(struct drm_i915_gem_object *obj)
{
fput(obj->base.filp);
}
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
@@ -424,6 +430,8 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.writeback = shmem_writeback,
.pwrite = shmem_pwrite,
.release = shmem_release,
};
static int create_shmem(struct drm_i915_private *i915,


@@ -88,10 +88,18 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
unsigned long shrink)
{
if (i915_gem_object_unbind(obj) == 0)
unsigned long flags;
flags = 0;
if (shrink & I915_SHRINK_ACTIVE)
flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
if (i915_gem_object_unbind(obj, flags) == 0)
__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
return !i915_gem_object_has_pages(obj);
}
@@ -169,7 +177,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
*/
trace_i915_gem_shrink(i915, target, shrink);
i915_retire_requests(i915);
/*
* Unbinding of objects will require HW access; let us not wake the
@@ -230,8 +237,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
continue;
if (!(shrink & I915_SHRINK_ACTIVE) &&
(i915_gem_object_is_active(obj) ||
i915_gem_object_is_framebuffer(obj)))
i915_gem_object_is_framebuffer(obj))
continue;
if (!(shrink & I915_SHRINK_BOUND) &&
@@ -246,7 +252,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
if (unsafe_drop_pages(obj)) {
if (unsafe_drop_pages(obj, shrink)) {
/* May arrive from get_pages on another bo */
mutex_lock_nested(&obj->mm.lock,
I915_MM_SHRINKER);
@@ -269,8 +275,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
i915_retire_requests(i915);
shrinker_unlock(i915, unlock);
if (nr_scanned)
@@ -427,12 +431,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
if (!shrinker_lock(i915, 0, &unlock))
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
if (i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT))
goto out;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
@@ -455,20 +453,13 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
}
mutex_unlock(&i915->ggtt.vm.mutex);
out:
shrinker_unlock(i915, unlock);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
/**
* i915_gem_shrinker_register - Register the i915 shrinker
* @i915: i915 device
*
* This function registers and sets up the i915 shrinker and OOM handler.
*/
void i915_gem_shrinker_register(struct drm_i915_private *i915)
void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
@@ -483,13 +474,7 @@ void i915_gem_shrinker_register(struct drm_i915_private *i915)
WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}
/**
* i915_gem_shrinker_unregister - Unregisters the i915 shrinker
* @i915: i915 device
*
* This function unregisters the i915 shrinker and OOM handler.
*/
void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
@@ -533,3 +518,61 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
if (unlock)
mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
}
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
/*
* We can only be called while the pages are pinned or when
* the pages are released. If pinned, we should only be called
* from a single caller under controlled conditions; and on release
* only one caller may release us. Neither of the two may cross.
*/
if (!list_empty(&obj->mm.link)) { /* pinned by caller */
struct drm_i915_private *i915 = obj_to_i915(obj);
unsigned long flags;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
GEM_BUG_ON(list_empty(&obj->mm.link));
list_del_init(&obj->mm.link);
i915->mm.shrink_count--;
i915->mm.shrink_memory -= obj->base.size;
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
struct list_head *head)
{
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
GEM_BUG_ON(!list_empty(&obj->mm.link));
if (i915_gem_object_is_shrinkable(obj)) {
struct drm_i915_private *i915 = obj_to_i915(obj);
unsigned long flags;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
GEM_BUG_ON(!kref_read(&obj->base.refcount));
list_add_tail(&obj->mm.link, head);
i915->mm.shrink_count++;
i915->mm.shrink_memory += obj->base.size;
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
__i915_gem_object_make_shrinkable(obj,
&obj_to_i915(obj)->mm.shrink_list);
}
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
__i915_gem_object_make_shrinkable(obj,
&obj_to_i915(obj)->mm.purge_list);
}
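
Aside: in the new helpers above, membership on mm.shrink_list/mm.purge_list is itself the "shrinkable" state, and the count/byte statistics move with it under mm.obj_lock. A compact userspace model of that bookkeeping, with a flag standing in for list membership:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long shrink_count, shrink_memory;

    struct obj { unsigned long size; bool on_list; };

    static void make_shrinkable(struct obj *o)
    {
        pthread_mutex_lock(&obj_lock);
        if (!o->on_list) {           /* list membership is the state bit */
            o->on_list = true;
            shrink_count++;
            shrink_memory += o->size;
        }
        pthread_mutex_unlock(&obj_lock);
    }

    static void make_unshrinkable(struct obj *o)
    {
        pthread_mutex_lock(&obj_lock);
        if (o->on_list) {
            o->on_list = false;
            shrink_count--;
            shrink_memory -= o->size;
        }
        pthread_mutex_unlock(&obj_lock);
    }

    int main(void)
    {
        struct obj o = { .size = 4096 };

        make_shrinkable(&o);
        printf("count=%lu mem=%lu\n", shrink_count, shrink_memory);
        make_unshrinkable(&o);
        printf("count=%lu mem=%lu\n", shrink_count, shrink_memory);
        return 0;
    }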


@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __I915_GEM_SHRINKER_H__
#define __I915_GEM_SHRINKER_H__
#include <linux/bits.h>
struct drm_i915_private;
struct mutex;
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
unsigned flags);
#define I915_SHRINK_UNBOUND BIT(0)
#define I915_SHRINK_BOUND BIT(1)
#define I915_SHRINK_ACTIVE BIT(2)
#define I915_SHRINK_VMAPS BIT(3)
#define I915_SHRINK_WRITEBACK BIT(4)
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_driver_register__shrinker(struct drm_i915_private *i915);
void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915);
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
struct mutex *mutex);
#endif /* __I915_GEM_SHRINKER_H__ */


@@ -11,6 +11,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_stolen.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -362,12 +363,16 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->mm.stolen_lock);
if (intel_vgpu_active(dev_priv)) {
DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
dev_notice(dev_priv->drm.dev,
"%s, disabling use of stolen memory\n",
"iGVT-g active");
return 0;
}
if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
dev_notice(dev_priv->drm.dev,
"%s, disabling use of stolen memory\n",
"DMAR active");
return 0;
}
@@ -529,8 +534,6 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
GEM_BUG_ON(!stolen);
__i915_gem_object_unpin_pages(obj);
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
}

Some files were not shown because too many files have changed in this diff.