Merge tag 'drm-intel-next-2020-04-30' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Driver Changes:

- Fix GitLab #1698: Performance regression with Linux 5.7-rc1 on
  Iris Plus 655 and 4K screen (Chris)
- Add Wa_14011059788 for Tigerlake (Matt A)
- Add per ctx batchbuffer wa for timestamp for Gen12 (Mika)
- Use indirect ctx bb to load cmd buffer control value
  from context image to avoid corruption (Mika)
- Enable DP Display Audio WA (Uma, Jani)
- Update forcewake firmware ranges for Icelake (Radhakrishna)
- Add missing display deinitialization for load-failure paths (Jose)
- Implement TC cold sequences for Icelake and Tigerlake (Jose)
- Unbreak enable_dpcd_backlight modparam (Lyude)
- Move the late flush_submission in retire to the end (Chris)
- Demote "Reducing compressed framebufer size" message to info (Peter)
- Push MST link retraining to the hotplug work (Ville)
- Hold obj->vma.lock over for_each_ggtt_vma() (Chris)
- Fix timeout handling during TypeC AUX power well enabling for ICL (Imre)
- Fix skl+ non-scaled pfit modes (Ville)
- Prefer soft-rc6 over RPS DOWN_TIMEOUT (Chris)
- Sanitize GT first before poisoning HWSP (Chris)
- Fix up clock RPS frequency readout (Chris)
- Avoid reusing the same logical CCID (Chris)
- Avoid dereferencing a dead context (Chris)
- Always enable busy-stats for execlists (Chris)
- Apply the aggressive downclocking to parking (Chris)
- Restore aggressive post-boost downclocking (Chris)

- Scrub execlists state on resume (Chris)
- Add debugfs attributes for LPSP (Ansuman)
- Improvements to kernel selftests (Chris, Mika)
- Add tiled blits selftest (Zbigniew)
- Fix error handling in __live_lrc_indirect_ctx_bb() (Dan)
- Add pre/post plane updates for SAGV (Stanislav)
- Add ICL PG3 PW ID for EHL (Anshuman)
- Fix Sphinx build duplicate label warning (Jani)
- Log an error on non-zero audio power refcount after unbind (Jani)
- Remove object_is_locked assertion from unpin_from_display_plane (Chris)
- Use single set of AUX powerwell ops for gen11+ (Matt R)
- Prefer drm_WARN_ON over WARN_ON (Pankaj)
- Poison residual state [HWSP] across resume (Chris, Tvrtko)
- Convert request-before-CS assertion to debug (Chris)
- Carefully order virtual_submission_tasklet (Chris)
- Check carefully for an idle engine in wait-for-idle (Chris)
- Only close vma we open (Chris)
- Trace RPS events (Chris)
- Use the RPM config register to determine clk frequencies (Chris)
- Drop rq->ring->vma peeking from error capture (Chris)
- Check preempt-timeout target before submit_ports (Chris)
- Check HWSP cacheline is valid before acquiring (Chris)
- Use proper fault mask in interrupt postinstall too (Matt R)
- Keep a no-frills swappable copy of the default context state (Chris)

- Add atomic helpers for bandwidth (Stanislav)
- Refactor setting dma info to a common helper from device info (Michael)
- Refactor DDI transcoder code for clarity (Ville)
- Extend PG3 power well ID to ICL (Anshuman)
- Refactor PFIT code for readability and future extensibility (Ville)
- Clarify code split between intel_ddi.c and intel_dp.c (Ville)
- Move out code to return the digital_port of the aux ch (Jose)
- Move rps.enabled/active and use of RPS interrupts to flags (Chris)
- Remove superfluous inlines and dead code (Jani)
- Re-disable -Wframe-address from top-level Makefile (Nick)
- Static checker and spelling fixes (Colin, Nathan)
- Split long lines (Ville)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200430124904.GA100924@jlahtine-desk.ger.corp.intel.com

@@ -36,15 +36,15 @@
#include "intel_panel.h"
#include "intel_vdsc.h"
static inline int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
static int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
{
return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
>> FREE_HEADER_CREDIT_SHIFT;
}
static inline int payload_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
static int payload_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
{
return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
>> FREE_PLOAD_CREDIT_SHIFT;
@@ -1195,7 +1195,7 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
WARN_ON(crtc_state->has_pch_encoder);
drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
@@ -1525,15 +1525,18 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode =
intel_connector->panel.fixed_mode;
intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
&pipe_config->hw.adjusted_mode;
int ret;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
ret = intel_pch_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
adjusted_mode->flags = 0;


@@ -125,7 +125,7 @@ intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct intel_plane_state *plane_state = to_intel_plane_state(state);
WARN_ON(plane_state->vma);
drm_WARN_ON(plane->dev, plane_state->vma);
__drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
if (plane_state->hw.fb)
@@ -396,7 +396,7 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
}
/* should never happen */
WARN_ON(1);
drm_WARN_ON(state->base.dev, 1);
return NULL;
}
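The conversions in this hunk are purely mechanical: each bare WARN_ON() gains the drm_device pointer so the resulting splat is tagged with the device that tripped it, which is what makes warnings attributable on multi-GPU systems. A minimal sketch of the pattern, using a hypothetical wrapper function purely for illustration (kernel fragment, not self-contained):

static void example_check_plane_state(struct drm_plane *plane,
                                      struct intel_plane_state *plane_state)
{
        /* Before: WARN_ON(plane_state->vma); */
        drm_WARN_ON(plane->dev, plane_state->vma);
}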


@@ -514,6 +514,143 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
mutex_unlock(&dev_priv->av_mutex);
}
/* Add a factor to take care of rounding and truncations */
#define ROUNDING_FACTOR 10000
static unsigned int get_hblank_early_enable_config(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
unsigned int link_clks_available, link_clks_required;
unsigned int tu_data, tu_line, link_clks_active;
unsigned int hblank_rise, hblank_early_prog;
unsigned int h_active, h_total, hblank_delta, pixel_clk, v_total;
unsigned int fec_coeff, refresh_rate, cdclk, vdsc_bpp;
h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay;
h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
v_total = crtc_state->hw.adjusted_mode.crtc_vtotal;
pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
refresh_rate = crtc_state->hw.adjusted_mode.vrefresh;
vdsc_bpp = crtc_state->dsc.compressed_bpp;
cdclk = i915->cdclk.hw.cdclk;
/* fec= 0.972261, using rounding multiplier of 1000000 */
fec_coeff = 972261;
drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u :"
"lanes = %u vdsc_bpp = %u cdclk = %u\n",
h_active, crtc_state->port_clock, crtc_state->lane_count,
vdsc_bpp, cdclk);
if (WARN_ON(!crtc_state->port_clock || !crtc_state->lane_count ||
!crtc_state->dsc.compressed_bpp || !i915->cdclk.hw.cdclk))
return 0;
link_clks_available = ((((h_total - h_active) *
((crtc_state->port_clock * ROUNDING_FACTOR) /
pixel_clk)) / ROUNDING_FACTOR) - 28);
link_clks_required = DIV_ROUND_UP(192000, (refresh_rate *
v_total)) * ((48 /
crtc_state->lane_count) + 2);
if (link_clks_available > link_clks_required)
hblank_delta = 32;
else
hblank_delta = DIV_ROUND_UP(((((5 * ROUNDING_FACTOR) /
crtc_state->port_clock) + ((5 *
ROUNDING_FACTOR) /
cdclk)) * pixel_clk),
ROUNDING_FACTOR);
tu_data = (pixel_clk * vdsc_bpp * 8) / ((crtc_state->port_clock *
crtc_state->lane_count * fec_coeff) / 1000000);
tu_line = (((h_active * crtc_state->port_clock * fec_coeff) /
1000000) / (64 * pixel_clk));
link_clks_active = (tu_line - 1) * 64 + tu_data;
hblank_rise = ((link_clks_active + 6 * DIV_ROUND_UP(link_clks_active,
250) + 4) * ((pixel_clk * ROUNDING_FACTOR) /
crtc_state->port_clock)) / ROUNDING_FACTOR;
hblank_early_prog = h_active - hblank_rise + hblank_delta;
return hblank_early_prog;
}
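/*
 * Unit conventions in the computation above: intermediate clock ratios
 * are carried in 1/ROUNDING_FACTOR (1/10000) fixed point, so for example
 * a 2:1 port_clock to pixel_clk ratio is held as 20000 until the final
 * division, while fec_coeff stores the FEC efficiency 0.972261 scaled by
 * 1000000.  The returned hblank_early_prog is bucketed into the
 * HBLANK_START_COUNT_32/64/96/128 fields by enable_audio_dsc_wa() below.
 */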
static unsigned int get_sample_room_req_config(const struct intel_crtc_state *crtc_state)
{
unsigned int h_active, h_total, pixel_clk;
unsigned int samples_room;
h_active = crtc_state->hw.adjusted_mode.hdisplay;
h_total = crtc_state->hw.adjusted_mode.htotal;
pixel_clk = crtc_state->hw.adjusted_mode.clock;
samples_room = ((((h_total - h_active) * ((crtc_state->port_clock *
ROUNDING_FACTOR) / pixel_clk)) /
ROUNDING_FACTOR) - 12) / ((48 /
crtc_state->lane_count) + 2);
return samples_room;
}
static void enable_audio_dsc_wa(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
unsigned int hblank_early_prog, samples_room;
unsigned int val;
if (INTEL_GEN(i915) < 11)
return;
val = intel_de_read(i915, AUD_CONFIG_BE);
if (INTEL_GEN(i915) == 11)
val |= HBLANK_EARLY_ENABLE_ICL(pipe);
else if (INTEL_GEN(i915) >= 12)
val |= HBLANK_EARLY_ENABLE_TGL(pipe);
if (crtc_state->dsc.compression_enable &&
(crtc_state->hw.adjusted_mode.hdisplay >= 3840 &&
crtc_state->hw.adjusted_mode.vdisplay >= 2160)) {
/* Get hblank early enable value required */
hblank_early_prog = get_hblank_early_enable_config(encoder,
crtc_state);
if (hblank_early_prog < 32) {
val &= ~HBLANK_START_COUNT_MASK(pipe);
val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32);
} else if (hblank_early_prog < 64) {
val &= ~HBLANK_START_COUNT_MASK(pipe);
val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64);
} else if (hblank_early_prog < 96) {
val &= ~HBLANK_START_COUNT_MASK(pipe);
val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96);
} else {
val &= ~HBLANK_START_COUNT_MASK(pipe);
val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128);
}
/* Get samples room value required */
samples_room = get_sample_room_req_config(crtc_state);
if (samples_room < 3) {
val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room);
} else {
/* Program 0 i.e "All Samples available in buffer" */
val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0);
}
}
intel_de_write(i915, AUD_CONFIG_BE, val);
}
#undef ROUNDING_FACTOR
static void hsw_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -531,6 +668,10 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
mutex_lock(&dev_priv->av_mutex);
/* Enable Audio WA for 4k DSC usecases */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
enable_audio_dsc_wa(encoder, crtc_state);
/* Enable audio presence detect, invalidate ELD */
tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
@@ -1138,6 +1279,10 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
drm_modeset_unlock_all(&dev_priv->drm);
device_link_remove(hda_kdev, i915_kdev);
if (dev_priv->audio_power_refcount)
drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n",
dev_priv->audio_power_refcount);
}
static const struct component_ops i915_audio_component_bind_ops = {


@@ -375,7 +375,29 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
return data_rate;
}
static struct intel_bw_state *
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *bw_state;
bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);
return to_intel_bw_state(bw_state);
}
struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *bw_state;
bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);
return to_intel_bw_state(bw_state);
}
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
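
Like the existing intel_atomic_get_bw_state(), the two new accessors above return the bandwidth state attached to an atomic commit, presumably evaluating to NULL when the bw object is not part of it. A hypothetical caller sketch (the function name and the bail-out behaviour are assumptions, not taken from this diff):

static void example_bw_pre_plane_update(struct intel_atomic_state *state)
{
        const struct intel_bw_state *old_bw_state =
                intel_atomic_get_old_bw_state(state);
        const struct intel_bw_state *new_bw_state =
                intel_atomic_get_new_bw_state(state);

        /* Assumed: nothing to do if the commit never touched the bw object. */
        if (!old_bw_state || !new_bw_state)
                return;

        /* ... compare old vs. new state and reprogram SAGV/QGV here ... */
}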


@@ -24,6 +24,15 @@ struct intel_bw_state {
#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state);
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state);
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);


@@ -33,6 +33,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
@@ -123,6 +124,8 @@ int intel_connector_register(struct drm_connector *connector)
goto err_backlight;
}
intel_connector_debugfs_add(connector);
return 0;
err_backlight:


@@ -294,7 +294,7 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
hsw_fdi_link_train(encoder, crtc_state);
intel_ddi_enable_pipe_clock(crtc_state);
intel_ddi_enable_pipe_clock(encoder, crtc_state);
}
static void hsw_enable_crt(struct intel_atomic_state *state,
@@ -308,6 +308,8 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
intel_ddi_enable_transcoder_func(encoder, crtc_state);
intel_enable_pipe(crtc_state);
lpt_pch_enable(crtc_state);


@@ -1261,7 +1261,10 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_ENABLE);
DP_TP_CTL_FDI_AUTOTRAIN |
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
DP_TP_CTL_LINK_TRAIN_PAT1 |
DP_TP_CTL_ENABLE);
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
* DDI E does not support port reversal, the functionality is
@@ -1337,7 +1340,10 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
/* Enable normal pixel sending for FDI */
intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_LINK_TRAIN_NORMAL | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_ENABLE);
DP_TP_CTL_FDI_AUTOTRAIN |
DP_TP_CTL_LINK_TRAIN_NORMAL |
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
DP_TP_CTL_ENABLE);
}
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
@@ -1351,27 +1357,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
}
static struct intel_encoder *
intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder, *ret = NULL;
int num_encoders = 0;
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
ret = encoder;
num_encoders++;
}
if (num_encoders != 1)
drm_WARN(dev, 1, "%d encoders on crtc for pipe %c\n",
num_encoders,
pipe_name(crtc->pipe));
BUG_ON(ret == NULL);
return ret;
}
static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -1512,10 +1497,10 @@ static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
* intel_ddi_config_transcoder_func().
*/
static u32
intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -1617,7 +1602,8 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
return temp;
}
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1640,7 +1626,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2);
}
ctl = intel_ddi_transcoder_func_reg_val_get(crtc_state);
ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
@@ -1651,14 +1637,15 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
* bit.
*/
static void
intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
ctl = intel_ddi_transcoder_func_reg_val_get(crtc_state);
ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
}
@@ -1927,7 +1914,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
return true;
}
static inline enum intel_display_power_domain
static enum intel_display_power_domain
intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
{
/* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
@@ -1986,11 +1973,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
intel_dsc_power_domain(crtc_state));
}
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = encoder->port;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -2654,8 +2641,9 @@ static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
static u32 translate_signal_level(int signal_levels)
static u32 translate_signal_level(struct intel_dp *intel_dp, int signal_levels)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int i;
for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
@@ -2663,8 +2651,9 @@ static u32 translate_signal_level(int signal_levels)
return i;
}
WARN(1, "Unsupported voltage swing/pre-emphasis level: 0x%x\n",
signal_levels);
drm_WARN(&i915->drm, 1,
"Unsupported voltage swing/pre-emphasis level: 0x%x\n",
signal_levels);
return 0;
}
@@ -2675,46 +2664,73 @@ static u32 intel_ddi_dp_level(struct intel_dp *intel_dp)
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
return translate_signal_level(signal_levels);
return translate_signal_level(intel_dp, signal_levels);
}
u32 bxt_signal_levels(struct intel_dp *intel_dp)
static void
tgl_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
struct intel_encoder *encoder = &dport->base;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int level = intel_ddi_dp_level(intel_dp);
if (INTEL_GEN(dev_priv) >= 12)
tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
else if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, level, encoder->type);
else
bxt_ddi_vswing_sequence(encoder, level, encoder->type);
return 0;
tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
}
u32 ddi_signal_levels(struct intel_dp *intel_dp)
static void
icl_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
struct intel_encoder *encoder = &dport->base;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int level = intel_ddi_dp_level(intel_dp);
icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
}
static void
cnl_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int level = intel_ddi_dp_level(intel_dp);
cnl_ddi_vswing_sequence(encoder, level, encoder->type);
}
static void
bxt_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int level = intel_ddi_dp_level(intel_dp);
bxt_ddi_vswing_sequence(encoder, level, encoder->type);
}
static void
hsw_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int level = intel_ddi_dp_level(intel_dp);
enum port port = encoder->port;
u32 signal_levels;
signal_levels = DDI_BUF_TRANS_SELECT(level);
drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~DDI_BUF_EMP_MASK;
intel_dp->DP |= signal_levels;
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, encoder->type);
return DDI_BUF_TRANS_SELECT(level);
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
}
static inline
u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
enum phy phy)
static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
enum phy phy)
{
if (intel_phy_is_combo(dev_priv, phy)) {
return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
@@ -3158,13 +3174,13 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 7.a Configure Transcoder Clock Select to direct the Port clock to the
* Transcoder.
*/
intel_ddi_enable_pipe_clock(crtc_state);
intel_ddi_enable_pipe_clock(encoder, crtc_state);
/*
* 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
* Transport Select
*/
intel_ddi_config_transcoder_func(crtc_state);
intel_ddi_config_transcoder_func(encoder, crtc_state);
/*
* 7.c Configure & enable DP_TP_CTL with link training pattern 1
@@ -3252,9 +3268,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
intel_edp_panel_on(intel_dp);
intel_ddi_clk_select(encoder, crtc_state);
@@ -3299,7 +3312,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_fec(encoder, crtc_state);
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
intel_ddi_enable_pipe_clock(encoder, crtc_state);
intel_dsc_enable(encoder, crtc_state);
}
@@ -3360,7 +3373,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
intel_ddi_enable_pipe_clock(crtc_state);
intel_ddi_enable_pipe_clock(encoder, crtc_state);
intel_dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
@@ -3766,7 +3779,9 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
WARN_ON(crtc_state->has_pch_encoder);
drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
intel_ddi_enable_transcoder_func(encoder, crtc_state);
intel_enable_pipe(crtc_state);
@@ -3877,7 +3892,7 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
int required_lanes = crtc_state ? crtc_state->lane_count : 1;
WARN_ON(crtc && crtc->active);
drm_WARN_ON(state->base.dev, crtc && crtc->active);
intel_tc_port_get_link(enc_to_dig_port(encoder),
required_lanes);
@@ -3969,6 +3984,74 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
udelay(600);
}
static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
enum port port = dp_to_dig_port(intel_dp)->base.port;
u32 temp;
temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
else
temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (dp_train_pat & train_pat_mask) {
case DP_TRAINING_PATTERN_DISABLE:
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
break;
case DP_TRAINING_PATTERN_1:
temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
break;
case DP_TRAINING_PATTERN_2:
temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
break;
case DP_TRAINING_PATTERN_3:
temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
break;
case DP_TRAINING_PATTERN_4:
temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
break;
}
intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
}
static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
u32 val;
val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
val |= DP_TP_CTL_LINK_TRAIN_IDLE;
intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
/*
* Until TGL on PORT_A we can have only eDP in SST mode. There the only
* reason we need to set idle transmission mode is to work around a HW
* issue where we enable the pipe while not in idle link-training mode.
* In this case there is requirement to wait for a minimum number of
* idle patterns to be sent.
*/
if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
return;
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_IDLE_DONE, 1))
drm_err(&dev_priv->drm,
"Timed out waiting for DP idle patterns\n");
}
static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
@@ -4061,12 +4144,18 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 temp, flags = 0;
/* XXX: DSI transcoder paranoia */
if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
return;
if (INTEL_GEN(dev_priv) >= 12) {
intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(cpu_transcoder);
intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(cpu_transcoder);
}
intel_dsc_get_config(encoder, pipe_config);
temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -4396,6 +4485,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static struct intel_connector *
intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
struct intel_connector *connector;
enum port port = intel_dig_port->base.port;
@@ -4406,6 +4496,24 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
intel_dig_port->dp.prepare_link_retrain =
intel_ddi_prepare_link_retrain;
intel_dig_port->dp.set_link_train = intel_ddi_set_link_train;
intel_dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
if (INTEL_GEN(dev_priv) >= 12)
intel_dig_port->dp.set_signal_levels = tgl_set_signal_levels;
else if (INTEL_GEN(dev_priv) >= 11)
intel_dig_port->dp.set_signal_levels = icl_set_signal_levels;
else if (IS_CANNONLAKE(dev_priv))
intel_dig_port->dp.set_signal_levels = cnl_set_signal_levels;
else if (IS_GEN9_LP(dev_priv))
intel_dig_port->dp.set_signal_levels = bxt_set_signal_levels;
else
intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels;
if (INTEL_GEN(dev_priv) < 12) {
intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
}
if (!intel_dp_init_connector(intel_dig_port, connector)) {
kfree(connector);
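
With set_link_train, set_idle_link_train and set_signal_levels installed per platform at connector init (above), the shared DP link-training code can invoke one hook instead of repeating INTEL_GEN()/IS_*() ladders at every call site. An illustrative call site follows; the wrapper name is an assumption, and the real callers live in the common DP code:

static void example_dp_set_signal_levels(struct intel_dp *intel_dp)
{
        /* Previously: an if (INTEL_GEN(i915) >= 12) ... else ... ladder. */
        intel_dp->set_signal_levels(intel_dp);
}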


@@ -25,9 +25,11 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);


@@ -238,9 +238,9 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq);
}
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *pipe_config)
/* units of 100MHz */
static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *pipe_config)
{
if (HAS_DDI(dev_priv))
return pipe_config->port_clock; /* SPLL */
@@ -1973,16 +1973,16 @@ static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
{
WARN_ON(!is_ccs_modifier(fb->modifier) ||
(main_plane && main_plane >= fb->format->num_planes / 2));
drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
(main_plane && main_plane >= fb->format->num_planes / 2));
return fb->format->num_planes / 2 + main_plane;
}
static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
{
WARN_ON(!is_ccs_modifier(fb->modifier) ||
ccs_plane < fb->format->num_planes / 2);
drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
ccs_plane < fb->format->num_planes / 2);
return ccs_plane - fb->format->num_planes / 2;
}
@@ -2992,7 +2992,7 @@ setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
fb->modifier != I915_FORMAT_MOD_Yf_TILED)
return 0;
if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
return 0;
rot_info->plane[plane] = *plane_info;
@@ -6089,30 +6089,26 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
/**
* skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
*
* @state: crtc's scaler state
*
* Return
* 0 - scaler_usage updated successfully
* error - requested scaling cannot be supported or other error condition
*/
int skl_update_scaler_crtc(struct intel_crtc_state *state)
static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
bool need_scaler = false;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int width, height;
if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
state->pch_pfit.enabled)
need_scaler = true;
if (crtc_state->pch_pfit.enabled) {
width = drm_rect_width(&crtc_state->pch_pfit.dst);
height = drm_rect_height(&crtc_state->pch_pfit.dst);
} else {
width = adjusted_mode->crtc_hdisplay;
height = adjusted_mode->crtc_vdisplay;
}
return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay,
adjusted_mode->crtc_vdisplay, NULL, 0,
need_scaler);
return skl_update_scaler(crtc_state, !crtc_state->hw.active,
SKL_CRTC_INDEX,
&crtc_state->scaler_state.scaler_id,
crtc_state->pipe_src_w, crtc_state->pipe_src_h,
width, height, NULL, 0,
crtc_state->pch_pfit.enabled);
}
/**
@@ -6221,70 +6217,80 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct drm_rect src = {
.x2 = crtc_state->pipe_src_w << 16,
.y2 = crtc_state->pipe_src_h << 16,
};
const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
u16 uv_rgb_hphase, uv_rgb_vphase;
enum pipe pipe = crtc->pipe;
int width = drm_rect_width(dst);
int height = drm_rect_height(dst);
int x = dst->x1;
int y = dst->y1;
int hscale, vscale;
unsigned long irqflags;
int id;
if (crtc_state->pch_pfit.enabled) {
u16 uv_rgb_hphase, uv_rgb_vphase;
int pfit_w, pfit_h, hscale, vscale;
unsigned long irqflags;
int id;
if (!crtc_state->pch_pfit.enabled)
return;
if (drm_WARN_ON(&dev_priv->drm,
crtc_state->scaler_state.scaler_id < 0))
return;
if (drm_WARN_ON(&dev_priv->drm,
crtc_state->scaler_state.scaler_id < 0))
return;
pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
id = scaler_state->scaler_id;
id = scaler_state->scaler_id;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
x << 16 | y);
intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
width << 16 | height);
intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
crtc_state->pch_pfit.pos);
intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
crtc_state->pch_pfit.size);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
enum pipe pipe = crtc->pipe;
int width = drm_rect_width(dst);
int height = drm_rect_height(dst);
int x = dst->x1;
int y = dst->y1;
if (crtc_state->pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
intel_de_write(dev_priv, PF_CTL(pipe),
PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
else
intel_de_write(dev_priv, PF_CTL(pipe),
PF_ENABLE | PF_FILTER_MED_3x3);
intel_de_write(dev_priv, PF_WIN_POS(pipe),
crtc_state->pch_pfit.pos);
intel_de_write(dev_priv, PF_WIN_SZ(pipe),
crtc_state->pch_pfit.size);
}
if (!crtc_state->pch_pfit.enabled)
return;
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
else
intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
PF_FILTER_MED_3x3);
intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
@@ -6626,7 +6632,7 @@ intel_connector_primary_encoder(struct intel_connector *connector)
return &dp_to_dig_port(connector->mst_port)->base;
encoder = intel_attached_encoder(connector);
WARN_ON(!encoder);
drm_WARN_ON(connector->base.dev, !encoder);
return encoder;
}
@@ -7071,9 +7077,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_transcoder_func(new_crtc_state);
if (dev_priv->display.initial_watermarks)
dev_priv->display.initial_watermarks(state, crtc);
@@ -7104,11 +7107,12 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
if (old_crtc_state->pch_pfit.enabled) {
intel_de_write(dev_priv, PF_CTL(pipe), 0);
intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
if (!old_crtc_state->pch_pfit.enabled)
return;
intel_de_write(dev_priv, PF_CTL(pipe), 0);
intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
static void ilk_crtc_disable(struct intel_atomic_state *state,
@@ -7296,7 +7300,17 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
}
}
switch (dig_port->aux_ch) {
return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
/*
* Converts aux_ch to power_domain without caring about TBT ports for that use
* intel_aux_power_domain()
*/
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
switch (aux_ch) {
case AUX_CH_A:
return POWER_DOMAIN_AUX_A;
case AUX_CH_B:
@@ -7312,7 +7326,7 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
case AUX_CH_G:
return POWER_DOMAIN_AUX_G;
default:
MISSING_CASE(dig_port->aux_ch);
MISSING_CASE(aux_ch);
return POWER_DOMAIN_AUX_A;
}
}
@@ -7926,39 +7940,36 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
u32 pixel_rate;
pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock;
unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
/*
* We only use IF-ID interlacing. If we ever use
* PF-ID we'll need to adjust the pixel_rate here.
*/
if (pipe_config->pch_pfit.enabled) {
u64 pipe_w, pipe_h, pfit_w, pfit_h;
u32 pfit_size = pipe_config->pch_pfit.size;
if (!crtc_state->pch_pfit.enabled)
return pixel_rate;
pipe_w = pipe_config->pipe_src_w;
pipe_h = pipe_config->pipe_src_h;
pipe_w = crtc_state->pipe_src_w;
pipe_h = crtc_state->pipe_src_h;
pfit_w = (pfit_size >> 16) & 0xFFFF;
pfit_h = pfit_size & 0xFFFF;
if (pipe_w < pfit_w)
pipe_w = pfit_w;
if (pipe_h < pfit_h)
pipe_h = pfit_h;
pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
if (WARN_ON(!pfit_w || !pfit_h))
return pixel_rate;
if (pipe_w < pfit_w)
pipe_w = pfit_w;
if (pipe_h < pfit_h)
pipe_h = pfit_h;
pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
pfit_w * pfit_h);
}
if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
!pfit_w || !pfit_h))
return pixel_rate;
return pixel_rate;
return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
pfit_w * pfit_h);
}
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
@@ -8127,7 +8138,7 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
}
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_modparams.panel_use_ssc >= 0)
return i915_modparams.panel_use_ssc != 0;
@@ -9151,9 +9162,9 @@ static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
@@ -9173,9 +9184,9 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
return;
}
pipe_config->gmch_pfit.control = tmp;
pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv,
PFIT_PGM_RATIOS);
crtc_state->gmch_pfit.control = tmp;
crtc_state->gmch_pfit.pgm_ratios =
intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9425,7 +9436,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
i9xx_get_pfit_config(crtc, pipe_config);
i9xx_get_pfit_config(pipe_config);
if (INTEL_GEN(dev_priv) >= 4) {
/* No way to read it out on pipes B and C */
@@ -10395,37 +10406,47 @@ static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
&pipe_config->fdi_m_n, NULL);
}
static void skl_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
u32 pos, u32 size)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
u32 ps_ctrl = 0;
drm_rect_init(&crtc_state->pch_pfit.dst,
pos >> 16, pos & 0xffff,
size >> 16, size & 0xffff);
}
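/*
 * Worked example with illustrative numbers (not taken from the patch):
 * a 3840x2160 panel-fitter window at the pipe origin reads back as
 * pos = 0x00000000 and size = (3840 << 16) | 2160 = 0x0f000870, which
 * the helper above unpacks into a 3840x2160 drm_rect at (0, 0).
 */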
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
int id = -1;
int i;
/* find scaler attached to this pipe */
for (i = 0; i < crtc->num_scalers; i++) {
ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
id = i;
pipe_config->pch_pfit.enabled = true;
pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
SKL_PS_WIN_POS(crtc->pipe, i));
pipe_config->pch_pfit.size = intel_de_read(dev_priv,
SKL_PS_WIN_SZ(crtc->pipe, i));
scaler_state->scalers[i].in_use = true;
break;
}
u32 ctl, pos, size;
ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
continue;
id = i;
crtc_state->pch_pfit.enabled = true;
pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
ilk_get_pfit_pos_size(crtc_state, pos, size);
scaler_state->scalers[i].in_use = true;
break;
}
scaler_state->scaler_id = id;
if (id >= 0) {
if (id >= 0)
scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
} else {
else
scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
}
static void
@@ -10561,30 +10582,30 @@ error:
kfree(intel_fb);
}
static void ilk_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 ctl, pos, size;
tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
if ((ctl & PF_ENABLE) == 0)
return;
if (tmp & PF_ENABLE) {
pipe_config->pch_pfit.enabled = true;
pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
PF_WIN_POS(crtc->pipe));
pipe_config->pch_pfit.size = intel_de_read(dev_priv,
PF_WIN_SZ(crtc->pipe));
crtc_state->pch_pfit.enabled = true;
/* We currently do not free assignements of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiates them) so just WARN about this case for now. */
if (IS_GEN(dev_priv, 7)) {
drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) !=
PF_PIPE_SEL_IVB(crtc->pipe));
}
}
pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
ilk_get_pfit_pos_size(crtc_state, pos, size);
/*
* We currently do not free assignements of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiates them) so just WARN about this case for now.
*/
drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
(ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
@@ -10695,7 +10716,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
ilk_get_pfit_config(crtc, pipe_config);
ilk_get_pfit_config(pipe_config);
ret = true;
@@ -11169,9 +11190,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
power_domain_mask |= BIT_ULL(power_domain);
if (INTEL_GEN(dev_priv) >= 9)
skl_get_pfit_config(crtc, pipe_config);
skl_get_pfit_config(pipe_config);
else
ilk_get_pfit_config(crtc, pipe_config);
ilk_get_pfit_config(pipe_config);
}
if (hsw_crtc_supports_ips(crtc)) {
@@ -12430,8 +12451,10 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
if (IS_ERR(linked_plane_state))
return PTR_ERR(linked_plane_state);
WARN_ON(linked_plane_state->planar_linked_plane != plane);
WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
drm_WARN_ON(state->base.dev,
linked_plane_state->planar_linked_plane != plane);
drm_WARN_ON(state->base.dev,
linked_plane_state->planar_slave == plane_state->planar_slave);
}
return 0;
@@ -12819,7 +12842,7 @@ static void intel_dump_crtc_timings(struct drm_i915_private *i915,
mode->type, mode->flags);
}
static inline void
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
const char *id, unsigned int lane_count,
const struct intel_link_m_n *m_n)
@@ -13030,9 +13053,8 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
pipe_config->gmch_pfit.lvds_border_bits);
else
drm_dbg_kms(&dev_priv->drm,
"pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
pipe_config->pch_pfit.pos,
pipe_config->pch_pfit.size,
"pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
enableddisabled(pipe_config->pch_pfit.enabled),
yesno(pipe_config->pch_pfit.force_thru));
@@ -13154,7 +13176,8 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
{
crtc_state->uapi.enable = crtc_state->hw.enable;
crtc_state->uapi.active = crtc_state->hw.active;
WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
drm_WARN_ON(crtc_state->uapi.crtc->dev,
drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
@@ -13773,8 +13796,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_X(pch_pfit.pos);
PIPE_CONF_CHECK_X(pch_pfit.size);
PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
}
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
@@ -15353,12 +15378,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_set_cdclk_pre_plane_update(state);
/*
* SKL workaround: bspec recommends we disable the SAGV when we
* have more then one pipe enabled
*/
if (!intel_can_enable_sagv(state))
intel_disable_sagv(dev_priv);
intel_sagv_pre_plane_update(state);
intel_modeset_verify_disabled(dev_priv, state);
}
@@ -15455,11 +15475,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
if (state->modeset)
if (state->modeset) {
intel_verify_planes(state);
if (state->modeset && intel_can_enable_sagv(state))
intel_enable_sagv(dev_priv);
intel_sagv_post_plane_update(state);
}
drm_atomic_helper_commit_hw_done(&state->base);


@@ -583,13 +583,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,


@@ -9,6 +9,7 @@
#include "i915_debugfs.h"
#include "intel_csr.h"
#include "intel_display_debugfs.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_fbc.h"
@@ -1143,6 +1144,51 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
return 0;
}
#define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
seq_puts(m, "LPSP: disabled\n"))
static bool
intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
enum i915_power_well_id power_well_id)
{
intel_wakeref_t wakeref;
bool is_enabled;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
is_enabled = intel_display_power_well_is_enabled(i915,
power_well_id);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return is_enabled;
}
static int i915_lpsp_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
switch (INTEL_GEN(i915)) {
case 12:
case 11:
LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
break;
case 10:
case 9:
LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
break;
default:
/*
* Apart from HASWELL/BROADWELL other legacy platform doesn't
* support lpsp.
*/
if (IS_HASWELL(i915) || IS_BROADWELL(i915))
LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
else
seq_puts(m, "LPSP: not supported\n");
}
return 0;
}
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1910,6 +1956,7 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
{"i915_drrs_status", i915_drrs_status, 0},
{"i915_lpsp_status", i915_lpsp_status, 0},
};
static const struct {
@@ -1991,6 +2038,48 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
seq_puts(m, "LPSP: incapable\n"))
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_encoder *encoder =
intel_attached_encoder(to_intel_connector(connector));
struct drm_i915_private *i915 = to_i915(connector->dev);
if (connector->status != connector_status_connected)
return -ENODEV;
switch (INTEL_GEN(i915)) {
case 12:
/*
* Actually TGL can drive LPSP on port till DDI_C
* but there is no physical connected DDI_C on TGL sku's,
* even driver is not initilizing DDI_C port for gen12.
*/
LPSP_CAPABLE(encoder->port <= PORT_B);
break;
case 11:
LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP);
break;
case 10:
case 9:
LPSP_CAPABLE(encoder->port == PORT_A &&
(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
break;
default:
if (IS_HASWELL(i915) || IS_BROADWELL(i915))
LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
@@ -2134,5 +2223,16 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
connector, &i915_dsc_fec_support_fops);
/* Legacy panels doesn't lpsp on any platform */
if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv)) &&
(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
debugfs_create_file("i915_lpsp_capability", 0444, root,
connector, &i915_lpsp_capability_fops);
return 0;
}


@@ -151,6 +151,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "GT_IRQ";
case POWER_DOMAIN_DPLL_DC_OFF:
return "DPLL_DC_OFF";
case POWER_DOMAIN_TC_COLD_OFF:
return "TC_COLD_OFF";
default:
MISSING_CASE(domain);
return "?";
@@ -282,8 +284,51 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
#define ICL_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
int pw_idx = power_well->desc->hsw.idx;
return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
ICL_AUX_PW_TO_CH(pw_idx);
}
static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
enum aux_ch aux_ch)
{
struct intel_digital_port *dig_port = NULL;
struct intel_encoder *encoder;
for_each_intel_encoder(&dev_priv->drm, encoder) {
/* We'll check the MST primary port */
if (encoder->type == INTEL_OUTPUT_DP_MST)
continue;
dig_port = enc_to_dig_port(encoder);
if (!dig_port)
continue;
if (dig_port->aux_ch != aux_ch) {
dig_port = NULL;
continue;
}
break;
}
return dig_port;
}
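/*
 * The lookup above gives the AUX power well code a way to get from a
 * power well back to its digital port; combined with the new
 * timeout_expected parameter below, this is presumably how the TC-cold
 * sequences distinguish a legitimate enable timeout (e.g. a TBT DP
 * tunnel that is down) from a real failure.
 */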
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
struct i915_power_well *power_well,
bool timeout_expected)
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
@@ -294,8 +339,8 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
power_well->desc->name);
/* An AUX timeout is expected if the TBT DP tunnel is down. */
drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
drm_WARN_ON(&dev_priv->drm, !timeout_expected);
}
}
@@ -358,11 +403,11 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
bool wait_fuses = power_well->desc->hsw.has_fuses;
enum skl_power_gate uninitialized_var(pg);
u32 val;
if (wait_fuses) {
if (power_well->desc->hsw.has_fuses) {
enum skl_power_gate pg;
pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
/*
@@ -379,19 +424,27 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
val = intel_de_read(dev_priv, regs->driver);
intel_de_write(dev_priv, regs->driver,
val | HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_enable(dev_priv, power_well);
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: cnl */
if (IS_CANNONLAKE(dev_priv) &&
pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
u32 val;
val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
}
if (wait_fuses)
if (power_well->desc->hsw.has_fuses) {
enum skl_power_gate pg;
pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
gen9_wait_for_power_well_fuses(dev_priv, pg);
}
hsw_power_well_post_enable(dev_priv,
power_well->desc->hsw.irq_pipe_mask,
@@ -437,7 +490,7 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
val | ICL_LANE_ENABLE_AUX);
}
hsw_wait_for_power_well_enable(dev_priv, power_well);
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
@@ -470,21 +523,6 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
#define ICL_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
int pw_idx = power_well->desc->hsw.idx;
return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
ICL_AUX_PW_TO_CH(pw_idx);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
@@ -501,51 +539,28 @@ static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
}
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
struct i915_power_well *power_well,
struct intel_digital_port *dig_port)
{
enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
struct intel_digital_port *dig_port = NULL;
struct intel_encoder *encoder;
/* Bypass the check if all references are released asynchronously */
if (power_well_async_ref_count(dev_priv, power_well) ==
power_well->count)
return;
aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
for_each_intel_encoder(&dev_priv->drm, encoder) {
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (!intel_phy_is_tc(dev_priv, phy))
continue;
/* We'll check the MST primary port */
if (encoder->type == INTEL_OUTPUT_DP_MST)
continue;
dig_port = enc_to_dig_port(encoder);
if (drm_WARN_ON(&dev_priv->drm, !dig_port))
continue;
if (dig_port->aux_ch != aux_ch) {
dig_port = NULL;
continue;
}
break;
}
if (drm_WARN_ON(&dev_priv->drm, !dig_port))
return;
if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
return;
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}
#else
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
struct i915_power_well *power_well,
struct intel_digital_port *dig_port)
{
}
@@ -553,24 +568,65 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
static void icl_tc_cold_exit(struct drm_i915_private *i915)
{
int ret, tries = 0;
while (1) {
ret = sandybridge_pcode_write_timeout(i915,
ICL_PCODE_EXIT_TCCOLD,
0, 250, 1);
if (ret != -EAGAIN || ++tries == 3)
break;
msleep(1);
}
/* Spec states that TC cold exit can take up to 1ms to complete */
if (!ret)
msleep(1);
/* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
"succeeded");
}
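The loop in icl_tc_cold_exit() is a bounded retry: the pcode request is reissued while it keeps returning -EAGAIN and gives up after three attempts. A self-contained sketch of the same pattern, with a stub standing in for sandybridge_pcode_write_timeout():

#include <stdio.h>
#include <errno.h>

/* Stub: pretend the firmware is busy for the first two attempts. */
static int fake_pcode_write(int attempt)
{
	return attempt < 2 ? -EAGAIN : 0;
}

int main(void)
{
	int ret, tries = 0;

	while (1) {
		ret = fake_pcode_write(tries);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		/* the driver sleeps ~1ms between attempts */
	}

	printf("TC cold exit %s\n", ret ? "failed" : "succeeded");
	return 0;
}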
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
bool is_tbt = power_well->desc->hsw.is_tc_tbt;
bool timeout_expected;
u32 val;
icl_tc_port_assert_ref_held(dev_priv, power_well);
icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
val &= ~DP_AUX_CH_CTL_TBT_IO;
if (power_well->desc->hsw.is_tc_tbt)
if (is_tbt)
val |= DP_AUX_CH_CTL_TBT_IO;
intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
hsw_power_well_enable(dev_priv, power_well);
val = intel_de_read(dev_priv, regs->driver);
intel_de_write(dev_priv, regs->driver,
val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));
if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
/*
* An AUX timeout is expected if the TBT DP tunnel is down,
* or when we need to enable AUX on a legacy TypeC port as part of the
* TC-cold exit sequence.
*/
timeout_expected = is_tbt;
if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) {
icl_tc_cold_exit(dev_priv);
timeout_expected = true;
}
hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
if (INTEL_GEN(dev_priv) >= 12 && !is_tbt) {
enum tc_port tc_port;
tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
@@ -588,11 +644,48 @@ static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
icl_tc_port_assert_ref_held(dev_priv, power_well);
enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
hsw_power_well_disable(dev_priv, power_well);
}
static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
int pw_idx = power_well->desc->hsw.idx;
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */
bool is_tbt = power_well->desc->hsw.is_tc_tbt;
if (is_tbt || intel_phy_is_tc(dev_priv, phy))
return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
else if (IS_ICELAKE(dev_priv))
return icl_combo_phy_aux_power_well_enable(dev_priv,
power_well);
else
return hsw_power_well_enable(dev_priv, power_well);
}
static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
int pw_idx = power_well->desc->hsw.idx;
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */
bool is_tbt = power_well->desc->hsw.is_tc_tbt;
if (is_tbt || intel_phy_is_tc(dev_priv, phy))
return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
else if (IS_ICELAKE(dev_priv))
return icl_combo_phy_aux_power_well_disable(dev_priv,
power_well);
else
return hsw_power_well_disable(dev_priv, power_well);
}
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -943,7 +1036,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
/* Power wells at this level and above must be disabled for DC5 entry */
if (INTEL_GEN(dev_priv) >= 12)
high_pg = TGL_DISP_PW_3;
high_pg = ICL_DISP_PW_3;
else
high_pg = SKL_DISP_PW_2;
@@ -2805,6 +2898,21 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_AUX_E) | \
BIT_ULL(POWER_DOMAIN_AUX_F) | \
BIT_ULL(POWER_DOMAIN_AUX_G) | \
BIT_ULL(POWER_DOMAIN_AUX_H) | \
BIT_ULL(POWER_DOMAIN_AUX_I) | \
BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
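The new mask above is a 64-bit bitmask with one bit per power domain, so the single TC cold-off well covers every TC AUX domain at once. A small sketch of how such a mask is composed and tested; the domain numbers below are made up for illustration and do not match the real enum:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* Illustrative domain numbers only; the real enum values differ. */
enum { POWER_DOMAIN_AUX_D = 40, POWER_DOMAIN_AUX_E, POWER_DOMAIN_TC_COLD_OFF = 60 };

int main(void)
{
	uint64_t domains = BIT_ULL(POWER_DOMAIN_AUX_D) |
			   BIT_ULL(POWER_DOMAIN_AUX_E) |
			   BIT_ULL(POWER_DOMAIN_TC_COLD_OFF);

	/* Taking a reference on POWER_DOMAIN_AUX_D must power up this well. */
	printf("covers AUX_D: %d\n", !!(domains & BIT_ULL(POWER_DOMAIN_AUX_D)));
	return 0;
}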
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -3503,17 +3611,10 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
},
};
static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
static const struct i915_power_well_ops icl_aux_power_well_ops = {
.sync_hw = hsw_power_well_sync_hw,
.enable = icl_combo_phy_aux_power_well_enable,
.disable = icl_combo_phy_aux_power_well_disable,
.is_enabled = hsw_power_well_enabled,
};
static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
.sync_hw = hsw_power_well_sync_hw,
.enable = icl_tc_phy_aux_power_well_enable,
.disable = icl_tc_phy_aux_power_well_disable,
.enable = icl_aux_power_well_enable,
.disable = icl_aux_power_well_disable,
.is_enabled = hsw_power_well_enabled,
};
@@ -3571,7 +3672,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
.name = "power well 3",
.domains = ICL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
.id = ICL_DISP_PW_3,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -3643,7 +3744,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX A",
.domains = ICL_AUX_A_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3653,7 +3754,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX B",
.domains = ICL_AUX_B_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3663,7 +3764,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX C TC1",
.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3674,7 +3775,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX D TC2",
.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3685,7 +3786,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX E TC3",
.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3696,7 +3797,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX F TC4",
.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3707,7 +3808,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX C TBT1",
.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3718,7 +3819,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX D TBT2",
.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3729,7 +3830,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX E TBT3",
.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3740,7 +3841,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX F TBT4",
.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3762,149 +3863,89 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
};
static const struct i915_power_well_desc ehl_power_wells[] = {
{
.name = "always-on",
.always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_1,
.hsw.has_fuses = true,
},
},
{
.name = "DC off",
.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
.id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
.domains = ICL_PW_2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_2,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_2,
.hsw.has_fuses = true,
},
},
{
.name = "power well 3",
.domains = ICL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
.hsw.irq_pipe_mask = BIT(PIPE_B),
.hsw.has_vga = true,
.hsw.has_fuses = true,
},
},
{
.name = "DDI A IO",
.domains = ICL_DDI_IO_A_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
},
},
{
.name = "DDI B IO",
.domains = ICL_DDI_IO_B_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
},
},
{
.name = "DDI C IO",
.domains = ICL_DDI_IO_C_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
},
},
{
.name = "DDI D IO",
.domains = ICL_DDI_IO_D_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
},
},
{
.name = "AUX A",
.domains = ICL_AUX_A_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
},
},
{
.name = "AUX B",
.domains = ICL_AUX_B_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
},
},
{
.name = "AUX C",
.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
},
},
{
.name = "AUX D",
.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
},
},
{
.name = "power well 4",
.domains = ICL_PW_4_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_4,
.hsw.has_fuses = true,
.hsw.irq_pipe_mask = BIT(PIPE_C),
},
},
static void
tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
{
u8 tries = 0;
int ret;
while (1) {
u32 low_val = 0, high_val;
if (block)
high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
else
high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
/*
* Spec states that we should time out the request after 200us,
* but the function below will time out after 500us
*/
ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
&high_val);
if (ret == 0) {
if (block &&
(low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
ret = -EIO;
else
break;
}
if (++tries == 3)
break;
if (ret == -EAGAIN)
msleep(1);
}
if (ret)
drm_err(&i915->drm, "TC cold %sblock failed\n",
block ? "" : "un");
else
drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
block ? "" : "un");
}
static void
tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
struct i915_power_well *power_well)
{
tgl_tc_cold_request(i915, true);
}
static void
tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
struct i915_power_well *power_well)
{
tgl_tc_cold_request(i915, false);
}
static void
tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
struct i915_power_well *power_well)
{
if (power_well->count > 0)
tgl_tc_cold_off_power_well_enable(i915, power_well);
else
tgl_tc_cold_off_power_well_disable(i915, power_well);
}
static bool
tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
/*
* Not the correct implementation, but there is no way to just read it
* from PCODE, so return the count to avoid state mismatch errors
*/
return power_well->count;
}
static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
.enable = tgl_tc_cold_off_power_well_enable,
.disable = tgl_tc_cold_off_power_well_disable,
.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};
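Since the TC cold block state cannot be read back from PCODE, the well's state is tracked purely in software: is_enabled reports the reference count and sync_hw replays whichever request matches that count. A minimal sketch of that bookkeeping, with a stub in place of the real pcode request:

#include <stdio.h>
#include <stdbool.h>

struct fake_power_well {
	int count;	/* software reference count */
};

/* Stub for the real pcode block/unblock request. */
static void tc_cold_request(bool block)
{
	printf("TC cold %sblock requested\n", block ? "" : "un");
}

static bool tc_cold_is_enabled(const struct fake_power_well *pw)
{
	/* No hardware readback available, so trust the software count. */
	return pw->count > 0;
}

static void tc_cold_sync_hw(const struct fake_power_well *pw)
{
	/* Re-issue whichever request matches the tracked state. */
	tc_cold_request(pw->count > 0);
}

int main(void)
{
	struct fake_power_well pw = { .count = 1 };

	tc_cold_sync_hw(&pw);			/* -> block */
	printf("enabled: %d\n", tc_cold_is_enabled(&pw));
	pw.count = 0;
	tc_cold_sync_hw(&pw);			/* -> unblock */
	return 0;
}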
static const struct i915_power_well_desc tgl_power_wells[] = {
@@ -3949,7 +3990,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.name = "power well 3",
.domains = TGL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = TGL_DISP_PW_3,
.id = ICL_DISP_PW_3,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -4051,7 +4092,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX A",
.domains = TGL_AUX_A_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4061,7 +4102,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX B",
.domains = TGL_AUX_B_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4071,7 +4112,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX C",
.domains = TGL_AUX_C_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4081,7 +4122,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX D TC1",
.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4092,7 +4133,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX E TC2",
.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4103,7 +4144,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX F TC3",
.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4114,7 +4155,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX G TC4",
.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4125,7 +4166,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX H TC5",
.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4136,7 +4177,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX I TC6",
.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4147,7 +4188,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX D TBT1",
.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4158,7 +4199,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX E TBT2",
.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4169,7 +4210,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX F TBT3",
.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4180,7 +4221,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX G TBT4",
.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4191,7 +4232,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX H TBT5",
.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4202,7 +4243,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX I TBT6",
.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4234,6 +4275,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.hsw.irq_pipe_mask = BIT(PIPE_D),
},
},
{
.name = "TC cold off",
.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
.ops = &tgl_tc_cold_off_ops,
.id = DISP_PW_ID_NONE,
},
};
static int
@@ -4383,8 +4430,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (IS_GEN(dev_priv, 12)) {
err = set_power_wells(power_domains, tgl_power_wells);
} else if (IS_ELKHARTLAKE(dev_priv)) {
err = set_power_wells(power_domains, ehl_power_wells);
} else if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
@@ -4446,9 +4491,8 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
static inline
bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
i915_reg_t reg, bool enable)
static bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
i915_reg_t reg, bool enable)
{
u32 val, status;


@@ -76,6 +76,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
POWER_DOMAIN_DPLL_DC_OFF,
POWER_DOMAIN_TC_COLD_OFF,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
@@ -100,7 +101,7 @@ enum i915_power_well_id {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
TGL_DISP_PW_3,
ICL_DISP_PW_3,
SKL_DISP_DC_OFF,
};
@@ -266,6 +267,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,


@@ -974,8 +974,7 @@ struct intel_crtc_state {
/* Panel fitter placement and size for Ironlake+ */
struct {
u32 pos;
u32 size;
struct drm_rect dst;
bool enabled;
bool force_thru;
} pch_pfit;
@@ -1368,6 +1367,9 @@ struct intel_dp {
/* This is called before link training is started */
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
void (*set_link_train)(struct intel_dp *intel_dp, u8 dp_train_pat);
void (*set_idle_link_train)(struct intel_dp *intel_dp);
void (*set_signal_levels)(struct intel_dp *intel_dp);
/* Displayport compliance testing */
struct intel_dp_compliance compliance;


@@ -48,7 +48,6 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -2340,15 +2339,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
struct drm_connector *connector,
struct intel_crtc_state *crtc_state)
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_info *info = &connector->display_info;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int ret;
if (!drm_mode_is_420_only(info, adjusted_mode) ||
!intel_dp_get_colorimetry_status(intel_dp) ||
@@ -2357,17 +2354,7 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
/* YCBCR 420 output conversion needs a scaler */
ret = skl_update_scaler_crtc(crtc_state);
if (ret) {
drm_dbg_kms(&i915->drm,
"Scaler allocation for output failed\n");
return ret;
}
intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);
return 0;
return intel_pch_panel_fitting(crtc_state, conn_state);
}
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
@@ -2546,7 +2533,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
enum port port = encoder->port;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
@@ -2562,9 +2548,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (lspcon->active)
lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
else
ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
pipe_config);
ret = intel_dp_ycbcr420_config(intel_dp, pipe_config,
conn_state);
if (ret)
return ret;
@@ -2580,18 +2565,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
if (INTEL_GEN(dev_priv) >= 9) {
ret = skl_update_scaler_crtc(pipe_config);
if (ret)
return ret;
}
if (HAS_GMCH(dev_priv))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
conn_state->scaling_mode);
ret = intel_gmch_panel_fitting(pipe_config, conn_state);
else
intel_pch_panel_fitting(intel_crtc, pipe_config,
conn_state->scaling_mode);
ret = intel_pch_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -2671,9 +2650,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_crtc_has_type(pipe_config,
INTEL_OUTPUT_DP_MST));
intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
/*
* There are four kinds of DP registers:
*
@@ -3642,90 +3618,63 @@ static void chv_post_disable_dp(struct intel_atomic_state *state,
}
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
u32 *DP,
u8 dp_train_pat)
cpt_set_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
u32 *DP = &intel_dp->DP;
if (dp_train_pat & train_pat_mask)
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
*DP |= DP_LINK_TRAIN_OFF_CPT;
break;
case DP_TRAINING_PATTERN_1:
*DP |= DP_LINK_TRAIN_PAT_1_CPT;
break;
case DP_TRAINING_PATTERN_2:
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
drm_dbg_kms(&dev_priv->drm,
"Using DP training pattern TPS%d\n",
dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
else
temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (dp_train_pat & train_pat_mask) {
case DP_TRAINING_PATTERN_DISABLE:
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
break;
case DP_TRAINING_PATTERN_1:
temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
break;
case DP_TRAINING_PATTERN_2:
temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
break;
case DP_TRAINING_PATTERN_3:
temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
break;
case DP_TRAINING_PATTERN_4:
temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
break;
}
intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
*DP |= DP_LINK_TRAIN_OFF_CPT;
break;
case DP_TRAINING_PATTERN_1:
*DP |= DP_LINK_TRAIN_PAT_1_CPT;
break;
case DP_TRAINING_PATTERN_2:
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
drm_dbg_kms(&dev_priv->drm,
"TPS3 not supported, using TPS2 instead\n");
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
} else {
*DP &= ~DP_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
*DP |= DP_LINK_TRAIN_OFF;
break;
case DP_TRAINING_PATTERN_1:
*DP |= DP_LINK_TRAIN_PAT_1;
break;
case DP_TRAINING_PATTERN_2:
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
drm_dbg_kms(&dev_priv->drm,
"TPS3 not supported, using TPS2 instead\n");
*DP |= DP_LINK_TRAIN_PAT_2;
break;
}
"TPS3 not supported, using TPS2 instead\n");
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
static void
g4x_set_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 *DP = &intel_dp->DP;
*DP &= ~DP_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
*DP |= DP_LINK_TRAIN_OFF;
break;
case DP_TRAINING_PATTERN_1:
*DP |= DP_LINK_TRAIN_PAT_1;
break;
case DP_TRAINING_PATTERN_2:
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
drm_dbg_kms(&dev_priv->drm,
"TPS3 not supported, using TPS2 instead\n");
*DP |= DP_LINK_TRAIN_PAT_2;
break;
}
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
static void intel_dp_enable_port(struct intel_dp *intel_dp,
@@ -4064,7 +4013,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
}
}
static u32 vlv_signal_levels(struct intel_dp *intel_dp)
static void vlv_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
unsigned long demph_reg_value, preemph_reg_value,
@@ -4092,7 +4041,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
uniqtranscale_reg_value = 0x5598DA3A;
break;
default:
return 0;
return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_1:
@@ -4111,7 +4060,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
return 0;
return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_2:
@@ -4126,7 +4075,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
return 0;
return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_3:
@@ -4137,20 +4086,18 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
return 0;
return;
}
break;
default:
return 0;
return;
}
vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value, 0);
return 0;
}
static u32 chv_signal_levels(struct intel_dp *intel_dp)
static void chv_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u32 deemph_reg_value, margin_reg_value;
@@ -4178,7 +4125,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
uniq_trans_scale = true;
break;
default:
return 0;
return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_1:
@@ -4196,7 +4143,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
margin_reg_value = 154;
break;
default:
return 0;
return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_2:
@@ -4210,7 +4157,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
margin_reg_value = 154;
break;
default:
return 0;
return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_3:
@@ -4220,21 +4167,18 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
margin_reg_value = 154;
break;
default:
return 0;
return;
}
break;
default:
return 0;
return;
}
chv_set_phy_signal_level(encoder, deemph_reg_value,
margin_reg_value, uniq_trans_scale);
return 0;
}
static u32
g4x_signal_levels(u8 train_set)
static u32 g4x_signal_levels(u8 train_set)
{
u32 signal_levels = 0;
@@ -4271,12 +4215,31 @@ g4x_signal_levels(u8 train_set)
return signal_levels;
}
/* SNB CPU eDP voltage swing and pre-emphasis control */
static u32
snb_cpu_edp_signal_levels(u8 train_set)
static void
g4x_set_signal_levels(struct intel_dp *intel_dp)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
signal_levels = g4x_signal_levels(train_set);
drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
intel_dp->DP |= signal_levels;
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
/* SNB CPU eDP voltage swing and pre-emphasis control */
static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -4299,12 +4262,31 @@ snb_cpu_edp_signal_levels(u8 train_set)
}
}
/* IVB CPU eDP voltage swing and pre-emphasis control */
static u32
ivb_cpu_edp_signal_levels(u8 train_set)
static void
snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
signal_levels = snb_cpu_edp_signal_levels(train_set);
drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
intel_dp->DP |= signal_levels;
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
/* IVB CPU eDP voltage swing and pre-emphasis control */
static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return EDP_LINK_TRAIN_400MV_0DB_IVB;
@@ -4330,38 +4312,29 @@ ivb_cpu_edp_signal_levels(u8 train_set)
}
}
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
static void
ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
u32 signal_levels, mask = 0;
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
signal_levels = bxt_signal_levels(intel_dp);
} else if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev_priv)) {
signal_levels = chv_signal_levels(intel_dp);
} else if (IS_VALLEYVIEW(dev_priv)) {
signal_levels = vlv_signal_levels(intel_dp);
} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
signal_levels = ivb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
signal_levels = snb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
signal_levels = g4x_signal_levels(train_set);
mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
}
signal_levels = ivb_cpu_edp_signal_levels(train_set);
if (mask)
drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
signal_levels);
drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
intel_dp->DP |= signal_levels;
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
void intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 train_set = intel_dp->train_set[0];
drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
@@ -4372,55 +4345,28 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
" (max)" : "");
intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
intel_dp->set_signal_levels(intel_dp);
}
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
if (dp_train_pat & train_pat_mask)
drm_dbg_kms(&dev_priv->drm,
"Using DP training pattern TPS%d\n",
dp_train_pat & train_pat_mask);
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
intel_dp->set_link_train(intel_dp, dp_train_pat);
}
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
u32 val;
if (!HAS_DDI(dev_priv))
return;
val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
val |= DP_TP_CTL_LINK_TRAIN_IDLE;
intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
/*
* Until TGL on PORT_A we can have only eDP in SST mode. There the only
* reason we need to set idle transmission mode is to work around a HW
* issue where we enable the pipe while not in idle link-training mode.
* In this case there is requirement to wait for a minimum number of
* idle patterns to be sent.
*/
if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
return;
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_IDLE_DONE, 1))
drm_err(&dev_priv->drm,
"Timed out waiting for DP idle patterns\n");
if (intel_dp->set_idle_link_train)
intel_dp->set_idle_link_train(intel_dp);
}
static void
@@ -5567,7 +5513,7 @@ void intel_dp_process_phy_request(struct intel_dp *intel_dp)
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
u8 test_result = DP_TEST_NAK;
u8 test_result;
test_result = intel_dp_prepare_phytest(intel_dp);
if (test_result != DP_TEST_ACK)
@@ -5629,61 +5575,51 @@ static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
bool bret;
bool need_retrain = false;
if (intel_dp->is_mst) {
u8 esi[DP_DPRX_ESI_LEN] = { 0 };
int ret = 0;
if (!intel_dp->is_mst)
return -EINVAL;
WARN_ON_ONCE(intel_dp->active_mst_links < 0);
for (;;) {
u8 esi[DP_DPRX_ESI_LEN] = {};
bool bret, handled;
int retry;
bool handled;
WARN_ON_ONCE(intel_dp->active_mst_links < 0);
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
if (bret == true) {
/* check link status - esi[10] = 0x200c */
if (intel_dp->active_mst_links > 0 &&
!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
drm_dbg_kms(&i915->drm,
"channel EQ not ok, retraining\n");
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
if (handled) {
for (retry = 0; retry < 3; retry++) {
int wret;
wret = drm_dp_dpcd_write(&intel_dp->aux,
DP_SINK_COUNT_ESI+1,
&esi[1], 3);
if (wret == 3) {
break;
}
}
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
if (bret == true) {
drm_dbg_kms(&i915->drm,
"got esi2 %3ph\n", esi);
goto go_again;
}
} else
ret = 0;
return ret;
} else {
if (!bret) {
drm_dbg_kms(&i915->drm,
"failed to get ESI - device may have failed\n");
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
return -EINVAL;
}
/* check link status - esi[10] = 0x200c */
if (intel_dp->active_mst_links > 0 && !need_retrain &&
!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
drm_dbg_kms(&i915->drm,
"channel EQ not ok, retraining\n");
need_retrain = true;
}
drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
if (!handled)
break;
for (retry = 0; retry < 3; retry++) {
int wret;
wret = drm_dp_dpcd_write(&intel_dp->aux,
DP_SINK_COUNT_ESI+1,
&esi[1], 3);
if (wret == 3)
break;
}
}
return -EINVAL;
return need_retrain;
}
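After this rework the function no longer retrains the link inline; it reports a tri-state result that the hotplug path acts on (see the switch in intel_dp_hpd_pulse() further below): roughly, a negative errno when the device or ESI read is not usable and MST should be dropped, a positive value when channel EQ indicates the link needs retraining, and 0 when nothing is pending. A toy sketch of that contract, with a stub standing in for the real status check:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Stub with the same tri-state contract as intel_dp_check_mst_status():
 * <0: not MST / ESI read failed, 1: channel EQ bad, 0: all good. */
static int check_mst_status(bool is_mst, bool esi_ok, bool channel_eq_ok)
{
	if (!is_mst || !esi_ok)
		return -EINVAL;
	return channel_eq_ok ? 0 : 1;
}

int main(void)
{
	switch (check_mst_status(true, true, false)) {
	case -EINVAL:
		printf("drop out of MST mode\n");
		break;
	case 1:
		printf("link needs retraining\n");
		break;
	default:
		printf("nothing to do\n");
		break;
	}
	return 0;
}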
static bool
@@ -5720,20 +5656,102 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
static bool intel_dp_has_connector(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_encoder *encoder;
enum pipe pipe;
if (!conn_state->best_encoder)
return false;
/* SST */
encoder = &dp_to_dig_port(intel_dp)->base;
if (conn_state->best_encoder == &encoder->base)
return true;
/* MST */
for_each_pipe(i915, pipe) {
encoder = &intel_dp->mst_encoders[pipe]->base;
if (conn_state->best_encoder == &encoder->base)
return true;
}
return false;
}
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
struct drm_modeset_acquire_ctx *ctx,
u32 *crtc_mask)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
int ret = 0;
*crtc_mask = 0;
if (!intel_dp_needs_link_retrain(intel_dp))
return 0;
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state =
connector->base.state;
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
if (!intel_dp_has_connector(intel_dp, conn_state))
continue;
crtc = to_intel_crtc(conn_state->crtc);
if (!crtc)
continue;
ret = drm_modeset_lock(&crtc->base.mutex, ctx);
if (ret)
break;
crtc_state = to_intel_crtc_state(crtc->base.state);
drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
if (!crtc_state->hw.active)
continue;
if (conn_state->commit &&
!try_wait_for_completion(&conn_state->commit->hw_done))
continue;
*crtc_mask |= drm_crtc_mask(&crtc->base);
}
drm_connector_list_iter_end(&conn_iter);
if (!intel_dp_needs_link_retrain(intel_dp))
*crtc_mask = 0;
return ret;
}
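The retrain path now works in two passes: first collect a bitmask of every CRTC still driving this encoder (SST or MST) under the proper locks, then run the actual retrain and underrun handling once per collected CRTC. A toy illustration of that collect-then-iterate shape; in drm, drm_crtc_mask() is simply one bit per CRTC index:

#include <stdio.h>
#include <stdint.h>

/* Toy stand-in for drm_crtc_mask(): one bit per CRTC index. */
static uint32_t crtc_mask(int index)
{
	return 1u << index;
}

int main(void)
{
	uint32_t mask = 0;
	int i;

	/* Phase 1: collect the CRTCs that still drive this DP encoder. */
	mask |= crtc_mask(0);
	mask |= crtc_mask(2);

	/* Phase 2: act on each collected CRTC (retrain, underrun handling). */
	for (i = 0; i < 32; i++)
		if (mask & crtc_mask(i))
			printf("retrain on pipe %d\n", i);
	return 0;
}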
static bool intel_dp_is_connected(struct intel_dp *intel_dp)
{
struct intel_connector *connector = intel_dp->attached_connector;
return connector->base.status == connector_status_connected ||
intel_dp->is_mst;
}
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_connector *connector = intel_dp->attached_connector;
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
u32 crtc_mask;
int ret;
/* FIXME handle the MST connectors as well */
if (!connector || connector->base.status != connector_status_connected)
if (!intel_dp_is_connected(intel_dp))
return 0;
ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
@@ -5741,46 +5759,42 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
if (ret)
return ret;
conn_state = connector->base.state;
crtc = to_intel_crtc(conn_state->crtc);
if (!crtc)
return 0;
ret = drm_modeset_lock(&crtc->base.mutex, ctx);
ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
if (ret)
return ret;
crtc_state = to_intel_crtc_state(crtc->base.state);
drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));
if (!crtc_state->hw.active)
if (crtc_mask == 0)
return 0;
if (conn_state->commit &&
!try_wait_for_completion(&conn_state->commit->hw_done))
return 0;
drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
encoder->base.base.id, encoder->base.name);
if (!intel_dp_needs_link_retrain(intel_dp))
return 0;
for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
/* Suppress underruns caused by re-training */
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), false);
/* Suppress underruns caused by re-training */
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), false);
}
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
/* Keep underrun reporting disabled until things are stable */
intel_wait_for_vblank(dev_priv, crtc->pipe);
for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), true);
/* Keep underrun reporting disabled until things are stable */
intel_wait_for_vblank(dev_priv, crtc->pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), true);
}
return 0;
}
@@ -6451,8 +6465,6 @@ intel_dp_connector_register(struct drm_connector *connector)
if (ret)
return ret;
intel_connector_debugfs_add(connector);
drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
intel_dp->aux.name, connector->kdev->kobj.name);
@@ -6833,9 +6845,9 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
0, 0 },
};
static inline
int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
u8 *rx_status)
static int
intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
u8 *rx_status)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
@@ -7424,7 +7436,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
}
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
switch (intel_dp_check_mst_status(intel_dp)) {
case -EINVAL:
/*
* If we were in MST mode, and device is not
* there, get out of MST mode
@@ -7438,6 +7451,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_dp->is_mst);
return IRQ_NONE;
case 1:
return IRQ_NONE;
default:
break;
}
}
@@ -8468,8 +8485,27 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->post_disable = g4x_post_disable_dp;
}
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A))
intel_dig_port->dp.set_link_train = cpt_set_link_train;
else
intel_dig_port->dp.set_link_train = g4x_set_link_train;
if (IS_CHERRYVIEW(dev_priv))
intel_dig_port->dp.set_signal_levels = chv_set_signal_levels;
else if (IS_VALLEYVIEW(dev_priv))
intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels;
else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
else if (IS_GEN(dev_priv, 6) && port == PORT_A)
intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
else
intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels;
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
intel_encoder->type = INTEL_OUTPUT_DP;
intel_encoder->power_domain = intel_port_to_power_domain(port);
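Rather than branching on platform at every call site, the DP code now installs the set_link_train and set_signal_levels function pointers once at encoder init and the generic paths simply invoke the hook. A minimal sketch of that pattern; the struct, platform flag and hook names below are stand-ins, not the real driver API:

#include <stdio.h>
#include <stdbool.h>

struct fake_intel_dp {
	void (*set_signal_levels)(struct fake_intel_dp *intel_dp);
};

static void g4x_levels(struct fake_intel_dp *intel_dp) { printf("g4x signal levels\n"); }
static void chv_levels(struct fake_intel_dp *intel_dp) { printf("chv signal levels\n"); }

static void dp_init(struct fake_intel_dp *intel_dp, bool is_cherryview)
{
	/* Pick the platform hook once, at encoder init time. */
	intel_dp->set_signal_levels = is_cherryview ? chv_levels : g4x_levels;
}

int main(void)
{
	struct fake_intel_dp dp;

	dp_init(&dp, true);
	/* Generic code no longer needs platform checks here. */
	dp.set_signal_levels(&dp);
	return 0;
}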


@@ -358,6 +358,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
*/
if (i915->vbt.backlight.type !=
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
i915_modparams.enable_dpcd_backlight != 1 &&
!drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
drm_info(&i915->drm,


@@ -489,7 +489,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
* here for the following ones.
*/
if (INTEL_GEN(dev_priv) < 12 || !first_mst_stream)
intel_ddi_enable_pipe_clock(pipe_config);
intel_ddi_enable_pipe_clock(encoder, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
@@ -508,6 +508,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
intel_ddi_enable_transcoder_func(encoder, pipe_config);
intel_enable_pipe(pipe_config);
intel_crtc_vblank_on(pipe_config);


@@ -80,7 +80,7 @@ intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
if (!state->dpll_set) {
state->dpll_set = true;
@@ -979,7 +979,7 @@ hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(crtc_state->port_clock / 2 != 135000))
if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
return NULL;
crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
@@ -1616,7 +1616,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
ref_clock / 0x8000;
if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
return 0;
return dco_freq / (p0 * p1 * p2 * 5);
@@ -2074,7 +2074,7 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
clk_div->p1 = best_clock.p1;
clk_div->p2 = best_clock.p2;
WARN_ON(best_clock.m1 != 2);
drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
clk_div->n = best_clock.n;
clk_div->m2_int = best_clock.m2 >> 22;
clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);


@@ -34,7 +34,7 @@
#define DSB_BYTE_EN_SHIFT 20
#define DSB_REG_VALUE_MASK 0xfffff
static inline bool is_dsb_busy(struct intel_dsb *dsb)
static bool is_dsb_busy(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -43,7 +43,7 @@ static inline bool is_dsb_busy(struct intel_dsb *dsb)
return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
}
static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
static bool intel_dsb_enable_engine(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -63,7 +63,7 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
return true;
}
static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
static bool intel_dsb_disable_engine(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);


@@ -121,7 +121,7 @@ struct i2c_adapter_lookup {
#define ICL_GPIO_DDPA_CTRLCLK_2 8
#define ICL_GPIO_DDPA_CTRLDATA_2 9
static inline enum port intel_dsi_seq_port_to_port(u8 port)
static enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
}


@@ -485,9 +485,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
if (!ret)
goto err_llb;
else if (ret > 1) {
drm_info(&dev_priv->drm,
"Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
drm_info_once(&dev_priv->drm,
"Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
}
fbc->threshold = ret;


@@ -302,12 +302,14 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
BITS_PER_TYPE(atomic_t));
if (old) {
WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
drm_WARN_ON(old->obj->base.dev,
!(atomic_read(&old->bits) & frontbuffer_bits));
atomic_andnot(frontbuffer_bits, &old->bits);
}
if (new) {
WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
drm_WARN_ON(new->obj->base.dev,
atomic_read(&new->bits) & frontbuffer_bits);
atomic_or(frontbuffer_bits, &new->bits);
}
}


@@ -64,7 +64,7 @@ static void assert_global_state_read_locked(struct intel_atomic_state *state)
return;
}
WARN(1, "Global state not read locked\n");
drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n");
}
struct intel_global_state *
@@ -148,7 +148,7 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state)
for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
new_obj_state, i) {
WARN_ON(obj->state != old_obj_state);
drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state);
/*
* If the new state wasn't modified (and properly


@@ -379,8 +379,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
return ret;
}
static inline
unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
static unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
{
return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
GMBUS_BYTE_COUNT_MAX;


@@ -109,18 +109,16 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return capable;
}
static inline
bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
return intel_de_read(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
HDCP_STATUS_ENC;
}
static inline
bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
return intel_de_read(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
@@ -853,8 +851,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
return ret;
}
static inline
struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
return container_of(hdcp, struct intel_connector, hdcp);
}
@@ -1856,8 +1853,7 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};
static inline
enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
{
switch (port) {
case PORT_A:
@@ -1869,8 +1865,7 @@ enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
}
}
static inline
enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
{
switch (cpu_transcoder) {
case TRANSCODER_A ... TRANSCODER_D:
@@ -1880,8 +1875,8 @@ enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
}
}
static inline int initialize_hdcp_port_data(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
static int initialize_hdcp_port_data(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;


@@ -44,7 +44,6 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -1615,10 +1614,10 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
return -EINVAL;
}
static inline
int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
u8 msg_id, bool *msg_ready,
ssize_t *msg_sz)
static int
hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
u8 msg_id, bool *msg_ready,
ssize_t *msg_sz)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
@@ -1751,12 +1750,6 @@ int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
return ret;
}
static inline
enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void)
{
return HDCP_PROTOCOL_HDMI;
}
static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
.read_bksv = intel_hdmi_hdcp_read_bksv,
@@ -2328,32 +2321,27 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
return true;
}
static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *config)
static int
intel_hdmi_ycbcr420_config(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_connector *connector = conn_state->connector;
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
if (!drm_mode_is_420_only(&connector->display_info, adjusted_mode))
return 0;
if (!connector->ycbcr_420_allowed) {
drm_err(&i915->drm,
"Platform doesn't support YCBCR420 output\n");
return false;
return -EINVAL;
}
config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
/* YCBCR 420 output conversion needs a scaler */
if (skl_update_scaler_crtc(config)) {
drm_dbg_kms(&i915->drm,
"Scaler allocation for output failed\n");
return false;
}
intel_pch_panel_fitting(intel_crtc, config,
DRM_MODE_SCALE_FULLSCREEN);
return true;
return intel_pch_panel_fitting(crtc_state, conn_state);
}
static int intel_hdmi_port_clock(int clock, int bpc)
@@ -2481,13 +2469,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;
if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
drm_err(&dev_priv->drm,
"Can't support YCBCR420 output\n");
return -EINVAL;
}
}
ret = intel_hdmi_ycbcr420_config(pipe_config, conn_state);
if (ret)
return ret;
pipe_config->limited_color_range =
intel_hdmi_limited_color_range(pipe_config, conn_state);
@@ -2878,8 +2862,6 @@ intel_hdmi_connector_register(struct drm_connector *connector)
if (ret)
return ret;
intel_connector_debugfs_add(connector);
intel_hdmi_create_i2c_symlink(connector);
return ret;

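Worth noting in the intel_hdmi.c hunk above: intel_hdmi_ycbcr420_config() now takes the crtc and connector states directly and returns 0 or a negative errno instead of a bool, so compute_config just propagates the result rather than turning false into -EINVAL and logging a second error. A trimmed sketch of the resulting helper, assuming the real one also logs why the platform rejected 4:2:0:

static int ycbcr420_config_sketch(struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state)
{
        struct drm_connector *connector = conn_state->connector;

        /* Not a 4:2:0-only mode: nothing to do. */
        if (!drm_mode_is_420_only(&connector->display_info,
                                  &crtc_state->hw.adjusted_mode))
                return 0;

        if (!connector->ycbcr_420_allowed)
                return -EINVAL; /* caller simply returns this */

        crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

        /* 4:2:0 output needs the PCH-style panel fitter / scaler. */
        return intel_pch_panel_fitting(crtc_state, conn_state);
}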

@@ -403,6 +403,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
unsigned int lvds_bpp;
int ret;
/* Should never happen!! */
if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
@@ -436,16 +437,15 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
if (HAS_PCH_SPLIT(dev_priv)) {
if (HAS_PCH_SPLIT(dev_priv))
pipe_config->has_pch_encoder = true;
intel_pch_panel_fitting(intel_crtc, pipe_config,
conn_state->scaling_mode);
} else {
intel_gmch_panel_fitting(intel_crtc, pipe_config,
conn_state->scaling_mode);
}
if (HAS_GMCH(dev_priv))
ret = intel_gmch_panel_fitting(pipe_config, conn_state);
else
ret = intel_pch_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
/*
* XXX: It would be nice to support lower refresh rates on the

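With both fitter helpers now taking (crtc_state, conn_state) and returning an errno, callers such as LVDS and DSI reduce to the same few lines: pick the GMCH or PCH variant from the platform and bail out on failure. Condensed from the hunk above:

if (HAS_PCH_SPLIT(dev_priv))
        pipe_config->has_pch_encoder = true;

if (HAS_GMCH(dev_priv))
        ret = intel_gmch_panel_fitting(pipe_config, conn_state);
else
        ret = intel_pch_panel_fitting(pipe_config, conn_state);
if (ret)
        return ret;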

@@ -281,7 +281,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
enum pipe pipe = overlay->crtc->pipe;
struct intel_frontbuffer *from = NULL, *to = NULL;
WARN_ON(overlay->old_vma);
drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
if (overlay->vma)
from = intel_frontbuffer_get(overlay->vma->obj);
@@ -350,7 +350,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
struct i915_vma *vma;
vma = fetch_and_zero(&overlay->old_vma);
if (WARN_ON(!vma))
if (drm_WARN_ON(&overlay->i915->drm, !vma))
return;
intel_frontbuffer_flip_complete(overlay->i915,
@@ -396,7 +396,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
struct i915_request *rq;
u32 *cs, flip_addr = overlay->flip_addr;
WARN_ON(!overlay->active);
drm_WARN_ON(&overlay->i915->drm, !overlay->active);
/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether


@@ -176,24 +176,23 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector)
}
/* adjusted_mode has been preset to be the panel's fixed mode */
void
intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode)
int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int x = 0, y = 0, width = 0, height = 0;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int x, y, width, height;
/* Native modes don't need fitting */
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
goto done;
if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w &&
adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
return 0;
switch (fitting_mode) {
switch (conn_state->scaling_mode) {
case DRM_MODE_SCALE_CENTER:
width = pipe_config->pipe_src_w;
height = pipe_config->pipe_src_h;
width = crtc_state->pipe_src_w;
height = crtc_state->pipe_src_h;
x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
break;
@@ -202,18 +201,18 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
/* Scale but preserve the aspect ratio */
{
u32 scaled_width = adjusted_mode->crtc_hdisplay
* pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w
* crtc_state->pipe_src_h;
u32 scaled_height = crtc_state->pipe_src_w
* adjusted_mode->crtc_vdisplay;
if (scaled_width > scaled_height) { /* pillar */
width = scaled_height / pipe_config->pipe_src_h;
width = scaled_height / crtc_state->pipe_src_h;
if (width & 1)
width++;
x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->crtc_vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
height = scaled_width / pipe_config->pipe_src_w;
height = scaled_width / crtc_state->pipe_src_w;
if (height & 1)
height++;
y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
@@ -227,6 +226,10 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
}
break;
case DRM_MODE_SCALE_NONE:
WARN_ON(adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w);
WARN_ON(adjusted_mode->crtc_vdisplay != crtc_state->pipe_src_h);
/* fall through */
case DRM_MODE_SCALE_FULLSCREEN:
x = y = 0;
width = adjusted_mode->crtc_hdisplay;
@@ -234,14 +237,15 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
break;
default:
WARN(1, "bad panel fit mode: %d\n", fitting_mode);
return;
MISSING_CASE(conn_state->scaling_mode);
return -EINVAL;
}
done:
pipe_config->pch_pfit.pos = (x << 16) | y;
pipe_config->pch_pfit.size = (width << 16) | height;
pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
drm_rect_init(&crtc_state->pch_pfit.dst,
x, y, width, height);
crtc_state->pch_pfit.enabled = true;
return 0;
}
static void
@@ -287,7 +291,7 @@ centre_vertically(struct drm_display_mode *adjusted_mode,
adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width;
}
static inline u32 panel_fitter_scaling(u32 source, u32 target)
static u32 panel_fitter_scaling(u32 source, u32 target)
{
/*
* Floating point operation is not supported. So the FACTOR
@@ -300,13 +304,14 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
static void i965_scale_aspect(struct intel_crtc_state *crtc_state,
u32 *pfit_control)
{
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
crtc_state->pipe_src_h;
u32 scaled_height = crtc_state->pipe_src_w *
adjusted_mode->crtc_vdisplay;
/* 965+ is easy, it does everything in hw */
@@ -316,18 +321,18 @@ static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
else if (scaled_width < scaled_height)
*pfit_control |= PFIT_ENABLE |
PFIT_SCALING_LETTER;
else if (adjusted_mode->crtc_hdisplay != pipe_config->pipe_src_w)
else if (adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w)
*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
}
static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
u32 *pfit_control, u32 *pfit_pgm_ratios,
u32 *border)
{
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
crtc_state->pipe_src_h;
u32 scaled_height = crtc_state->pipe_src_w *
adjusted_mode->crtc_vdisplay;
u32 bits;
@@ -339,11 +344,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
if (scaled_width > scaled_height) { /* pillar */
centre_horizontally(adjusted_mode,
scaled_height /
pipe_config->pipe_src_h);
crtc_state->pipe_src_h);
*border = LVDS_BORDER_ENABLE;
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay) {
bits = panel_fitter_scaling(pipe_config->pipe_src_h,
if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay) {
bits = panel_fitter_scaling(crtc_state->pipe_src_h,
adjusted_mode->crtc_vdisplay);
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
@@ -355,11 +360,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
} else if (scaled_width < scaled_height) { /* letter */
centre_vertically(adjusted_mode,
scaled_width /
pipe_config->pipe_src_w);
crtc_state->pipe_src_w);
*border = LVDS_BORDER_ENABLE;
if (pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
bits = panel_fitter_scaling(pipe_config->pipe_src_w,
if (crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
bits = panel_fitter_scaling(crtc_state->pipe_src_w,
adjusted_mode->crtc_hdisplay);
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
@@ -377,35 +382,35 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
}
}
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode)
int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
/* Native modes don't need fitting */
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w &&
adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h)
goto out;
switch (fitting_mode) {
switch (conn_state->scaling_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
centre_horizontally(adjusted_mode, crtc_state->pipe_src_w);
centre_vertically(adjusted_mode, crtc_state->pipe_src_h);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
if (INTEL_GEN(dev_priv) >= 4)
i965_scale_aspect(pipe_config, &pfit_control);
i965_scale_aspect(crtc_state, &pfit_control);
else
i9xx_scale_aspect(pipe_config, &pfit_control,
i9xx_scale_aspect(crtc_state, &pfit_control,
&pfit_pgm_ratios, &border);
break;
case DRM_MODE_SCALE_FULLSCREEN:
@@ -413,8 +418,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay ||
crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= PFIT_SCALING_AUTO;
@@ -426,15 +431,14 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
}
break;
default:
drm_WARN(&dev_priv->drm, 1, "bad panel fit mode: %d\n",
fitting_mode);
return;
MISSING_CASE(conn_state->scaling_mode);
return -EINVAL;
}
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= PFIT_PIPE(intel_crtc->pipe) | PFIT_FILTER_FUZZY;
pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY;
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
@@ -443,12 +447,14 @@ out:
}
/* Make sure pre-965 set dither correctly for 18bpp panels. */
if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18)
if (INTEL_GEN(dev_priv) < 4 && crtc_state->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
pipe_config->gmch_pfit.control = pfit_control;
pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
pipe_config->gmch_pfit.lvds_border_bits = border;
crtc_state->gmch_pfit.control = pfit_control;
crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
crtc_state->gmch_pfit.lvds_border_bits = border;
return 0;
}
/**
@@ -484,8 +490,8 @@ static u32 scale(u32 source_val,
}
/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
static inline u32 scale_user_to_hw(struct intel_connector *connector,
u32 user_level, u32 user_max)
static u32 scale_user_to_hw(struct intel_connector *connector,
u32 user_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;
@@ -495,8 +501,8 @@ static inline u32 scale_user_to_hw(struct intel_connector *connector,
/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
* to [hw_min..hw_max]. */
static inline u32 clamp_user_to_hw(struct intel_connector *connector,
u32 user_level, u32 user_max)
static u32 clamp_user_to_hw(struct intel_connector *connector,
u32 user_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;
u32 hw_level;
@@ -508,8 +514,8 @@ static inline u32 clamp_user_to_hw(struct intel_connector *connector,
}
/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
static inline u32 scale_hw_to_user(struct intel_connector *connector,
u32 hw_level, u32 user_max)
static u32 scale_hw_to_user(struct intel_connector *connector,
u32 hw_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;

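Another structural change in intel_panel.c: the PCH fitter window is now kept as a struct drm_rect (crtc_state->pch_pfit.dst, filled via drm_rect_init()) rather than the packed pos/size words, so readers can use the drm_rect helpers. Where the programming side still wants the old x << 16 | y packing that the removed lines produced, it can be rebuilt trivially; a sketch:

/* Sketch: rebuild the legacy packed layout from the drm_rect. */
static void pack_pfit_window(const struct drm_rect *dst, u32 *pos, u32 *size)
{
        *pos = (dst->x1 << 16) | dst->y1;
        *size = (drm_rect_width(dst) << 16) | drm_rect_height(dst);
}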

@@ -25,12 +25,10 @@ int intel_panel_init(struct intel_panel *panel,
void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
void intel_pch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode);
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode);
int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector,


@@ -34,6 +34,7 @@ tc_port_load_fia_params(struct drm_i915_private *i915,
if (INTEL_INFO(i915)->display.has_modular_fia) {
modular_fia = intel_uncore_read(&i915->uncore,
PORT_TX_DFLEXDPSP(FIA1));
drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
modular_fia &= MODULAR_FIA_MASK;
} else {
modular_fia = 0;
@@ -52,6 +53,62 @@ tc_port_load_fia_params(struct drm_i915_private *i915,
}
}
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
if (INTEL_GEN(i915) == 11)
return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
else
return POWER_DOMAIN_TC_COLD_OFF;
}
static intel_wakeref_t
tc_cold_block(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum intel_display_power_domain domain;
if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
return 0;
domain = tc_cold_get_power_domain(dig_port);
return intel_display_power_get(i915, domain);
}
static void
tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum intel_display_power_domain domain;
/*
* wakeref == -1 means an error happened while saving save_depot_stack,
* but power should still be put down; 0 is an invalid save_depot_stack
* id, so it can be used to skip the put for non-TC legacy ports.
*/
if (wakeref == 0)
return;
domain = tc_cold_get_power_domain(dig_port);
intel_display_power_put_async(i915, domain, wakeref);
}
static void
assert_tc_cold_blocked(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
bool enabled;
if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
return;
enabled = intel_display_power_is_enabled(i915,
tc_cold_get_power_domain(dig_port));
drm_WARN_ON(&i915->drm, !enabled);
}
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -62,6 +119,7 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
assert_tc_cold_blocked(dig_port);
lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -77,6 +135,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
assert_tc_cold_blocked(dig_port);
return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -91,6 +150,8 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
if (dig_port->tc_mode != TC_PORT_DP_ALT)
return 4;
assert_tc_cold_blocked(dig_port);
lane_mask = 0;
with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
lane_mask = intel_tc_port_get_lane_mask(dig_port);
@@ -123,6 +184,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
drm_WARN_ON(&i915->drm,
lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
assert_tc_cold_blocked(dig_port);
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
@@ -420,9 +483,14 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
enum tc_port_mode old_tc_mode = dig_port->tc_mode;
intel_display_power_flush_work(i915);
drm_WARN_ON(&i915->drm,
intel_display_power_is_enabled(i915,
intel_aux_power_domain(dig_port)));
if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port) {
enum intel_display_power_domain aux_domain;
bool aux_powered;
aux_domain = intel_aux_power_domain(dig_port);
aux_powered = intel_display_power_is_enabled(i915, aux_domain);
drm_WARN_ON(&i915->drm, aux_powered);
}
icl_tc_phy_disconnect(dig_port);
icl_tc_phy_connect(dig_port, required_lanes);
@@ -445,9 +513,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
intel_wakeref_t tc_cold_wref;
int active_links = 0;
mutex_lock(&dig_port->tc_lock);
tc_cold_wref = tc_cold_block(dig_port);
dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
if (dig_port->dp.is_mst)
@@ -473,6 +543,7 @@ out:
dig_port->tc_port_name,
tc_port_mode_name(dig_port->tc_mode));
tc_cold_unblock(dig_port, tc_cold_wref);
mutex_unlock(&dig_port->tc_lock);
}
@@ -494,10 +565,15 @@ static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
bool intel_tc_port_connected(struct intel_digital_port *dig_port)
{
bool is_connected;
intel_wakeref_t tc_cold_wref;
intel_tc_port_lock(dig_port);
tc_cold_wref = tc_cold_block(dig_port);
is_connected = tc_port_live_status_mask(dig_port) &
BIT(dig_port->tc_mode);
tc_cold_unblock(dig_port, tc_cold_wref);
intel_tc_port_unlock(dig_port);
return is_connected;
@@ -513,9 +589,16 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
mutex_lock(&dig_port->tc_lock);
if (!dig_port->tc_link_refcount &&
intel_tc_port_needs_reset(dig_port))
intel_tc_port_reset_mode(dig_port, required_lanes);
if (!dig_port->tc_link_refcount) {
intel_wakeref_t tc_cold_wref;
tc_cold_wref = tc_cold_block(dig_port);
if (intel_tc_port_needs_reset(dig_port))
intel_tc_port_reset_mode(dig_port, required_lanes);
tc_cold_unblock(dig_port, tc_cold_wref);
}
drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
dig_port->tc_lock_wakeref = wakeref;

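The TC cold work above follows one pattern throughout intel_tc.c: grab a wakeref with tc_cold_block() before touching the FIA registers or live-status logic, assert_tc_cold_blocked() where the caller is expected to already hold one, and drop it with tc_cold_unblock(), which hands the power-domain put off to the async worker. The shape of a blocked section, as in intel_tc_port_connected() above:

intel_wakeref_t wakeref;

wakeref = tc_cold_block(dig_port);      /* returns 0 (no-op) on ICL non-legacy ports */
/* ... read DFLEX / live status while TC-cold exit is guaranteed ... */
tc_cold_unblock(dig_port, wakeref);     /* async power put; skipped when wakeref == 0 */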

@@ -267,7 +267,6 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
@@ -279,11 +278,11 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
if (HAS_GMCH(dev_priv))
intel_gmch_panel_fitting(crtc, pipe_config,
conn_state->scaling_mode);
ret = intel_gmch_panel_fitting(pipe_config, conn_state);
else
intel_pch_panel_fitting(crtc, pipe_config,
conn_state->scaling_mode);
ret = intel_pch_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -864,7 +863,7 @@ static void bxt_dsi_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
WARN_ON(crtc_state->has_pch_encoder);
drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
intel_crtc_vblank_on(crtc_state);
}