BackMerge v5.1-rc5 into drm-next
Need rc5 for udl fix to add udl cleanups on top.

Signed-off-by: Dave Airlie <airlied@redhat.com>
@@ -1437,7 +1437,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	}
 
 	if (index_mode)	{
-		if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+		if (guest_gma >= I915_GTT_PAGE_SIZE) {
 			ret = -EFAULT;
 			goto err;
 		}
@@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
 /**
  * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
  * @vgpu: a vGPU
- * @conncted: link state
+ * @connected: link state
  *
  * This function is used to trigger hotplug interrupt for vGPU
  *
@@ -209,7 +209,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_vgpu_primary_plane_format p;
 	struct intel_vgpu_cursor_plane_format c;
-	int ret;
+	int ret, tile_height = 1;
 
 	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
 		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
@@ -228,19 +228,19 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 			break;
 		case PLANE_CTL_TILED_X:
 			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+			tile_height = 8;
 			break;
 		case PLANE_CTL_TILED_Y:
 			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+			tile_height = 32;
 			break;
 		case PLANE_CTL_TILED_YF:
 			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+			tile_height = 32;
 			break;
 		default:
 			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
 		}
-
-		info->size = (((p.stride * p.height * p.bpp) / 8) +
-			      (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
 		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
 		if (ret)
@@ -262,14 +262,13 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 			info->x_hot = UINT_MAX;
 			info->y_hot = UINT_MAX;
 		}
-
-		info->size = (((info->stride * c.height * c.bpp) / 8)
-			      + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	} else {
 		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
 		return -EINVAL;
 	}
 
+	info->size = (info->stride * roundup(info->height, tile_height)
+		      + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (info->size == 0) {
 		gvt_vgpu_err("fb size is zero\n");
 		return -EINVAL;
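The two per-plane size computations removed above are replaced by a single shared formula that first rounds the fb height up to a whole number of tile rows. A minimal userspace sketch of the arithmetic, assuming a hypothetical 1920x1080 Y-tiled plane (tile height 32, 4 bytes per pixel) and 4096-byte pages:

#include <stdio.h>

#define PAGE_SIZE  4096u
#define PAGE_SHIFT 12

/* roundup() as in the kernel: round val up to a multiple of mult */
static unsigned int roundup_mult(unsigned int val, unsigned int mult)
{
	return ((val + mult - 1) / mult) * mult;
}

int main(void)
{
	unsigned int stride = 1920 * 4;		/* bytes per row */
	unsigned int height = 1080, tile_height = 32;

	/* old: height not rounded, so a tiled fb could be undersized */
	unsigned int old_pages =
		(stride * height + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* new: height rounded up to a whole number of tile rows */
	unsigned int new_pages =
		(stride * roundup_mult(height, tile_height)
		 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printf("old %u pages, new %u pages\n", old_pages, new_pages);
	return 0;
}

For this plane the old formula yields 2025 pages while the rounded one reserves 2040, covering the final partial tile row.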
@@ -750,14 +750,20 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 
 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
 {
-	struct intel_vgpu_ppgtt_spt *spt;
+	struct intel_vgpu_ppgtt_spt *spt, *spn;
 	struct radix_tree_iter iter;
-	void **slot;
+	LIST_HEAD(all_spt);
+	void __rcu **slot;
 
+	rcu_read_lock();
 	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
 		spt = radix_tree_deref_slot(slot);
-		ppgtt_free_spt(spt);
+		list_move(&spt->post_shadow_list, &all_spt);
 	}
+	rcu_read_unlock();
+
+	list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
+		ppgtt_free_spt(spt);
 }
 
 static int ppgtt_handle_guest_write_page_table_bytes(
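The rework above defers the actual frees out of the radix-tree walk: entries are first collected onto a private list under rcu_read_lock(), and only freed after the walk ends, so the tree is no longer modified while it is being iterated. A minimal userspace analogue of the collect-then-free pattern (a plain singly linked list stands in for both the radix tree and the list_head linkage):

#include <stdio.h>
#include <stdlib.h>

struct spt {
	int id;
	struct spt *next;	/* stand-in for post_shadow_list */
};

int main(void)
{
	struct spt *tree = NULL, *all = NULL;

	/* build a small "tree" of three entries */
	for (int i = 0; i < 3; i++) {
		struct spt *s = malloc(sizeof(*s));
		s->id = i;
		s->next = tree;
		tree = s;
	}

	/* phase 1 (would run under rcu_read_lock()): unlink only */
	while (tree) {
		struct spt *s = tree;
		tree = s->next;
		s->next = all;	/* the list_move() analogue */
		all = s;
	}

	/* phase 2 (after rcu_read_unlock()): now freeing is safe */
	while (all) {
		struct spt *s = all;
		all = s->next;
		printf("freeing spt %d\n", s->id);
		free(s);
	}
	return 0;
}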
@@ -1882,7 +1888,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
 	}
 
 	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
 	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
 	return mm;
 }
 
@@ -1942,7 +1952,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
  */
 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
 {
-	atomic_dec(&mm->pincount);
+	atomic_dec_if_positive(&mm->pincount);
 }
 
 /**
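atomic_dec_if_positive() refuses to take the counter below zero, so an unbalanced unpin can no longer drive pincount negative. A runnable C11 sketch of its compare-and-swap semantics (simplified; the kernel version differs in memory-ordering details):

#include <stdatomic.h>
#include <stdio.h>

/* Decrement *v only if the result stays >= 0.  Returns the decremented
 * value, or a negative number if no decrement was performed. */
static int dec_if_positive(atomic_int *v)
{
	int c = atomic_load(v), dec;

	do {
		dec = c - 1;
		if (dec < 0)
			break;		/* already zero: leave it alone */
	} while (!atomic_compare_exchange_weak(v, &c, dec));

	return dec;
}

int main(void)
{
	atomic_int pincount = 1;

	printf("%d\n", dec_if_positive(&pincount));	/* 0 */
	printf("%d\n", dec_if_positive(&pincount));	/* -1, count stays 0 */
	return 0;
}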
@@ -1967,9 +1977,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
 		if (ret)
 			return ret;
 
+		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
 		list_move_tail(&mm->ppgtt_mm.lru_list,
 			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
-
+		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
 	}
 
 	return 0;
@@ -1980,6 +1991,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
 	struct intel_vgpu_mm *mm;
 	struct list_head *pos, *n;
 
+	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
 	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
 
@@ -1987,9 +2000,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
 			continue;
 
 		list_del_init(&mm->ppgtt_mm.lru_list);
+		mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
 
 		invalidate_ppgtt_mm(mm);
 		return 1;
 	}
+	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
 	return 0;
 }
@@ -2659,6 +2674,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		}
 	}
 	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+	mutex_init(&gvt->gtt.ppgtt_mm_lock);
 	return 0;
 }
 
@@ -2699,7 +2715,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
 	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
 		if (mm->type == INTEL_GVT_MM_PPGTT) {
+			mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
 			list_del_init(&mm->ppgtt_mm.lru_list);
+			mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
 			if (mm->ppgtt_mm.shadowed)
 				invalidate_ppgtt_mm(mm);
 		}
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
 	void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
 	struct list_head oos_page_use_list_head;
 	struct list_head oos_page_free_list_head;
+	struct mutex ppgtt_mm_lock;
 	struct list_head ppgtt_mm_lru_list_head;
 
 	struct page *scratch_page;
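Taken together, the gtt hunks give the shared LRU list its own lock: every list_add_tail/list_move_tail/list_del_init on ppgtt_mm_lru_list_head is now bracketed by ppgtt_mm_lock, and reclaim_one_ppgtt_mm() drops the lock before calling the potentially slow invalidate_ppgtt_mm(). A compact pthread sketch of the same discipline (hypothetical types; an array-backed stack stands in for the kernel list):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static int lru[8], lru_len;		/* stand-in for the LRU list */

static void lru_add(int mm)		/* cf. intel_vgpu_create_ppgtt_mm() */
{
	pthread_mutex_lock(&lru_lock);
	lru[lru_len++] = mm;
	pthread_mutex_unlock(&lru_lock);
}

static int reclaim_one(void)		/* cf. reclaim_one_ppgtt_mm() */
{
	int victim = -1;

	pthread_mutex_lock(&lru_lock);
	if (lru_len > 0)
		victim = lru[--lru_len];	/* unlink under the lock... */
	pthread_mutex_unlock(&lru_lock);

	if (victim >= 0)
		printf("invalidating mm %d\n", victim);	/* ...slow work outside it */
	return victim >= 0;
}

int main(void)
{
	lru_add(1);
	lru_add(2);
	while (reclaim_one())
		;
	return 0;
}

The lock covers only the list manipulation, never the invalidate work itself, which keeps the critical section short.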
@@ -905,7 +905,7 @@ static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
 static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
 		void *buf, unsigned long count, bool is_write)
 {
-	void *aperture_va;
+	void __iomem *aperture_va;
 
 	if (!intel_vgpu_in_aperture(vgpu, off) ||
 	    !intel_vgpu_in_aperture(vgpu, off + count)) {
@@ -920,9 +920,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
 		return -EIO;
 
 	if (is_write)
-		memcpy(aperture_va + offset_in_page(off), buf, count);
+		memcpy_toio(aperture_va + offset_in_page(off), buf, count);
 	else
-		memcpy(buf, aperture_va + offset_in_page(off), count);
+		memcpy_fromio(buf, aperture_va + offset_in_page(off), count);
 
 	io_mapping_unmap(aperture_va);
 
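The kvmgt hunks fix a type confusion: the pointer handed back by the io_mapping_* helpers is __iomem and must not be passed to a plain memcpy(); memcpy_toio()/memcpy_fromio() copy through MMIO-safe accesses instead. Roughly, the write side behaves like this byte-wise loop through a volatile-qualified pointer (illustrative only; the real helpers are arch-specific and may use wider accesses):

#include <stddef.h>
#include <stdio.h>

/* Each store goes through a volatile pointer, so the compiler cannot
 * merge, elide, or reorder the accesses the way it may for memcpy(). */
static void toio_like(volatile unsigned char *dst, const void *src, size_t n)
{
	const unsigned char *s = src;

	while (n--)
		*dst++ = *s++;
}

int main(void)
{
	unsigned char fake_bar[16];	/* ordinary memory here; MMIO in the driver */

	toio_like(fake_bar, "hello", 6);
	printf("%s\n", (char *)fake_bar);
	return 0;
}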
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 	int i = 0;
 
 	if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-		return -1;
+		return -EINVAL;
 
 	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
 		px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (workload->shadow)
 		return 0;
 
-	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
-	if (ret < 0) {
-		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-		return ret;
-	}
-
 	/* pin shadow context by gvt even the shadow context will be pinned
 	 * when i915 alloc request. That is because gvt will update the guest
 	 * context from shadow context when workload is completed, and at that
@@ -677,6 +671,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	struct i915_request *rq;
 	int ring_id = workload->ring_id;
 	int ret;
 
@@ -686,6 +683,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	mutex_lock(&vgpu->vgpu_lock);
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
+	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+	if (ret < 0) {
+		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+		goto err_req;
+	}
+
 	ret = intel_gvt_workload_req_alloc(workload);
 	if (ret)
 		goto err_req;
@@ -702,6 +705,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
 	ret = prepare_workload(workload);
 out:
+	if (ret) {
+		/* We might still need to add request with
+		 * clean ctx to retire it properly..
+		 */
+		rq = fetch_and_zero(&workload->req);
+		i915_request_put(rq);
+	}
+
 	if (!IS_ERR_OR_NULL(workload->req)) {
 		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
 			      ring_id, workload->req);
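The new error path leans on fetch_and_zero(), an i915 utility macro that reads a location and clears it in one expression: workload->req becomes NULL, so the submit check just below skips it, while the local rq is put exactly once. Its shape, in a runnable GNU C sketch (put_request() is a hypothetical stand-in for i915_request_put()):

#include <stdio.h>
#include <stdlib.h>

/* same shape as the macro in i915_utils.h (GNU C statement expression) */
#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __T = *(ptr);	\
	*(ptr) = (__typeof__(*(ptr)))0;		\
	__T;					\
})

static void put_request(int *rq)	/* hypothetical release function */
{
	free(rq);
}

int main(void)
{
	int *req = malloc(sizeof(*req));

	put_request(fetch_and_zero(&req));
	printf("req = %p\n", (void *)req);	/* req is now NULL */
	return 0;
}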
@@ -738,7 +749,8 @@ static struct intel_vgpu_workload *pick_next_workload(
 		goto out;
 	}
 
-	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+	if (!scheduler->current_vgpu->active ||
+	    list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
 		goto out;
 
 	/*
@@ -1473,8 +1485,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 		intel_runtime_pm_put_unchecked(dev_priv);
 	}
 
-	if (ret && (vgpu_is_vm_unhealthy(ret))) {
-		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+	if (ret) {
+		if (vgpu_is_vm_unhealthy(ret))
+			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 		intel_vgpu_destroy_workload(workload);
 		return ERR_PTR(ret);
 	}
@@ -4786,7 +4786,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
 				       &ctx);
 		if (ret) {
-			ret = -EINTR;
+			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+				try_again = true;
+				continue;
+			}
 			break;
 		}
 		crtc = connector->state->crtc;
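The debugfs fix swaps the unconditional -EINTR translation for the standard drm_modeset_lock backoff dance: on -EDEADLK the context backs off and the loop retries instead of bailing out. A toy rendition of that control flow (the fake lock deadlocks once, then succeeds; real code uses drm_modeset_lock()/drm_modeset_backoff() with a drm_modeset_acquire_ctx):

#include <stdio.h>

#define EDEADLK 35

static int attempts;

/* fake lock: reports deadlock on the first try, succeeds on the second */
static int modeset_lock(void)    { return attempts++ ? 0 : -EDEADLK; }
static int modeset_backoff(void) { return 0; }	/* 0 = backoff succeeded */

int main(void)
{
	int ret, try_again;

	do {
		try_again = 0;
		ret = modeset_lock();
		if (ret) {
			if (ret == -EDEADLK && !modeset_backoff()) {
				try_again = 1;
				continue;
			}
			break;
		}
		puts("locked; do the protected work, then drop the locks");
	} while (try_again);

	return ret;
}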
@@ -323,6 +323,21 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
 	}
 }
 
+static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
+				     struct intel_dsi *intel_dsi)
+{
+	enum port port;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		WARN_ON(intel_dsi->io_wakeref[port]);
+		intel_dsi->io_wakeref[port] =
+			intel_display_power_get(dev_priv,
+						port == PORT_A ?
+						POWER_DOMAIN_PORT_DDI_A_IO :
+						POWER_DOMAIN_PORT_DDI_B_IO);
+	}
+}
+
 static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -336,13 +351,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
 		I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
 	}
 
-	for_each_dsi_port(port, intel_dsi->ports) {
-		intel_dsi->io_wakeref[port] =
-			intel_display_power_get(dev_priv,
-						port == PORT_A ?
-						POWER_DOMAIN_PORT_DDI_A_IO :
-						POWER_DOMAIN_PORT_DDI_B_IO);
-	}
+	get_dsi_io_power_domains(dev_priv, intel_dsi);
 }
 
 static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
@@ -589,6 +598,12 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
 		val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
 	}
 	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+	}
+	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
 	POSTING_READ(DPCLKA_CFGCR0_ICL);
 
 	mutex_unlock(&dev_priv->dpll_lock);
@@ -1119,7 +1134,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
 			DRM_ERROR("DDI port:%c buffer not idle\n",
 				  port_name(port));
 	}
-	gen11_dsi_ungate_clocks(encoder);
+	gen11_dsi_gate_clocks(encoder);
 }
 
 static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
@@ -1219,20 +1234,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
 	return 0;
 }
 
-static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
-				       struct intel_crtc_state *crtc_state)
+static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+					struct intel_crtc_state *crtc_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-	u64 domains = 0;
-	enum port port;
-
-	for_each_dsi_port(port, intel_dsi->ports)
-		if (port == PORT_A)
-			domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
-		else
-			domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
-
-	return domains;
+	get_dsi_io_power_domains(to_i915(encoder->base.dev),
+				 enc_to_intel_dsi(&encoder->base));
 }
 
 static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -2043,12 +2043,11 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
 	       intel_aux_power_domain(dig_port);
 }
 
-static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
-				       struct intel_crtc_state *crtc_state)
+static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
+					struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port;
-	u64 domains;
 
 	/*
 	 * TODO: Add support for MST encoders. Atm, the following should never
@@ -2056,10 +2055,10 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
 	 * hook.
 	 */
 	if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
-		return 0;
+		return;
 
 	dig_port = enc_to_dig_port(&encoder->base);
-	domains = BIT_ULL(dig_port->ddi_io_power_domain);
+	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
 	/*
 	 * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
@@ -2067,15 +2066,15 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
 	 */
 	if (intel_crtc_has_dp_encoder(crtc_state) ||
 	    intel_port_is_tc(dev_priv, encoder->port))
-		domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
+		intel_display_power_get(dev_priv,
+					intel_ddi_main_link_aux_domain(dig_port));
 
 	/*
 	 * VDSC power is needed when DSC is enabled
 	 */
 	if (crtc_state->dsc_params.compression_enable)
-		domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
-
-	return domains;
+		intel_display_power_get(dev_priv,
+					intel_dsc_power_domain(crtc_state));
 }
 
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2793,10 +2792,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
 			return;
 		}
 		/*
-		 * DSI ports should have their DDI clock ungated when disabled
-		 * and gated when enabled.
+		 * For DSI we keep the ddi clocks gated
+		 * except during enable/disable sequence.
 		 */
-		ddi_clk_needed = !encoder->base.crtc;
+		ddi_clk_needed = false;
 	}
 
 	val = I915_READ(DPCLKA_CFGCR0_ICL);
@@ -16211,8 +16211,6 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
 	struct intel_encoder *encoder;
 
 	for_each_intel_encoder(&dev_priv->drm, encoder) {
-		u64 get_domains;
-		enum intel_display_power_domain domain;
 		struct intel_crtc_state *crtc_state;
 
 		if (!encoder->get_power_domains)
@@ -16226,9 +16224,7 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
 			continue;
 
 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
-		get_domains = encoder->get_power_domains(encoder, crtc_state);
-		for_each_power_domain(domain, get_domains)
-			intel_display_power_get(dev_priv, domain);
+		encoder->get_power_domains(encoder, crtc_state);
 	}
 }
@@ -1856,42 +1856,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
 	return -EINVAL;
 }
 
-/* Optimize link config in order: max bpp, min lanes, min clock */
-static int
-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
-				  struct intel_crtc_state *pipe_config,
-				  const struct link_config_limits *limits)
-{
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	int bpp, clock, lane_count;
-	int mode_rate, link_clock, link_avail;
-
-	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
-		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
-						   bpp);
-
-		for (lane_count = limits->min_lane_count;
-		     lane_count <= limits->max_lane_count;
-		     lane_count <<= 1) {
-			for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
-				link_clock = intel_dp->common_rates[clock];
-				link_avail = intel_dp_max_data_rate(link_clock,
-								    lane_count);
-
-				if (mode_rate <= link_avail) {
-					pipe_config->lane_count = lane_count;
-					pipe_config->pipe_bpp = bpp;
-					pipe_config->port_clock = link_clock;
-
-					return 0;
-				}
-			}
-		}
-	}
-
-	return -EINVAL;
-}
-
 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
 {
 	int i, num_bpc;
@@ -2028,15 +1992,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 	limits.min_bpp = 6 * 3;
 	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 
-	if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
+	if (intel_dp_is_edp(intel_dp)) {
 		/*
 		 * Use the maximum clock and number of lanes the eDP panel
-		 * advertizes being capable of. The eDP 1.3 and earlier panels
-		 * are generally designed to support only a single clock and
-		 * lane configuration, and typically these values correspond to
-		 * the native resolution of the panel. With eDP 1.4 rate select
-		 * and DSC, this is decreasingly the case, and we need to be
-		 * able to select less than maximum link config.
+		 * advertizes being capable of. The panels are generally
+		 * designed to support only a single clock and lane
+		 * configuration, and typically these values correspond to the
+		 * native resolution of the panel.
 		 */
 		limits.min_lane_count = limits.max_lane_count;
 		limits.min_clock = limits.max_clock;
@@ -2050,22 +2012,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 		      intel_dp->common_rates[limits.max_clock],
 		      limits.max_bpp, adjusted_mode->crtc_clock);
 
-	if (intel_dp_is_edp(intel_dp))
-		/*
-		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
-		 * section A.1: "It is recommended that the minimum number of
-		 * lanes be used, using the minimum link rate allowed for that
-		 * lane configuration."
-		 *
-		 * Note that we use the max clock and lane count for eDP 1.3 and
-		 * earlier, and fast vs. wide is irrelevant.
-		 */
-		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
-							&limits);
-	else
-		/* Optimize for slow and wide. */
-		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
-							&limits);
+	/*
+	 * Optimize for slow and wide. This is the place to add alternative
+	 * optimization policy.
+	 */
+	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
 
 	/* enable compression if the mode doesn't fit available BW */
 	DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
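With the fast-and-narrow variant gone, every DP link is configured by the slow-and-wide search: highest bpp first, then the lowest link clock that carries the mode, preferring extra lanes over a faster clock (for eDP the limits above are pinned to the panel maximum, so the search degenerates to that single candidate). A self-contained sketch of the ordering, with hypothetical rates, lane counts, and mode-rate threshold; the 8/10 factor models 8b/10b channel coding as in intel_dp_max_data_rate():

#include <stdio.h>

static const int rates[] = { 162000, 270000, 540000 };	/* link clocks, kHz */
static const int lanes[] = { 1, 2, 4 };

static long long avail(int rate, int lane_count)
{
	return (long long)rate * lane_count * 8 / 10;	/* 8b/10b overhead */
}

int main(void)
{
	long long mode_rate = 400000;	/* hypothetical requirement */

	/* slow and wide: lowest clock first, as many lanes as needed */
	for (int c = 0; c < 3; c++)
		for (int l = 0; l < 3; l++)
			if (avail(rates[c], lanes[l]) >= mode_rate) {
				/* picks 162000 kHz x4; the removed fast-and-narrow
				 * order (lanes outer) would pick 540000 kHz x1 */
				printf("picked %d kHz x%d\n", rates[c], lanes[l]);
				return 0;
			}
	puts("mode does not fit any link config");
	return 1;
}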
@@ -271,10 +271,12 @@ struct intel_encoder {
 	 * be set correctly before calling this function. */
 	void (*get_config)(struct intel_encoder *,
 			   struct intel_crtc_state *pipe_config);
-	/* Returns a mask of power domains that need to be referenced as part
-	 * of the hardware state readout code. */
-	u64 (*get_power_domains)(struct intel_encoder *encoder,
-				 struct intel_crtc_state *crtc_state);
+	/*
+	 * Acquires the power domains needed for an active encoder during
+	 * hardware state readout.
+	 */
+	void (*get_power_domains)(struct intel_encoder *encoder,
+				  struct intel_crtc_state *crtc_state);
 	/*
 	 * Called during system suspend after all pending requests for the
 	 * encoder are flushed (for example for DP AUX transactions) and
@@ -256,6 +256,28 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
+static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	u32 tmp;
+
+	tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
+	case PIPEMISC_DITHER_6_BPC:
+		return 18;
+	case PIPEMISC_DITHER_8_BPC:
+		return 24;
+	case PIPEMISC_DITHER_10_BPC:
+		return 30;
+	case PIPEMISC_DITHER_12_BPC:
+		return 36;
+	default:
+		MISSING_CASE(tmp);
+		return 0;
+	}
+}
+
 static int intel_dsi_compute_config(struct intel_encoder *encoder,
 				    struct intel_crtc_state *pipe_config,
 				    struct drm_connector_state *conn_state)
@@ -1082,6 +1104,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
 	bpp = mipi_dsi_pixel_format_to_bpp(
 			pixel_format_from_register_bits(fmt));
 
+	pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
 	/* Enable Frame time stamo based scanline reporting */
 	adjusted_mode->private_flags |=
 			I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;