Diffstat (limited to 'drivers/gpu'): 60 files changed, 1397 insertions, 389 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1c9ed4c52760..789dce9e2608 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -19,7 +19,7 @@ subdir-ccflags-y += -Wno-type-limits
 subdir-ccflags-y += -Wno-missing-field-initializers
 subdir-ccflags-y += -Wno-sign-compare
 subdir-ccflags-y += -Wno-shift-negative-value
-subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
+subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
 subdir-ccflags-y += $(call cc-disable-warning, frame-address)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
 
@@ -269,6 +269,7 @@ i915-y += \
	display/intel_pch_display.o \
	display/intel_pch_refclk.o \
	display/intel_plane_initial.o \
+	display/intel_pmdemand.o \
	display/intel_psr.o \
	display/intel_quirks.o \
	display/intel_sprite.o \
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 616654adbfb8..b10488324457 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -1033,10 +1033,13 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 						      DSPLINOFF(i9xx_plane));
 		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & DISP_ADDR_MASK;
 	} else {
+		offset = 0;
 		base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
 	}
 	plane_config->base = base;
 
+	drm_WARN_ON(&dev_priv->drm, offset != 0);
+
 	val = intel_de_read(dev_priv, PIPESRC(pipe));
 	fb->width = REG_FIELD_GET(PIPESRC_WIDTH_MASK, val) + 1;
 	fb->height = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, val) + 1;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 4125ee07a271..7d9578ebae55 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -722,7 +722,7 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
 {
 	struct intel_crtc_state *crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
-	struct intel_plane_state *plane_state;
+	struct intel_plane_state __maybe_unused *plane_state;
 	struct intel_plane *plane;
 	int i;
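Note: the Makefile hunk above re-enables -Wunused-but-set-variable (previously disabled) wherever the compiler supports it, and the __maybe_unused annotations in the following hunks keep iterator-macro locals, which the macros assign even when the loop body never reads them, from tripping the now-active warning. A minimal sketch of the pattern; the macro and function below are illustrative, not i915 code:

    /* __maybe_unused comes from <linux/compiler_attributes.h>. */
    #include <linux/compiler_attributes.h>

    /* A for_each-style macro assigns 'item' even when the body never reads it. */
    #define for_each_demo_item(item, arr, n, i) \
        for ((i) = 0; (i) < (n) && ((item) = &(arr)[(i)], true); (i)++)

    static int demo_count_items(int *arr, int n)
    {
        int __maybe_unused *item;
        int i, count = 0;

        for_each_demo_item(item, arr, n, i)
            count++; /* 'item' is set by the macro but unused here */

        return count;
    }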
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 597d5816ad1b..bef96db62c80 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -182,7 +182,7 @@ static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
 	val2 = intel_uncore_read(&dev_priv->uncore,
 				 MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
 	dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
-	sp->dclk = DIV_ROUND_UP((16667 * dclk), 1000);
+	sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
 	sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
 	sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);
 
@@ -379,7 +379,7 @@ static const struct intel_sa_info mtl_sa_info = {
 	.deburst = 32,
 	.deprogbwlimit = 38, /* GB/s */
 	.displayrtids = 256,
-	.derating = 20,
+	.derating = 10,
 };
 
 static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
@@ -534,10 +534,14 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
 			bi->deratedbw[j] = min(maxdebw,
 					       bw * (100 - sa->derating) / 100);
+			bi->peakbw[j] = DIV_ROUND_CLOSEST(sp->dclk *
+							  num_channels *
+							  qi.channel_width, 8);
 
 			drm_dbg_kms(&dev_priv->drm,
-				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
-				    i, j, bi->num_planes, bi->deratedbw[j]);
+				    "BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
+				    i, j, bi->num_planes, bi->deratedbw[j],
+				    bi->peakbw[j]);
 		}
 
 		for (j = 0; j < qi.num_psf_points; j++) {
@@ -589,8 +593,8 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
 	i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
 }
 
-static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
-			       int num_planes, int qgv_point)
+static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
+				     int num_planes, int qgv_point)
 {
 	int i;
 
@@ -611,14 +615,14 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
 			return UINT_MAX;
 
 		if (num_planes >= bi->num_planes)
-			return bi->deratedbw[qgv_point];
+			return i;
 	}
 
-	return 0;
+	return UINT_MAX;
 }
 
-static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
-			       int num_planes, int qgv_point)
+static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
+				     int num_planes, int qgv_point)
 {
 	int i;
 
@@ -639,10 +643,10 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
 			return UINT_MAX;
 
 		if (num_planes <= bi->num_planes)
-			return bi->deratedbw[qgv_point];
+			return i;
 	}
 
-	return dev_priv->display.bw.max[0].deratedbw[qgv_point];
+	return 0;
 }
 
 static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
@@ -799,6 +803,210 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
 	return to_intel_bw_state(bw_state);
 }
 
+static int mtl_find_qgv_points(struct drm_i915_private *i915,
+			       unsigned int data_rate,
+			       unsigned int num_active_planes,
+			       struct intel_bw_state *new_bw_state)
+{
+	unsigned int best_rate = UINT_MAX;
+	unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+	unsigned int qgv_peak_bw  = 0;
+	int i;
+	int ret;
+
+	ret = intel_atomic_lock_global_state(&new_bw_state->base);
+	if (ret)
+		return ret;
+
+	/*
+	 * If SAGV cannot be enabled, disable the pcode SAGV by passing all 1's
+	 * for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is
+	 * not enabled. PM Demand code will clamp the value for the register
+	 */
+	if (!intel_can_enable_sagv(i915, new_bw_state)) {
+		new_bw_state->qgv_point_peakbw = U16_MAX;
+		drm_dbg_kms(&i915->drm, "No SAGV, use UINT_MAX as peak bw.");
+		return 0;
+	}
+
+	/*
+	 * Find the best QGV point by comparing the data_rate with max data rate
+	 * offered per plane group
+	 */
+	for (i = 0; i < num_qgv_points; i++) {
+		unsigned int bw_index =
+			tgl_max_bw_index(i915, num_active_planes, i);
+		unsigned int max_data_rate;
+
+		if (bw_index >= ARRAY_SIZE(i915->display.bw.max))
+			continue;
+
+		max_data_rate = i915->display.bw.max[bw_index].deratedbw[i];
+
+		if (max_data_rate < data_rate)
+			continue;
+
+		if (max_data_rate - data_rate < best_rate) {
+			best_rate = max_data_rate - data_rate;
+			qgv_peak_bw = i915->display.bw.max[bw_index].peakbw[i];
+		}
+
+		drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
+			    i, max_data_rate, data_rate, qgv_peak_bw);
+	}
+
+	drm_dbg_kms(&i915->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
+		    qgv_peak_bw, data_rate);
+
+	/*
+	 * The display configuration cannot be supported if no QGV point
+	 * satisfying the required data rate is found
+	 */
+	if (qgv_peak_bw == 0) {
+		drm_dbg_kms(&i915->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
+			    data_rate, num_active_planes);
+		return -EINVAL;
+	}
+
+	/* MTL PM DEMAND expects QGV BW parameter in multiples of 100 mbps */
+	new_bw_state->qgv_point_peakbw = DIV_ROUND_CLOSEST(qgv_peak_bw, 100);
+
+	return 0;
+}
+
+static int icl_find_qgv_points(struct drm_i915_private *i915,
+			       unsigned int data_rate,
+			       unsigned int num_active_planes,
+			       const struct intel_bw_state *old_bw_state,
+			       struct intel_bw_state *new_bw_state)
+{
+	unsigned int max_bw_point = 0;
+	unsigned int max_bw = 0;
+	unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+	unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+	u16 psf_points = 0;
+	u16 qgv_points = 0;
+	int i;
+	int ret;
+
+	ret = intel_atomic_lock_global_state(&new_bw_state->base);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < num_qgv_points; i++) {
+		unsigned int idx;
+		unsigned int max_data_rate;
+
+		if (DISPLAY_VER(i915) > 11)
+			idx = tgl_max_bw_index(i915, num_active_planes, i);
+		else
+			idx = icl_max_bw_index(i915, num_active_planes, i);
+
+		if (idx >= ARRAY_SIZE(i915->display.bw.max))
+			continue;
+
+		max_data_rate = i915->display.bw.max[idx].deratedbw[i];
+
+		/*
+		 * We need to know which qgv point gives us
+		 * maximum bandwidth in order to disable SAGV
+		 * if we find that we exceed SAGV block time
+		 * with watermarks. By that moment we already
+		 * have those, as it is calculated earlier in
+		 * intel_atomic_check,
+		 */
+		if (max_data_rate > max_bw) {
+			max_bw_point = i;
+			max_bw = max_data_rate;
+		}
+		if (max_data_rate >= data_rate)
+			qgv_points |= BIT(i);
+
+		drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n",
+			    i, max_data_rate, data_rate);
+	}
+
+	for (i = 0; i < num_psf_gv_points; i++) {
+		unsigned int max_data_rate = adl_psf_bw(i915, i);
+
+		if (max_data_rate >= data_rate)
+			psf_points |= BIT(i);
+
+		drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d"
+			    " required %d\n",
+			    i, max_data_rate, data_rate);
+	}
+
+	/*
+	 * BSpec states that we always should have at least one allowed point
+	 * left, so if we couldn't - simply reject the configuration for obvious
+	 * reasons.
+	 */
+	if (qgv_points == 0) {
+		drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory"
+			    " bandwidth %d for display configuration(%d active planes).\n",
+			    data_rate, num_active_planes);
+		return -EINVAL;
+	}
+
+	if (num_psf_gv_points > 0 && psf_points == 0) {
+		drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory"
+			    " bandwidth %d for display configuration(%d active planes).\n",
+			    data_rate, num_active_planes);
+		return -EINVAL;
+	}
+
+	/*
+	 * Leave only single point with highest bandwidth, if
+	 * we can't enable SAGV due to the increased memory latency it may
+	 * cause.
+	 */
+	if (!intel_can_enable_sagv(i915, new_bw_state)) {
+		qgv_points = BIT(max_bw_point);
+		drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point %d\n",
+			    max_bw_point);
+	}
+
+	/*
+	 * We store the ones which need to be masked as that is what PCode
+	 * actually accepts as a parameter.
+	 */
+	new_bw_state->qgv_points_mask =
+		~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
+		  ADLS_PCODE_REQ_PSF_PT(psf_points)) &
+		icl_qgv_points_mask(i915);
+
+	/*
+	 * If the actual mask had changed we need to make sure that
+	 * the commits are serialized(in case this is a nomodeset, nonblocking)
+	 */
+	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
+		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int intel_bw_check_qgv_points(struct drm_i915_private *i915,
+				     const struct intel_bw_state *old_bw_state,
+				     struct intel_bw_state *new_bw_state)
+{
+	unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state);
+	unsigned int num_active_planes =
+		intel_bw_num_active_planes(i915, new_bw_state);
+
+	data_rate = DIV_ROUND_UP(data_rate, 1000);
+
+	if (DISPLAY_VER(i915) >= 14)
+		return mtl_find_qgv_points(i915, data_rate, num_active_planes,
+					   new_bw_state);
+	else
+		return icl_find_qgv_points(i915, data_rate, num_active_planes,
+					   old_bw_state, new_bw_state);
+}
+
 static bool intel_bw_state_changed(struct drm_i915_private *i915,
 				   const struct intel_bw_state *old_bw_state,
 				   const struct intel_bw_state *new_bw_state)
@@ -1045,20 +1253,14 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
 
 int intel_bw_atomic_check(struct intel_atomic_state *state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	const struct intel_bw_state *old_bw_state;
-	struct intel_bw_state *new_bw_state;
-	unsigned int data_rate;
-	unsigned int num_active_planes;
-	int i, ret;
-	u16 qgv_points = 0, psf_points = 0;
-	unsigned int max_bw_point = 0, max_bw = 0;
-	unsigned int num_qgv_points = dev_priv->display.bw.max[0].num_qgv_points;
-	unsigned int num_psf_gv_points = dev_priv->display.bw.max[0].num_psf_gv_points;
 	bool changed = false;
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct intel_bw_state *new_bw_state;
+	const struct intel_bw_state *old_bw_state;
+	int ret;
 
 	/* FIXME earlier gens need some checks too */
-	if (DISPLAY_VER(dev_priv) < 11)
+	if (DISPLAY_VER(i915) < 11)
 		return 0;
 
 	ret = intel_bw_check_data_rate(state, &changed);
@@ -1069,8 +1271,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
 
 	new_bw_state = intel_atomic_get_new_bw_state(state);
 	if (new_bw_state &&
-	    intel_can_enable_sagv(dev_priv, old_bw_state) !=
-	    intel_can_enable_sagv(dev_priv, new_bw_state))
+	    intel_can_enable_sagv(i915, old_bw_state) !=
+	    intel_can_enable_sagv(i915, new_bw_state))
 		changed = true;
 
 	/*
@@ -1080,101 +1282,10 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
 	if (!changed)
 		return 0;
 
-	ret = intel_atomic_lock_global_state(&new_bw_state->base);
+	ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state);
 	if (ret)
 		return ret;
 
-	data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
-	data_rate = DIV_ROUND_UP(data_rate, 1000);
-
-	num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);
-
-	for (i = 0; i < num_qgv_points; i++) {
-		unsigned int max_data_rate;
-
-		if (DISPLAY_VER(dev_priv) > 11)
-			max_data_rate = tgl_max_bw(dev_priv, num_active_planes, i);
-		else
-			max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
-		/*
-		 * We need to know which qgv point gives us
-		 * maximum bandwidth in order to disable SAGV
-		 * if we find that we exceed SAGV block time
-		 * with watermarks. By that moment we already
-		 * have those, as it is calculated earlier in
-		 * intel_atomic_check,
-		 */
-		if (max_data_rate > max_bw) {
-			max_bw_point = i;
-			max_bw = max_data_rate;
-		}
-		if (max_data_rate >= data_rate)
-			qgv_points |= BIT(i);
-
-		drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
-			    i, max_data_rate, data_rate);
-	}
-
-	for (i = 0; i < num_psf_gv_points; i++) {
-		unsigned int max_data_rate = adl_psf_bw(dev_priv, i);
-
-		if (max_data_rate >= data_rate)
-			psf_points |= BIT(i);
-
-		drm_dbg_kms(&dev_priv->drm, "PSF GV point %d: max bw %d"
-			    " required %d\n",
-			    i, max_data_rate, data_rate);
-	}
-
-	/*
-	 * BSpec states that we always should have at least one allowed point
-	 * left, so if we couldn't - simply reject the configuration for obvious
-	 * reasons.
-	 */
-	if (qgv_points == 0) {
-		drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
-			    " bandwidth %d for display configuration(%d active planes).\n",
-			    data_rate, num_active_planes);
-		return -EINVAL;
-	}
-
-	if (num_psf_gv_points > 0 && psf_points == 0) {
-		drm_dbg_kms(&dev_priv->drm, "No PSF GV points provide sufficient memory"
-			    " bandwidth %d for display configuration(%d active planes).\n",
-			    data_rate, num_active_planes);
-		return -EINVAL;
-	}
-
-	/*
-	 * Leave only single point with highest bandwidth, if
-	 * we can't enable SAGV due to the increased memory latency it may
-	 * cause.
-	 */
-	if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
-		qgv_points = BIT(max_bw_point);
-		drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
-			    max_bw_point);
-	}
-
-	/*
-	 * We store the ones which need to be masked as that is what PCode
-	 * actually accepts as a parameter.
-	 */
-	new_bw_state->qgv_points_mask =
-		~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
-		  ADLS_PCODE_REQ_PSF_PT(psf_points)) &
-		icl_qgv_points_mask(dev_priv);
-
-	/*
-	 * If the actual mask had changed we need to make sure that
-	 * the commits are serialized(in case this is a nomodeset, nonblocking)
-	 */
-	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
-		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
-		if (ret)
-			return ret;
-	}
-
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index f20292143745..59cb4fc5db76 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -35,6 +35,12 @@ struct intel_bw_state {
 	u8 active_pipes;
 
 	/*
+	 * From MTL onwards, to lock a QGV point, punit expects the peak BW of
+	 * the selected QGV point as the parameter in multiples of 100MB/s
+	 */
+	u16 qgv_point_peakbw;
+
+	/*
 	 * Current QGV points mask, which restricts
 	 * some particular SAGV states, not to confuse
 	 * with pipe_sagv_mask.
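Note: a worked example of the unit conversions introduced above, with invented inputs. The DCLK register field is in 16.667 MHz units (hence DIV_ROUND_CLOSEST(16667 * dclk, 1000) instead of DIV_ROUND_UP), peak bandwidth is dclk x channels x channel width / 8, and qgv_point_peakbw is stored in multiples of 100 MB/s:

    /* Sketch only, not driver code; DIV_ROUND_CLOSEST is from <linux/math.h>
     * and u16/u32 from <linux/types.h>. Example values are made up.
     */
    #include <linux/math.h>
    #include <linux/types.h>

    static u16 demo_qgv_peakbw_encoding(u32 dclk_field, u32 num_channels,
                                        u32 channel_width)
    {
        /* DCLK field counts 16.667 MHz steps; round to the nearest MHz */
        u32 dclk = DIV_ROUND_CLOSEST(16667 * dclk_field, 1000);

        /* peak BW in MB/s: transfers/s x channels x bits per channel / 8 */
        u32 peakbw = DIV_ROUND_CLOSEST(dclk * num_channels * channel_width, 8);

        /* PM demand takes the value in multiples of 100 MB/s */
        return DIV_ROUND_CLOSEST(peakbw, 100);
    }
    /* e.g. dclk_field=240, 4 channels, 16 bits wide:
     * dclk = 4000, peakbw = 32000 MB/s, encoded value = 320.
     */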
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 1a5268e3d0a3..4207863b7b2a 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2959,7 +2959,7 @@ int intel_cdclk_atomic_check(struct intel_atomic_state *state,
 {
 	const struct intel_cdclk_state *old_cdclk_state;
 	const struct intel_cdclk_state *new_cdclk_state;
-	struct intel_plane_state *plane_state;
+	struct intel_plane_state __maybe_unused *plane_state;
 	struct intel_plane *plane;
 	int ret;
 	int i;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 70d44edd8c6e..090f242e610c 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -2225,12 +2225,10 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
 				 const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp;
 
 	if (!crtc_state->fec_enable)
 		return;
 
-	intel_dp = enc_to_intel_dp(encoder);
 	intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
 		     0, DP_TP_CTL_FEC_ENABLE);
 }
@@ -2239,12 +2237,10 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
 					const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp;
 
 	if (!crtc_state->fec_enable)
 		return;
 
-	intel_dp = enc_to_intel_dp(encoder);
 	intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
 		     DP_TP_CTL_FEC_ENABLE, 0);
 	intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index f51a55f4e9d0..d8533603ad05 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -99,6 +99,7 @@
 #include "intel_pcode.h"
 #include "intel_pipe_crc.h"
 #include "intel_plane_initial.h"
+#include "intel_pmdemand.h"
 #include "intel_pps.h"
 #include "intel_psr.h"
 #include "intel_sdvo.h"
@@ -971,7 +972,7 @@ static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
 	const struct intel_crtc_state *crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
 	u8 update_planes = crtc_state->update_planes;
-	const struct intel_plane_state *plane_state;
+	const struct intel_plane_state __maybe_unused *plane_state;
 	struct intel_plane *plane;
 	int i;
 
@@ -988,7 +989,7 @@ static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
 	const struct intel_crtc_state *crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
 	u8 update_planes = crtc_state->update_planes;
-	const struct intel_plane_state *plane_state;
+	const struct intel_plane_state __maybe_unused *plane_state;
 	struct intel_plane *plane;
 	int i;
 
@@ -5617,7 +5618,7 @@ static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
 					   struct intel_crtc *crtc,
 					   struct intel_crtc *other)
 {
-	const struct intel_plane_state *plane_state;
+	const struct intel_plane_state __maybe_unused *plane_state;
 	struct intel_plane *plane;
 	u8 plane_ids = 0;
 	int i;
@@ -5660,7 +5661,7 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
-	struct intel_plane_state *plane_state;
+	struct intel_plane_state __maybe_unused *plane_state;
 	struct intel_plane *plane;
 	struct intel_crtc *crtc;
 	int i, ret;
@@ -5715,7 +5716,7 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state)
 
 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
 {
-	struct intel_crtc_state *crtc_state;
+	struct intel_crtc_state __maybe_unused *crtc_state;
 	struct intel_crtc *crtc;
 	int i;
 
@@ -6012,8 +6013,9 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
 		 */
 		if (DISPLAY_VER(i915) < 12) {
 			drm_dbg_kms(&i915->drm,
-				    "[PLANE:%d:%s] Modifier does not support async flips\n",
-				    plane->base.base.id, plane->base.name);
+				    "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n",
+				    plane->base.base.id, plane->base.name,
+				    new_plane_state->hw.fb->modifier, DISPLAY_VER(i915));
 			return -EINVAL;
 		}
 		break;
@@ -6025,8 +6027,9 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
 		break;
 	default:
 		drm_dbg_kms(&i915->drm,
-			    "[PLANE:%d:%s] Modifier does not support async flips\n",
-			    plane->base.base.id, plane->base.name);
+			    "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
+			    plane->base.base.id, plane->base.name,
+			    new_plane_state->hw.fb->modifier);
 		return -EINVAL;
 	}
 
@@ -6352,6 +6355,10 @@ int intel_atomic_check(struct drm_device *dev,
 			return ret;
 	}
 
+	ret = intel_pmdemand_atomic_check(state);
+	if (ret)
+		goto fail;
+
 	ret = intel_atomic_check_crtcs(state);
 	if (ret)
 		goto fail;
@@ -6997,6 +7004,14 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
 		crtc->config = new_crtc_state;
 
+	/*
+	 * In XE_LPD+ Pmdemand combines many parameters such as voltage index,
+	 * plls, cdclk frequency, QGV point selection parameter etc. Voltage
+	 * index, cdclk/ddiclk frequencies are supposed to be configured before
+	 * the cdclk config is set.
+	 */
+	intel_pmdemand_pre_plane_update(state);
+
 	if (state->modeset) {
 		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
 
@@ -7116,6 +7131,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 		intel_verify_planes(state);
 
 	intel_sagv_post_plane_update(state);
+	intel_pmdemand_post_plane_update(state);
 
 	drm_atomic_helper_commit_hw_done(&state->base);
 
@@ -7164,11 +7180,12 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
 		break;
 	case FENCE_FREE:
 		{
+			struct drm_i915_private *i915 = to_i915(state->base.dev);
 			struct intel_atomic_helper *helper =
-				&to_i915(state->base.dev)->display.atomic_helper;
+				&i915->display.atomic_helper;
 
 			if (llist_add(&state->freed, &helper->free_list))
-				schedule_work(&helper->free_work);
+				queue_work(i915->unordered_wq, &helper->free_work);
 			break;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 2209811eb29e..8d2243c71dd8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -314,6 +314,8 @@ struct intel_display {
 		unsigned int deratedbw[I915_NUM_QGV_POINTS];
 		/* for each PSF GV point */
 		unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
+		/* Peak BW for each QGV point */
+		unsigned int peakbw[I915_NUM_QGV_POINTS];
 		u8 num_qgv_points;
 		u8 num_psf_gv_points;
 		u8 num_planes;
@@ -344,6 +346,15 @@ struct intel_display {
 	} dbuf;
 
 	struct {
+		wait_queue_head_t waitqueue;
+
+		/* mutex to protect pmdemand programming sequence */
+		struct mutex lock;
+
+		struct intel_global_obj obj;
+	} pmdemand;
+
+	struct {
 		/*
 		 * dkl.phy_lock protects against concurrent access of the
 		 * Dekel TypeC PHYs.
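Note: the FENCE_FREE hunk above relies on llist_add() returning true only when the list was previously empty, so exactly one producer queues the free worker per batch. A self-contained sketch of that deferred-free pattern; the demo_* names are illustrative, not i915 code:

    #include <linux/llist.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct demo_state {
        struct llist_node freed;
        /* ... payload ... */
    };

    struct demo_helper {
        struct llist_head free_list;
        struct work_struct free_work;
        struct workqueue_struct *wq;
    };

    static void demo_free_worker(struct work_struct *work)
    {
        struct demo_helper *helper =
            container_of(work, struct demo_helper, free_work);
        struct llist_node *node = llist_del_all(&helper->free_list);
        struct demo_state *state, *tmp;

        llist_for_each_entry_safe(state, tmp, node, freed)
            kfree(state);
    }

    static void demo_defer_free(struct demo_helper *helper,
                                struct demo_state *state)
    {
        /* queue the worker only on the empty -> non-empty transition */
        if (llist_add(&state->freed, &helper->free_list))
            queue_work(helper->wq, &helper->free_work);
    }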
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 2a4df62692a6..165e2c7e3126 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -6,6 +6,7 @@
 #include <linux/string_helpers.h>
 
 #include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
 #include <drm/drm_fourcc.h>
 
 #include "hsw_ips.h"
@@ -228,19 +229,18 @@ out:
 	seq_puts(m, "\n");
 }
 
-static void intel_dp_info(struct seq_file *m,
-			  struct intel_connector *intel_connector)
+static void intel_dp_info(struct seq_file *m, struct intel_connector *connector)
 {
-	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
-	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
+	const struct edid *edid = drm_edid_raw(connector->detect_edid);
 
 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
 	seq_printf(m, "\taudio support: %s\n",
-		   str_yes_no(intel_connector->base.display_info.has_audio));
+		   str_yes_no(connector->base.display_info.has_audio));
 
 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
-				edid ? edid->data : NULL, &intel_dp->aux);
+				edid, &intel_dp->aux);
 }
 
 static void intel_dp_mst_info(struct seq_file *m,
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 464df1764a86..3fd30e7f0062 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -9,6 +9,8 @@
 
 #include "i915_drv.h"
 #include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display.h"
 #include "intel_display_device.h"
 #include "intel_display_power.h"
 #include "intel_display_reg_defs.h"
@@ -222,7 +224,6 @@ static const struct intel_display_device_info i865g_display = {
 	.has_overlay = 1, \
 	I9XX_PIPE_OFFSETS, \
 	I9XX_CURSOR_OFFSETS, \
-	I9XX_COLORS, \
 	\
 	.__runtime_defaults.ip.ver = 3, \
 	.__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
@@ -231,12 +232,14 @@ static const struct intel_display_device_info i865g_display = {
 
 static const struct intel_display_device_info i915g_display = {
 	GEN3_DISPLAY,
+	I845_COLORS,
 	.cursor_needs_physical = 1,
 	.overlay_needs_physical = 1,
 };
 
 static const struct intel_display_device_info i915gm_display = {
 	GEN3_DISPLAY,
+	I9XX_COLORS,
 	.cursor_needs_physical = 1,
 	.overlay_needs_physical = 1,
 	.supports_tv = 1,
@@ -246,6 +249,7 @@ static const struct intel_display_device_info i915gm_display = {
 
 static const struct intel_display_device_info i945g_display = {
 	GEN3_DISPLAY,
+	I845_COLORS,
 	.has_hotplug = 1,
 	.cursor_needs_physical = 1,
 	.overlay_needs_physical = 1,
@@ -253,6 +257,7 @@ static const struct intel_display_device_info i945g_display = {
 
 static const struct intel_display_device_info i945gm_display = {
 	GEN3_DISPLAY,
+	I9XX_COLORS,
 	.has_hotplug = 1,
 	.cursor_needs_physical = 1,
 	.overlay_needs_physical = 1,
@@ -263,6 +268,13 @@ static const struct intel_display_device_info i945gm_display = {
 
 static const struct intel_display_device_info g33_display = {
 	GEN3_DISPLAY,
+	I845_COLORS,
+	.has_hotplug = 1,
+};
+
+static const struct intel_display_device_info pnv_display = {
+	GEN3_DISPLAY,
+	I9XX_COLORS,
 	.has_hotplug = 1,
 };
 
@@ -677,8 +689,8 @@ static const struct {
 	INTEL_I965GM_IDS(&i965gm_display),
 	INTEL_GM45_IDS(&gm45_display),
 	INTEL_G45_IDS(&g45_display),
-	INTEL_PINEVIEW_G_IDS(&g33_display),
-	INTEL_PINEVIEW_M_IDS(&g33_display),
+	INTEL_PINEVIEW_G_IDS(&pnv_display),
+	INTEL_PINEVIEW_M_IDS(&pnv_display),
 	INTEL_IRONLAKE_D_IDS(&ilk_d_display),
 	INTEL_IRONLAKE_M_IDS(&ilk_m_display),
 	INTEL_SNB_D_IDS(&snb_display),
@@ -778,3 +790,128 @@ intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid,
 
 	return &no_display;
 }
+
+void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
+{
+	struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(i915);
+	enum pipe pipe;
+
+	/* Wa_14011765242: adl-s A0,A1 */
+	if (IS_ADLS_DISPLAY_STEP(i915, STEP_A0, STEP_A2))
+		for_each_pipe(i915, pipe)
+			display_runtime->num_scalers[pipe] = 0;
+	else if (DISPLAY_VER(i915) >= 11) {
+		for_each_pipe(i915, pipe)
+			display_runtime->num_scalers[pipe] = 2;
+	} else if (DISPLAY_VER(i915) >= 9) {
+		display_runtime->num_scalers[PIPE_A] = 2;
+		display_runtime->num_scalers[PIPE_B] = 2;
+		display_runtime->num_scalers[PIPE_C] = 1;
+	}
+
+	if (DISPLAY_VER(i915) >= 13 || HAS_D12_PLANE_MINIMIZATION(i915))
+		for_each_pipe(i915, pipe)
+			display_runtime->num_sprites[pipe] = 4;
+	else if (DISPLAY_VER(i915) >= 11)
+		for_each_pipe(i915, pipe)
+			display_runtime->num_sprites[pipe] = 6;
+	else if (DISPLAY_VER(i915) == 10)
+		for_each_pipe(i915, pipe)
+			display_runtime->num_sprites[pipe] = 3;
+	else if (IS_BROXTON(i915)) {
+		/*
+		 * Skylake and Broxton currently don't expose the topmost plane as its
+		 * use is exclusive with the legacy cursor and we only want to expose
+		 * one of those, not both. Until we can safely expose the topmost plane
+		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+		 * we don't expose the topmost plane at all to prevent ABI breakage
+		 * down the line.
+		 */
+
+		display_runtime->num_sprites[PIPE_A] = 2;
+		display_runtime->num_sprites[PIPE_B] = 2;
+		display_runtime->num_sprites[PIPE_C] = 1;
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		for_each_pipe(i915, pipe)
+			display_runtime->num_sprites[pipe] = 2;
+	} else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) {
+		for_each_pipe(i915, pipe)
+			display_runtime->num_sprites[pipe] = 1;
+	}
+
+	if ((IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) &&
+	    !(intel_de_read(i915, GU_CNTL_PROTECTED) & DEPRESENT)) {
+		drm_info(&i915->drm, "Display not present, disabling\n");
+		goto display_fused_off;
+	}
+
+	if (IS_GRAPHICS_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) {
+		u32 fuse_strap = intel_de_read(i915, FUSE_STRAP);
+		u32 sfuse_strap = intel_de_read(i915, SFUSE_STRAP);
+
+		/*
+		 * SFUSE_STRAP is supposed to have a bit signalling the display
+		 * is fused off. Unfortunately it seems that, at least in
+		 * certain cases, fused off display means that PCH display
+		 * reads don't land anywhere. In that case, we read 0s.
+		 *
+		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
+		 * should be set when taking over after the firmware.
+		 */
+		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+		    (HAS_PCH_CPT(i915) &&
+		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+			drm_info(&i915->drm,
+				 "Display fused off, disabling\n");
+			goto display_fused_off;
+		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
+			drm_info(&i915->drm, "PipeC fused off\n");
+			display_runtime->pipe_mask &= ~BIT(PIPE_C);
+			display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+		}
+	} else if (DISPLAY_VER(i915) >= 9) {
+		u32 dfsm = intel_de_read(i915, SKL_DFSM);
+
+		if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
+			display_runtime->pipe_mask &= ~BIT(PIPE_A);
+			display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
+			display_runtime->fbc_mask &= ~BIT(INTEL_FBC_A);
+		}
+		if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
+			display_runtime->pipe_mask &= ~BIT(PIPE_B);
+			display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
+		}
+		if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
+			display_runtime->pipe_mask &= ~BIT(PIPE_C);
+			display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+		}
+
+		if (DISPLAY_VER(i915) >= 12 &&
+		    (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
+			display_runtime->pipe_mask &= ~BIT(PIPE_D);
+			display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
+		}
+
+		if (!display_runtime->pipe_mask)
+			goto display_fused_off;
+
+		if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
+			display_runtime->has_hdcp = 0;
+
+		if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
+			display_runtime->fbc_mask = 0;
+
+		if (DISPLAY_VER(i915) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+			display_runtime->has_dmc = 0;
+
+		if (IS_DISPLAY_VER(i915, 10, 12) &&
+		    (dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE))
+			display_runtime->has_dsc = 0;
+	}
+
+	return;
+
+display_fused_off:
+	memset(display_runtime, 0, sizeof(*display_runtime));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 2aa82cbdf1c5..706ff2aa1f55 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -8,7 +8,7 @@
 
 #include <linux/types.h>
 
-#include "display/intel_display_limits.h"
+#include "intel_display_limits.h"
 
 struct drm_i915_private;
 
@@ -124,5 +124,6 @@ struct intel_display_device_info {
 const struct intel_display_device_info *
 intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid,
 			   u16 *ver, u16 *rel, u16 *step);
+void intel_display_device_info_runtime_init(struct drm_i915_private *i915);
 
 #endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 60ce10fc7205..b909814ae02b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -47,6 +47,7 @@
 #include "intel_opregion.h"
 #include "intel_overlay.h"
 #include "intel_plane_initial.h"
+#include "intel_pmdemand.h"
 #include "intel_pps.h"
 #include "intel_quirks.h"
 #include "intel_vga.h"
@@ -211,6 +212,8 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
 	if (ret < 0)
 		goto cleanup_vga;
 
+	intel_pmdemand_init_early(i915);
+
 	intel_power_domains_init_hw(i915, false);
 
 	if (!HAS_DISPLAY(i915))
@@ -240,6 +243,10 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
 	if (ret)
 		goto cleanup_vga_client_pw_domain_dmc;
 
+	ret = intel_pmdemand_init(i915);
+	if (ret)
+		goto cleanup_vga_client_pw_domain_dmc;
+
 	init_llist_head(&i915->display.atomic_helper.free_list);
 	INIT_WORK(&i915->display.atomic_helper.free_work,
 		  intel_atomic_helper_free_state_worker);
@@ -435,7 +442,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
 	intel_unregister_dsm_handler();
 
 	/* flush any delayed tasks or pending work */
-	flush_scheduled_work();
+	flush_workqueue(i915->unordered_wq);
 
 	intel_hdcp_component_fini(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 3b2a287d2041..ae2578741dfe 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -18,6 +18,7 @@
 #include "intel_fifo_underrun.h"
 #include "intel_gmbus.h"
 #include "intel_hotplug_irq.h"
+#include "intel_pmdemand.h"
 #include "intel_psr.h"
 #include "intel_psr_regs.h"
 
@@ -827,12 +828,27 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
 		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 }
 
+static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
+{
+	wake_up_all(&dev_priv->display.pmdemand.waitqueue);
+}
+
 static void gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
 {
 	bool found = false;
 
-	if (iir & GEN8_DE_MISC_GSE) {
+	if (DISPLAY_VER(dev_priv) >= 14) {
+		if (iir & (XELPDP_PMDEMAND_RSP |
+			   XELPDP_PMDEMAND_RSPTOUT_ERR)) {
+			if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
+				drm_dbg(&dev_priv->drm,
+					"Error waiting for Punit PM Demand Response\n");
+
+			intel_pmdemand_irq_handler(dev_priv);
+			found = true;
+		}
+	} else if (iir & GEN8_DE_MISC_GSE) {
 		intel_opregion_asle_intr(dev_priv);
 		found = true;
 	}
@@ -873,7 +889,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
 	enum pipe pipe = INVALID_PIPE;
 	enum transcoder dsi_trans;
 	enum port port;
-	u32 val, tmp;
+	u32 val;
 
 	/*
	 * Incase of dual link, TE comes from DSI_1
@@ -920,7 +936,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
 
 	/* clear TE in dsi IIR */
 	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
-	tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
+	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
 }
 
 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
@@ -1576,7 +1592,10 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
 		de_port_masked |= BXT_DE_PORT_GMBUS;
 
-	if (DISPLAY_VER(dev_priv) >= 11) {
+	if (DISPLAY_VER(dev_priv) >= 14) {
+		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
+				  XELPDP_PMDEMAND_RSP;
+	} else if (DISPLAY_VER(dev_priv) >= 11) {
 		enum port port;
 
 		if (intel_bios_is_dsi_present(dev_priv, &port))
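Note: the misc IRQ handler above only wakes display.pmdemand.waitqueue; the waiter lives in intel_pmdemand.c and is not part of this excerpt. A plausible pairing, assuming the XELPDP_INITIATE_PMDEMAND_REQUEST / XELPDP_PMDEMAND_REQ_ENABLE register names used elsewhere in this series:

    /* Sketch only: wait for the Punit to ack a PM demand request. The
     * completion condition and the 10ms budget are assumptions, not
     * taken from this diff.
     */
    static bool demo_pmdemand_req_complete(struct drm_i915_private *i915)
    {
        return !(intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
                 XELPDP_PMDEMAND_REQ_ENABLE);
    }

    static void demo_pmdemand_wait(struct drm_i915_private *i915)
    {
        /* the IRQ handler's wake_up_all() makes this wait event-driven */
        if (!wait_event_timeout(i915->display.pmdemand.waitqueue,
                                demo_pmdemand_req_complete(i915),
                                msecs_to_jiffies(10)))
            drm_err(&i915->drm,
                    "timed out waiting for Punit PM demand response\n");
    }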
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 2f4f00ae2f57..db5437043904 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -20,6 +20,7 @@
 #include "intel_mchbar_regs.h"
 #include "intel_pch_refclk.h"
 #include "intel_pcode.h"
+#include "intel_pmdemand.h"
 #include "intel_pps_regs.h"
 #include "intel_snps_phy.h"
 #include "skl_watermark.h"
@@ -1082,20 +1083,29 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
 
 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
 {
+	u8 slices_mask;
+
 	dev_priv->display.dbuf.enabled_slices =
 		intel_enabled_dbuf_slices_mask(dev_priv);
 
+	slices_mask = BIT(DBUF_S1) | dev_priv->display.dbuf.enabled_slices;
+
+	if (DISPLAY_VER(dev_priv) >= 14)
+		intel_pmdemand_program_dbuf(dev_priv, slices_mask);
+
 	/*
	 * Just power up at least 1 slice, we will
	 * figure out later which slices we have and what we need.
	 */
-	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
-				dev_priv->display.dbuf.enabled_slices);
+	gen9_dbuf_slices_update(dev_priv, slices_mask);
 }
 
 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
 {
 	gen9_dbuf_slices_update(dev_priv, 0);
+
+	if (DISPLAY_VER(dev_priv) >= 14)
+		intel_pmdemand_program_dbuf(dev_priv, 0);
 }
 
 static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 8a88de67ff0a..5f479f3828bb 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -1057,7 +1057,7 @@ void intel_dmc_init(struct drm_i915_private *i915)
 	i915->display.dmc.dmc = dmc;
 
 	drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
-	schedule_work(&dmc->work);
+	queue_work(i915->unordered_wq, &dmc->work);
 
 	return;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f4192fda1a76..09dc6c88ad28 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -5251,7 +5251,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
 	spin_lock_irq(&i915->irq_lock);
 	i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin);
 	spin_unlock_irq(&i915->irq_lock);
-	queue_delayed_work(system_wq, &i915->display.hotplug.hotplug_work, 0);
+	queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0);
 }
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
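Note: the schedule_work()/schedule_delayed_work() conversions throughout this patch target a driver-owned i915->unordered_wq (created elsewhere in the series), so that intel_display_driver_remove_noirq() can flush only i915's pending work via flush_workqueue() rather than the global flush_scheduled_work(). A minimal sketch of that pattern, with illustrative names:

    #include <linux/workqueue.h>

    struct demo_driver {
        struct workqueue_struct *unordered_wq;
    };

    static int demo_probe(struct demo_driver *drv)
    {
        /* unordered, unbound concurrency-managed queue owned by the driver */
        drv->unordered_wq = alloc_workqueue("demo-unordered", 0, 0);
        if (!drv->unordered_wq)
            return -ENOMEM;
        return 0;
    }

    static void demo_remove(struct demo_driver *drv)
    {
        /* flush only this driver's pending work, not the system_wq */
        flush_workqueue(drv->unordered_wq);
        destroy_workqueue(drv->unordered_wq);
    }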
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 0952a707358c..a263773f4d68 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -1064,6 +1064,7 @@ static void
 intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
 					 const struct intel_crtc_state *crtc_state)
 {
 	struct intel_connector *intel_connector = intel_dp->attached_connector;
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
 	if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
 		lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
@@ -1081,7 +1082,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
 	}
 
 	/* Schedule a Hotplug Uevent to userspace to start modeset */
-	schedule_work(&intel_connector->modeset_retry_work);
+	queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work);
 }
 
 /* Perform the link training on all LTTPRs and the DPRX on a link. */
@@ -1279,7 +1280,7 @@ intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
 	if (drm_dp_128b132b_eq_interlane_align_done(link_status) &&
 	    drm_dp_128b132b_cds_interlane_align_done(link_status) &&
 	    drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) {
-		lt_err(intel_dp, DP_PHY_DPRX, "CDS interlane align done\n");
+		lt_dbg(intel_dp, DP_PHY_DPRX, "CDS interlane align done\n");
 		break;
 	}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 824be7f03724..999badfe2906 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1793,13 +1793,11 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
 	enum pipe pipe = crtc->pipe;
 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
 	u32 loopfilter, tribuf_calcntr;
-	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
+	u32 bestm2, bestp1, bestp2, bestm2_frac;
 	u32 dpio_val;
 	int vco;
 
-	bestn = crtc_state->dpll.n;
 	bestm2_frac = crtc_state->dpll.m2 & 0x3fffff;
-	bestm1 = crtc_state->dpll.m1;
 	bestm2 = crtc_state->dpll.m2 >> 22;
 	bestp1 = crtc_state->dpll.p1;
 	bestp2 = crtc_state->dpll.p2;
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 760e63cdc0c8..0d35b6be5b6a 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -111,7 +111,9 @@ static void intel_drrs_set_state(struct intel_crtc *crtc,
 
 static void intel_drrs_schedule_work(struct intel_crtc *crtc)
 {
-	mod_delayed_work(system_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+	mod_delayed_work(i915->unordered_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
 }
 
 static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 0d27a98dcbbe..446bbf7986b6 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1601,7 +1601,7 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
 	for (i = 0; i < num_planes; i++) {
 		struct fb_plane_view_dims view_dims;
 		unsigned int width, height;
-		unsigned int cpp, size;
+		unsigned int size;
 		u32 offset;
 		int x, y;
 		int ret;
@@ -1618,7 +1618,6 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
 			return -EINVAL;
 		}
 
-		cpp = fb->base.format->cpp[i];
 		intel_fb_plane_dims(fb, i, &width, &height);
 
 		ret = convert_plane_offset_to_xy(fb, i, width, &x, &y);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 1966f9396201..7f8b2d7713c7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1254,7 +1254,7 @@ static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
 bool intel_fbc_pre_update(struct intel_atomic_state *state,
 			  struct intel_crtc *crtc)
 {
-	const struct intel_plane_state *plane_state;
+	const struct intel_plane_state __maybe_unused *plane_state;
 	bool need_vblank_wait = false;
 	struct intel_plane *plane;
 	int i;
@@ -1309,7 +1309,7 @@ static void __intel_fbc_post_update(struct intel_fbc *fbc)
 void intel_fbc_post_update(struct intel_atomic_state *state,
 			   struct intel_crtc *crtc)
 {
-	const struct intel_plane_state *plane_state;
+	const struct intel_plane_state __maybe_unused *plane_state;
 	struct intel_plane *plane;
 	int i;
 
@@ -1408,7 +1408,7 @@ void intel_fbc_flush(struct drm_i915_private *i915,
 
 int intel_fbc_atomic_check(struct intel_atomic_state *state)
 {
-	struct intel_plane_state *plane_state;
+	struct intel_plane_state __maybe_unused *plane_state;
 	struct intel_plane *plane;
 	int i;
 
@@ -1600,7 +1600,7 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
 	if (READ_ONCE(fbc->underrun_detected))
 		return;
 
-	schedule_work(&fbc->underrun_work);
+	queue_work(fbc->i915->unordered_wq, &fbc->underrun_work);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 3b5690acd720..1cc0ddc6a310 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -696,7 +696,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 			/* Don't block our own workqueue as this can
			 * be run in parallel with other i915.ko tasks.
			 */
-			schedule_work(&dev_priv->display.fbdev.suspend_work);
+			queue_work(dev_priv->unordered_wq,
+				   &dev_priv->display.fbdev.suspend_work);
 			return;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index 02b593b1e2ea..e8e8be54143b 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -255,3 +255,15 @@ int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
 
 	return 0;
 }
+
+bool
+intel_atomic_global_state_is_serialized(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct intel_crtc *crtc;
+
+	for_each_intel_crtc(&i915->drm, crtc)
+		if (!intel_atomic_get_new_crtc_state(state, crtc))
+			return false;
+	return true;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
index f01ee0bb3e5a..5477de8f0b30 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.h
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -87,4 +87,6 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state);
 int intel_atomic_lock_global_state(struct intel_global_state *obj_state);
 int intel_atomic_serialize_global_state(struct intel_global_state *obj_state);
 
+bool intel_atomic_global_state_is_serialized(struct intel_atomic_state *state);
+
 #endif
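Note: the new intel_atomic_global_state_is_serialized() returns true only when the atomic state carries every CRTC, in which case no other commit can run concurrently with it. A hedged usage sketch; demo_write_global_regs() is hypothetical, not a function from this series:

    /* Sketch: a global-state consumer can assert the commit is serialized
     * against all CRTCs before writing hardware state that no per-pipe
     * lock protects.
     */
    static void demo_write_global_regs(struct drm_i915_private *i915)
    {
        /* ... program shared (non-per-pipe) registers ... */
    }

    static void demo_update_global_hw(struct intel_atomic_state *state)
    {
        struct drm_i915_private *i915 = to_i915(state->base.dev);

        drm_WARN_ON(&i915->drm,
                    !intel_atomic_global_state_is_serialized(state));

        demo_write_global_regs(i915);
    }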
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 17542c28dfd5..5ed450111f77 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -983,6 +983,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
 	struct drm_device *dev = connector->base.dev;
 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct intel_hdcp *hdcp = &connector->hdcp;
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 
 	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
 
@@ -1001,7 +1002,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
 	hdcp->value = value;
 	if (update_property) {
 		drm_connector_get(&connector->base);
-		schedule_work(&hdcp->prop_work);
+		queue_work(i915->unordered_wq, &hdcp->prop_work);
 	}
 }
 
@@ -2090,16 +2091,17 @@ static void intel_hdcp_check_work(struct work_struct *work)
 					       struct intel_hdcp,
 					       check_work);
 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 
 	if (drm_connector_is_unregistered(&connector->base))
 		return;
 
 	if (!intel_hdcp2_check_link(connector))
-		schedule_delayed_work(&hdcp->check_work,
-				      DRM_HDCP2_CHECK_PERIOD_MS);
+		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+				   DRM_HDCP2_CHECK_PERIOD_MS);
 	else if (!intel_hdcp_check_link(connector))
-		schedule_delayed_work(&hdcp->check_work,
-				      DRM_HDCP_CHECK_PERIOD_MS);
+		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+				   DRM_HDCP_CHECK_PERIOD_MS);
 }
 
 static int i915_hdcp_component_bind(struct device *i915_kdev,
@@ -2398,7 +2400,8 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
 	}
 
 	if (!ret) {
-		schedule_delayed_work(&hdcp->check_work, check_link_interval);
+		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+				   check_link_interval);
 		intel_hdcp_update_value(connector,
 					DRM_MODE_CONTENT_PROTECTION_ENABLED,
 					true);
@@ -2447,6 +2450,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 		to_intel_connector(conn_state->connector);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	bool content_protection_type_changed, desired_and_not_enabled = false;
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 
 	if (!connector->hdcp.shim)
 		return;
@@ -2473,7 +2477,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 		mutex_lock(&hdcp->mutex);
 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 		drm_connector_get(&connector->base);
-		schedule_work(&hdcp->prop_work);
+		queue_work(i915->unordered_wq, &hdcp->prop_work);
 		mutex_unlock(&hdcp->mutex);
 	}
 
@@ -2490,7 +2494,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 	 */
 	if (!desired_and_not_enabled && !content_protection_type_changed) {
 		drm_connector_get(&connector->base);
-		schedule_work(&hdcp->prop_work);
+		queue_work(i915->unordered_wq, &hdcp->prop_work);
 	}
 }
 
@@ -2602,6 +2606,7 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
 {
 	struct intel_hdcp *hdcp = &connector->hdcp;
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 
 	if (!hdcp->shim)
 		return;
 
 	atomic_inc(&connector->hdcp.cp_irq_count);
 	wake_up_all(&connector->hdcp.cp_irq_queue);
 
-	schedule_delayed_work(&hdcp->check_work, 0);
+	queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
 }
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 23a5e1a875f1..1160fa20433b 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -212,7 +212,8 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
 	/* Enable polling and queue hotplug re-enabling. */
 	if (hpd_disabled) {
 		drm_kms_helper_poll_enable(&dev_priv->drm);
-		mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work,
+		mod_delayed_work(dev_priv->unordered_wq,
+				 &dev_priv->display.hotplug.reenable_work,
 				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
 	}
 }
@@ -339,7 +340,8 @@ static void i915_digport_work_func(struct work_struct *work)
 		spin_lock_irq(&dev_priv->irq_lock);
 		dev_priv->display.hotplug.event_bits |= old_bits;
 		spin_unlock_irq(&dev_priv->irq_lock);
-		queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
+		queue_delayed_work(dev_priv->unordered_wq,
+				   &dev_priv->display.hotplug.hotplug_work, 0);
 	}
 }
 
@@ -446,7 +448,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 		dev_priv->display.hotplug.retry_bits |= retry;
 		spin_unlock_irq(&dev_priv->irq_lock);
 
-		mod_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work,
+		mod_delayed_work(dev_priv->unordered_wq,
+				 &dev_priv->display.hotplug.hotplug_work,
 				 msecs_to_jiffies(HPD_RETRY_DELAY));
 	}
 }
@@ -577,7 +580,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 	if (queue_dig)
 		queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
 	if (queue_hp)
-		queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
+		queue_delayed_work(dev_priv->unordered_wq,
+				   &dev_priv->display.hotplug.hotplug_work, 0);
 }
 
 /**
@@ -687,7 +691,8 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
 	 * As well, there's no issue if we race here since we always reschedule
	 * this worker anyway
	 */
-	schedule_work(&dev_priv->display.hotplug.poll_init_work);
+	queue_work(dev_priv->unordered_wq,
+		   &dev_priv->display.hotplug.poll_init_work);
 }
 
 /**
@@ -715,7 +720,8 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
 		return;
 
 	WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
-	schedule_work(&dev_priv->display.hotplug.poll_init_work);
+	queue_work(dev_priv->unordered_wq,
+		   &dev_priv->display.hotplug.poll_init_work);
 }
 
 void intel_hpd_init_early(struct drm_i915_private *i915)
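Note: the hotplug hunks use two different queueing calls on the new workqueue, and the distinction matters: queue_delayed_work() is a no-op when the work is already pending (keeping the earlier deadline), while mod_delayed_work() re-arms the timer unconditionally. An illustrative sketch:

    #include <linux/workqueue.h>

    struct demo_hpd {
        struct workqueue_struct *wq;
        struct delayed_work work;
    };

    static void demo_kick_hotplug(struct demo_hpd *hpd)
    {
        /* no-op if already pending: the earliest deadline wins */
        queue_delayed_work(hpd->wq, &hpd->work, 0);
    }

    static void demo_retry_hotplug(struct demo_hpd *hpd)
    {
        /* always (re)arms: pushes the deadline out to now + 100ms */
        mod_delayed_work(hpd->wq, &hpd->work, msecs_to_jiffies(100));
    }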
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 5ff99ca7f1de..b8f43efb0ab5 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -26,6 +26,7 @@
 #include "intel_fifo_underrun.h"
 #include "intel_modeset_setup.h"
 #include "intel_pch_display.h"
+#include "intel_pmdemand.h"
 #include "intel_tc.h"
 #include "intel_vblank.h"
 #include "intel_wm.h"
@@ -115,6 +116,8 @@ static void set_encoder_for_connector(struct intel_connector *connector,
 static void reset_encoder_connector_state(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct intel_pmdemand_state *pmdemand_state =
+		to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
 	struct intel_connector *connector;
 	struct drm_connector_list_iter conn_iter;
 
@@ -123,6 +126,10 @@ static void reset_encoder_connector_state(struct intel_encoder *encoder)
 		if (connector->base.encoder != &encoder->base)
 			continue;
 
+		/* Clear the corresponding bit in pmdemand active phys mask */
+		intel_pmdemand_update_phys_mask(i915, encoder,
+						pmdemand_state, false);
+
 		set_encoder_for_connector(connector, NULL);
 
 		connector->base.dpms = DRM_MODE_DPMS_OFF;
@@ -151,6 +158,8 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
 		to_intel_cdclk_state(i915->display.cdclk.obj.state);
 	struct intel_dbuf_state *dbuf_state =
 		to_intel_dbuf_state(i915->display.dbuf.obj.state);
+	struct intel_pmdemand_state *pmdemand_state =
+		to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
 	struct intel_crtc_state *crtc_state =
 		to_intel_crtc_state(crtc->base.state);
 	enum pipe pipe = crtc->pipe;
@@ -174,6 +183,8 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
 
 	bw_state->data_rate[pipe] = 0;
 	bw_state->num_active_planes[pipe] = 0;
+
+	intel_pmdemand_update_port_clock(i915, pmdemand_state, pipe, 0);
 }
 
 /*
@@ -552,6 +563,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_crtc_state *crtc_state = crtc ?
 		to_intel_crtc_state(crtc->base.state) : NULL;
+	struct intel_pmdemand_state *pmdemand_state =
+		to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
 
 	/*
	 * We need to check both for a crtc link (meaning that the encoder is
@@ -575,6 +588,10 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 			    encoder->base.base.id,
 			    encoder->base.name);
 
+		/* Clear the corresponding bit in pmdemand active phys mask */
+		intel_pmdemand_update_phys_mask(i915, encoder,
+						pmdemand_state, false);
+
 		/*
		 * Connector is active, but has no active pipe. This is fallout
		 * from our resume register restoring. Disable the encoder
@@ -661,6 +678,8 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
 		to_intel_cdclk_state(i915->display.cdclk.obj.state);
 	struct intel_dbuf_state *dbuf_state =
 		to_intel_dbuf_state(i915->display.dbuf.obj.state);
+	struct intel_pmdemand_state *pmdemand_state =
+		to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
 	enum pipe pipe;
 	struct intel_crtc *crtc;
 	struct intel_encoder *encoder;
@@ -724,7 +743,15 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
 					intel_encoder_get_config(encoder, slave_crtc_state);
 				}
 			}
+
+			intel_pmdemand_update_phys_mask(i915, encoder,
+							pmdemand_state,
+							true);
 		} else {
+			intel_pmdemand_update_phys_mask(i915, encoder,
+							pmdemand_state,
+							false);
+
 			encoder->base.crtc = NULL;
 		}
 
@@ -841,8 +868,13 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
 		cdclk_state->min_voltage_level[crtc->pipe] =
 			crtc_state->min_voltage_level;
 
+		intel_pmdemand_update_port_clock(i915, pmdemand_state, pipe,
+						 crtc_state->port_clock);
+
 		intel_bw_crtc_update(bw_state, crtc_state);
 	}
+
+	intel_pmdemand_init_pmdemand_params(i915, pmdemand_state);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index b7973a05d022..84078fb82b2f 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -635,7 +635,8 @@ static void asle_work(struct work_struct *work)
 void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
 {
 	if (dev_priv->display.opregion.asle)
-		schedule_work(&dev_priv->display.opregion.asle_work);
+		queue_work(dev_priv->unordered_wq,
+			   &dev_priv->display.opregion.asle_work);
 }
 
 #define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#include "intel_de.h" +#include "intel_display_trace.h" +#include "intel_pmdemand.h" +#include "skl_watermark.h" + +static struct intel_global_state * +intel_pmdemand_duplicate_state(struct intel_global_obj *obj) +{ + struct intel_pmdemand_state *pmdemand_state; + + pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL); + if (!pmdemand_state) + return NULL; + + return &pmdemand_state->base; +} + +static void intel_pmdemand_destroy_state(struct intel_global_obj *obj, + struct intel_global_state *state) +{ + kfree(state); +} + +static const struct intel_global_state_funcs intel_pmdemand_funcs = { + .atomic_duplicate_state = intel_pmdemand_duplicate_state, + .atomic_destroy_state = intel_pmdemand_destroy_state, +}; + +static struct intel_pmdemand_state * +intel_atomic_get_pmdemand_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_global_state *pmdemand_state = + intel_atomic_get_global_obj_state(state, + &i915->display.pmdemand.obj); + + if (IS_ERR(pmdemand_state)) + return ERR_CAST(pmdemand_state); + + return to_intel_pmdemand_state(pmdemand_state); +} + +static struct intel_pmdemand_state * +intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_global_state *pmdemand_state = + intel_atomic_get_old_global_obj_state(state, + &i915->display.pmdemand.obj); + + if (!pmdemand_state) + return NULL; + + return to_intel_pmdemand_state(pmdemand_state); +} + +static struct intel_pmdemand_state * +intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_global_state *pmdemand_state = + intel_atomic_get_new_global_obj_state(state, + &i915->display.pmdemand.obj); + + if (!pmdemand_state) + return NULL; + + return to_intel_pmdemand_state(pmdemand_state); +} + +int intel_pmdemand_init(struct drm_i915_private *i915) +{ + struct intel_pmdemand_state *pmdemand_state; + + pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL); + if (!pmdemand_state) + return -ENOMEM; + + intel_atomic_global_obj_init(i915, &i915->display.pmdemand.obj, + &pmdemand_state->base, + &intel_pmdemand_funcs); + + if (IS_MTL_DISPLAY_STEP(i915, STEP_A0, STEP_C0)) + /* Wa_14016740474 */ + intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE); + + return 0; +} + +void intel_pmdemand_init_early(struct drm_i915_private *i915) +{ + mutex_init(&i915->display.pmdemand.lock); + init_waitqueue_head(&i915->display.pmdemand.waitqueue); +} + +void +intel_pmdemand_update_phys_mask(struct drm_i915_private *i915, + struct intel_encoder *encoder, + struct intel_pmdemand_state *pmdemand_state, + bool set_bit) +{ + enum phy phy; + + if (DISPLAY_VER(i915) < 14) + return; + + if (!encoder) + return; + + phy = intel_port_to_phy(i915, encoder->port); + if (intel_phy_is_tc(i915, phy)) + return; + + if (set_bit) + pmdemand_state->active_combo_phys_mask |= BIT(phy); + else + pmdemand_state->active_combo_phys_mask &= ~BIT(phy); +} + +void +intel_pmdemand_update_port_clock(struct drm_i915_private *i915, + struct intel_pmdemand_state *pmdemand_state, + enum pipe pipe, int port_clock) +{ + if (DISPLAY_VER(i915) < 14) + return; + + pmdemand_state->ddi_clocks[pipe] = port_clock; +} + +static void +intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915, + struct intel_atomic_state *state, + struct intel_pmdemand_state *pmdemand_state) +{ + int max_ddiclk = 0; + const 
struct intel_crtc_state *new_crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) + intel_pmdemand_update_port_clock(i915, pmdemand_state, + crtc->pipe, + new_crtc_state->port_clock); + + for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++) + max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk); + + pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000); +} + +static void +intel_pmdemand_update_connector_phys(struct drm_i915_private *i915, + struct intel_atomic_state *state, + struct drm_connector_state *conn_state, + bool set_bit, + struct intel_pmdemand_state *pmdemand_state) +{ + struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); + struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc); + struct intel_crtc_state *crtc_state; + + if (!crtc) + return; + + if (set_bit) + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + else + crtc_state = intel_atomic_get_old_crtc_state(state, crtc); + + if (!crtc_state->hw.active) + return; + + intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state, + set_bit); +} + +static void +intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915, + struct intel_atomic_state *state, + struct intel_pmdemand_state *pmdemand_state) +{ + struct drm_connector_state *old_conn_state; + struct drm_connector_state *new_conn_state; + struct drm_connector *connector; + int i; + + for_each_oldnew_connector_in_state(&state->base, connector, + old_conn_state, new_conn_state, i) { + if (!intel_connector_needs_modeset(state, connector)) + continue; + + /* First clear the active phys in the old connector state */ + intel_pmdemand_update_connector_phys(i915, state, + old_conn_state, false, + pmdemand_state); + + /* Then set the active phys in new connector state */ + intel_pmdemand_update_connector_phys(i915, state, + new_conn_state, true, + pmdemand_state); + } + + pmdemand_state->params.active_phys = + min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask), + 7); +} + +static bool +intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915, + struct intel_encoder *encoder) +{ + enum phy phy; + + if (!encoder) + return false; + + phy = intel_port_to_phy(i915, encoder->port); + + return intel_phy_is_tc(i915, phy); +} + +static bool +intel_pmdemand_connector_needs_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct drm_connector_state *old_conn_state; + struct drm_connector_state *new_conn_state; + struct drm_connector *connector; + int i; + + for_each_oldnew_connector_in_state(&state->base, connector, + old_conn_state, new_conn_state, i) { + struct intel_encoder *old_encoder = + to_intel_encoder(old_conn_state->best_encoder); + struct intel_encoder *new_encoder = + to_intel_encoder(new_conn_state->best_encoder); + + if (!intel_connector_needs_modeset(state, connector)) + continue; + + if (old_encoder == new_encoder || + (intel_pmdemand_encoder_has_tc_phy(i915, old_encoder) && + intel_pmdemand_encoder_has_tc_phy(i915, new_encoder))) + continue; + + return true; + } + + return false; +} + +static bool intel_pmdemand_needs_update(struct intel_atomic_state *state) +{ + const struct intel_bw_state *new_bw_state, *old_bw_state; + const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state; + const struct intel_crtc_state *new_crtc_state, *old_crtc_state; + const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state; + struct intel_crtc *crtc; + int i; + + 
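/* + * An update is needed whenever one of the pmdemand inputs may have + * changed: the QGV point peak bandwidth, the active pipes or enabled + * dbuf slices, the cdclk frequency or voltage level, any port clock, + * or the set of active non-TC PHYs. Compare the old and new global + * states for each in turn. + */ +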
new_bw_state = intel_atomic_get_new_bw_state(state); + old_bw_state = intel_atomic_get_old_bw_state(state); + if (new_bw_state && new_bw_state->qgv_point_peakbw != + old_bw_state->qgv_point_peakbw) + return true; + + new_dbuf_state = intel_atomic_get_new_dbuf_state(state); + old_dbuf_state = intel_atomic_get_old_dbuf_state(state); + if (new_dbuf_state && + (new_dbuf_state->active_pipes != + old_dbuf_state->active_pipes || + new_dbuf_state->enabled_slices != + old_dbuf_state->enabled_slices)) + return true; + + new_cdclk_state = intel_atomic_get_new_cdclk_state(state); + old_cdclk_state = intel_atomic_get_old_cdclk_state(state); + if (new_cdclk_state && + (new_cdclk_state->actual.cdclk != + old_cdclk_state->actual.cdclk || + new_cdclk_state->actual.voltage_level != + old_cdclk_state->actual.voltage_level)) + return true; + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) + if (new_crtc_state->port_clock != old_crtc_state->port_clock) + return true; + + return intel_pmdemand_connector_needs_update(state); +} + +int intel_pmdemand_atomic_check(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_bw_state *new_bw_state; + const struct intel_cdclk_state *new_cdclk_state; + const struct intel_dbuf_state *new_dbuf_state; + struct intel_pmdemand_state *new_pmdemand_state; + + if (DISPLAY_VER(i915) < 14) + return 0; + + if (!intel_pmdemand_needs_update(state)) + return 0; + + new_pmdemand_state = intel_atomic_get_pmdemand_state(state); + if (IS_ERR(new_pmdemand_state)) + return PTR_ERR(new_pmdemand_state); + + new_bw_state = intel_atomic_get_bw_state(state); + if (IS_ERR(new_bw_state)) + return PTR_ERR(new_bw_state); + + /* the firmware calculates the qclk_gv_index; the requirement is set to 0 */ + new_pmdemand_state->params.qclk_gv_index = 0; + new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw; + + new_dbuf_state = intel_atomic_get_dbuf_state(state); + if (IS_ERR(new_dbuf_state)) + return PTR_ERR(new_dbuf_state); + + new_pmdemand_state->params.active_pipes = + min_t(u8, hweight8(new_dbuf_state->active_pipes), 3); + new_pmdemand_state->params.active_dbufs = + min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3); + + new_cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(new_cdclk_state)) + return PTR_ERR(new_cdclk_state); + + new_pmdemand_state->params.voltage_index = + new_cdclk_state->actual.voltage_level; + new_pmdemand_state->params.cdclk_freq_mhz = + DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000); + + intel_pmdemand_update_max_ddiclk(i915, state, new_pmdemand_state); + + intel_pmdemand_update_active_non_tc_phys(i915, state, new_pmdemand_state); + + /* + * Active_PLLs starts at 1 to account for the CDCLK PLL. + * TODO: account for the genlock filter once it gets used. + */ + new_pmdemand_state->params.plls = + min_t(u16, new_pmdemand_state->params.active_phys + 1, 7); + + /* + * Setting scalers to the max as the scaler count cannot be calculated + * during flips and fastsets without taking the global state locks.
+ */ + new_pmdemand_state->params.scalers = 7; + + if (state->base.allow_modeset) + return intel_atomic_serialize_global_state(&new_pmdemand_state->base); + else + return intel_atomic_lock_global_state(&new_pmdemand_state->base); +} + +static bool intel_pmdemand_check_prev_transaction(struct drm_i915_private *i915) +{ + return !(intel_de_wait_for_clear(i915, + XELPDP_INITIATE_PMDEMAND_REQUEST(1), + XELPDP_PMDEMAND_REQ_ENABLE, 10) || + intel_de_wait_for_clear(i915, + GEN12_DCPR_STATUS_1, + XELPDP_PMDEMAND_INFLIGHT_STATUS, 10)); +} + +void +intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915, + struct intel_pmdemand_state *pmdemand_state) +{ + u32 reg1, reg2; + + if (DISPLAY_VER(i915) < 14) + return; + + mutex_lock(&i915->display.pmdemand.lock); + if (drm_WARN_ON(&i915->drm, + !intel_pmdemand_check_prev_transaction(i915))) { + memset(&pmdemand_state->params, 0, + sizeof(pmdemand_state->params)); + goto unlock; + } + + reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0)); + + reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)); + + /* Set 1 */ + pmdemand_state->params.qclk_gv_bw = + REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1); + pmdemand_state->params.voltage_index = + REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1); + pmdemand_state->params.qclk_gv_index = + REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1); + pmdemand_state->params.active_pipes = + REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1); + pmdemand_state->params.active_dbufs = + REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1); + pmdemand_state->params.active_phys = + REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1); + + /* Set 2 */ + pmdemand_state->params.cdclk_freq_mhz = + REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2); + pmdemand_state->params.ddiclk_max = + REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2); + pmdemand_state->params.scalers = + REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2); + +unlock: + mutex_unlock(&i915->display.pmdemand.lock); +} + +static bool intel_pmdemand_req_complete(struct drm_i915_private *i915) +{ + return !(intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) & + XELPDP_PMDEMAND_REQ_ENABLE); +} + +static void intel_pmdemand_wait(struct drm_i915_private *i915) +{ + if (!wait_event_timeout(i915->display.pmdemand.waitqueue, + intel_pmdemand_req_complete(i915), + msecs_to_jiffies_timeout(10))) + drm_err(&i915->drm, + "timed out waiting for Punit PM Demand Response\n"); +} + +/* Required to be programmed during Display Init Sequences. */ +void intel_pmdemand_program_dbuf(struct drm_i915_private *i915, + u8 dbuf_slices) +{ + u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3); + + mutex_lock(&i915->display.pmdemand.lock); + if (drm_WARN_ON(&i915->drm, + !intel_pmdemand_check_prev_transaction(i915))) + goto unlock; + + intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0), + XELPDP_PMDEMAND_DBUFS_MASK, + REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs)); + intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0, + XELPDP_PMDEMAND_REQ_ENABLE); + + intel_pmdemand_wait(i915); + +unlock: + mutex_unlock(&i915->display.pmdemand.lock); +} + +static void +intel_pmdemand_update_params(const struct intel_pmdemand_state *new, + const struct intel_pmdemand_state *old, + u32 *reg1, u32 *reg2, bool serialized) +{ + /* + * The pmdemand parameter updates happen in two steps: pre-plane and + * post-plane updates.
During the pre-plane update, as the DE might + * still be handling old operations, program the pmdemand parameters + * with the higher of the old and new values to avoid unexpected + * performance issues. Once things have settled, the post-plane update + * programs the new parameter values. + * + * If the pmdemand params update happens without a modeset allowed, the + * updates cannot be serialized, so parallel atomic commits may affect + * the pmdemand parameters. In that case the current register values + * must be considered as well: when not serialized, the pre-plane update + * programs the max of the old, new and current register values, and the + * post-plane update the max of the new and current register values. + */ + +#define update_reg(reg, field, mask) do { \ + u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \ + u32 old_val = old ? old->params.field : 0; \ + u32 new_val = new->params.field; \ +\ + *(reg) &= ~(mask); \ + *(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \ +} while (0) + + /* Set 1 */ + update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK); + update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK); + update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK); + update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK); + update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK); + update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK); + + /* Set 2 */ + update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK); + update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK); + update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK); + update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK); + +#undef update_reg +} + +static void +intel_pmdemand_program_params(struct drm_i915_private *i915, + const struct intel_pmdemand_state *new, + const struct intel_pmdemand_state *old, + bool serialized) +{ + bool changed = false; + u32 reg1, mod_reg1; + u32 reg2, mod_reg2; + + mutex_lock(&i915->display.pmdemand.lock); + if (drm_WARN_ON(&i915->drm, + !intel_pmdemand_check_prev_transaction(i915))) + goto unlock; + + reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0)); + mod_reg1 = reg1; + + reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)); + mod_reg2 = reg2; + + intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2, + serialized); + + if (reg1 != mod_reg1) { + intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0), + mod_reg1); + changed = true; + } + + if (reg2 != mod_reg2) { + intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), + mod_reg2); + changed = true; + } + + /* Initiate the pmdemand request only if the register values changed */ + if (!changed) + goto unlock; + + drm_dbg_kms(&i915->drm, + "initiate pmdemand request values: (0x%x 0x%x)\n", + mod_reg1, mod_reg2); + + intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0, + XELPDP_PMDEMAND_REQ_ENABLE); + + intel_pmdemand_wait(i915); + +unlock: + mutex_unlock(&i915->display.pmdemand.lock); +} + +static bool +intel_pmdemand_state_changed(const struct intel_pmdemand_state *new, + const struct intel_pmdemand_state *old) +{ + return memcmp(&new->params, &old->params, sizeof(new->params)) != 0; +} + +void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_pmdemand_state *new_pmdemand_state = + intel_atomic_get_new_pmdemand_state(state);
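+ /* the old state is needed so the pre-plane step can program max(old, new) */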
+ const struct intel_pmdemand_state *old_pmdemand_state = + intel_atomic_get_old_pmdemand_state(state); + + if (DISPLAY_VER(i915) < 14) + return; + + if (!new_pmdemand_state || + !intel_pmdemand_state_changed(new_pmdemand_state, + old_pmdemand_state)) + return; + + WARN_ON(!new_pmdemand_state->base.changed); + + intel_pmdemand_program_params(i915, new_pmdemand_state, + old_pmdemand_state, + intel_atomic_global_state_is_serialized(state)); +} + +void intel_pmdemand_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_pmdemand_state *new_pmdemand_state = + intel_atomic_get_new_pmdemand_state(state); + const struct intel_pmdemand_state *old_pmdemand_state = + intel_atomic_get_old_pmdemand_state(state); + + if (DISPLAY_VER(i915) < 14) + return; + + if (!new_pmdemand_state || + !intel_pmdemand_state_changed(new_pmdemand_state, + old_pmdemand_state)) + return; + + WARN_ON(!new_pmdemand_state->base.changed); + + intel_pmdemand_program_params(i915, new_pmdemand_state, NULL, + intel_atomic_global_state_is_serialized(state)); +} diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h new file mode 100644 index 000000000000..2941a1a18b72 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_PMDEMAND_H__ +#define __INTEL_PMDEMAND_H__ + +#include "intel_display_limits.h" +#include "intel_global_state.h" + +struct drm_i915_private; +struct intel_atomic_state; +struct intel_crtc_state; +struct intel_encoder; +struct intel_plane_state; + +struct pmdemand_params { + u16 qclk_gv_bw; + u8 voltage_index; + u8 qclk_gv_index; + u8 active_pipes; + u8 active_dbufs; + /* Total number of non type C active phys from active_phys_mask */ + u8 active_phys; + u8 plls; + u16 cdclk_freq_mhz; + /* max from ddi_clocks[] */ + u16 ddiclk_max; + u8 scalers; +}; + +struct intel_pmdemand_state { + struct intel_global_state base; + + /* Maintain a persistent list of port clocks across all crtcs */ + int ddi_clocks[I915_MAX_PIPES]; + + /* Maintain a persistent list of non type C phys mask */ + u16 active_combo_phys_mask; + + /* Parameters to be configured in the pmdemand registers */ + struct pmdemand_params params; +}; + +#define to_intel_pmdemand_state(x) container_of((x), \ + struct intel_pmdemand_state, \ + base) + +void intel_pmdemand_init_early(struct drm_i915_private *i915); +int intel_pmdemand_init(struct drm_i915_private *i915); +void intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915, + struct intel_pmdemand_state *pmdemand_state); +void intel_pmdemand_update_port_clock(struct drm_i915_private *i915, + struct intel_pmdemand_state *pmdemand_state, + enum pipe pipe, int port_clock); +void intel_pmdemand_update_phys_mask(struct drm_i915_private *i915, + struct intel_encoder *encoder, + struct intel_pmdemand_state *pmdemand_state, + bool clear_bit); +void intel_pmdemand_program_dbuf(struct drm_i915_private *i915, + u8 dbuf_slices); +void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state); +void intel_pmdemand_post_plane_update(struct intel_atomic_state *state); +int intel_pmdemand_atomic_check(struct intel_atomic_state *state); + +#endif /* __INTEL_PMDEMAND_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 5e7ba594e7e7..73f0f1714b37 100644 --- 
a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -867,6 +867,7 @@ static void edp_panel_vdd_work(struct work_struct *__work) static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); unsigned long delay; /* @@ -882,7 +883,8 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) * operations. */ delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5); - schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay); + queue_delayed_work(i915->unordered_wq, + &intel_dp->pps.panel_vdd_work, delay); } /* diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index ea0389c5f656..d58ed9b62e67 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -341,7 +341,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) */ intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp)); - schedule_work(&intel_dp->psr.work); + queue_work(dev_priv->unordered_wq, &intel_dp->psr.work); } } @@ -2440,6 +2440,8 @@ static void tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, enum fb_op_origin origin) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled || !intel_dp->psr.active) return; @@ -2453,7 +2455,7 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, return; tgl_psr2_enable_dc3co(intel_dp); - mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work, + mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work, intel_dp->psr.dc3co_exit_delay); } @@ -2493,7 +2495,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp) psr_force_hw_tracking_exit(intel_dp); if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits) - schedule_work(&intel_dp->psr.work); + queue_work(dev_priv->unordered_wq, &intel_dp->psr.work); } } diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 36070d86550f..6b01a0b68b97 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -2529,6 +2529,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, plane_config->base = base; offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id)); + drm_WARN_ON(&dev_priv->drm, offset != 0); val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id)); fb->height = REG_FIELD_GET(PLANE_HEIGHT_MASK, val) + 1; diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index d1245c847f1c..063929a42a42 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -2900,7 +2900,7 @@ static int skl_compute_wm(struct intel_atomic_state *state) { struct intel_crtc *crtc; - struct intel_crtc_state *new_crtc_state; + struct intel_crtc_state __maybe_unused *new_crtc_state; int ret, i; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index cd90a30e04d8..ae2f3ab3e73d 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -136,7 +136,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, enum port port = intel_dsi_host->port; struct mipi_dsi_packet packet; ssize_t ret; - const 
u8 *header, *data; + const u8 *header; i915_reg_t data_reg, ctrl_reg; u32 data_mask, ctrl_mask; @@ -145,7 +145,6 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, return ret; header = packet.header; - data = packet.payload; if (msg->flags & MIPI_DSI_MSG_USE_LPM) { data_reg = MIPI_LP_GEN_DATA(port); @@ -1040,7 +1039,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, unsigned int lane_count = intel_dsi->lane_count; unsigned int bpp, fmt; enum port port; - u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp; + u16 hactive, hfp, hsync, hbp, vfp, vsync; u16 hfp_sw, hsync_sw, hbp_sw; u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw, crtc_hblank_start_sw, crtc_hblank_end_sw; @@ -1105,7 +1104,6 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, /* vertical values are in terms of lines */ vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port)); vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port)); - vbp = intel_de_read(dev_priv, MIPI_VBP_COUNT(port)); adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp; adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 5fb459ea4294..cfd7929587d8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -736,7 +736,6 @@ static int eb_reserve(struct i915_execbuffer *eb) struct eb_vma *ev; unsigned int pass; int err = 0; - bool unpinned; /* * We have one more buffers that we couldn't bind, which could be due to @@ -776,7 +775,7 @@ static int eb_reserve(struct i915_execbuffer *eb) pin_flags |= PIN_NONBLOCK; if (pass >= 1) - unpinned = eb_unbind(eb, pass >= 2); + eb_unbind(eb, pass >= 2); if (pass == 2) { err = mutex_lock_interruptible(&eb->context->vm->mutex); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index cad4a6017f4b..33d5d5178103 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -455,7 +455,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj, struct page *page; void *data, *vaddr; int err; - char c; + char __maybe_unused c; len = PAGE_SIZE - pg; if (len > remain) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index ee531a5c142c..21af0ec52223 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -296,9 +296,7 @@ static const struct intel_wakeref_ops wf_ops = { void intel_engine_init__pm(struct intel_engine_cs *engine) { - struct intel_runtime_pm *rpm = engine->uncore->rpm; - - intel_wakeref_init(&engine->wakeref, rpm, &wf_ops); + intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops); intel_engine_init_heartbeat(engine); intel_gsc_idle_msg_enable(engine); diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index 750326434677..2ebd937f3b4c 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -2327,6 +2327,7 @@ static u32 active_ccid(struct intel_engine_cs *engine) static void execlists_capture(struct intel_engine_cs *engine) { + struct drm_i915_private *i915 = engine->i915; struct execlists_capture *cap; if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)) @@ -2375,7 +2376,7 @@ static void execlists_capture(struct intel_engine_cs *engine) goto err_rq; 
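/* the capture is now queued on the driver's unordered wq, not the system_wq */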
INIT_WORK(&cap->work, execlists_capture_work); - schedule_work(&cap->work); + queue_work(i915->unordered_wq, &cap->work); return; err_rq: @@ -3680,7 +3681,7 @@ static void virtual_context_destroy(struct kref *kref) * lock, we can delegate the free of the engine to an RCU worker. */ INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy); - queue_rcu_work(system_wq, &ve->rcu); + queue_rcu_work(ve->context.engine->i915->unordered_wq, &ve->rcu); } static void virtual_engine_initial_hint(struct virtual_engine *ve) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c index cadfd85785b1..86b5a9ba323d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c @@ -88,10 +88,11 @@ static void pool_free_work(struct work_struct *wrk) { struct intel_gt_buffer_pool *pool = container_of(wrk, typeof(*pool), work.work); + struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool); if (pool_free_older_than(pool, HZ)) - schedule_delayed_work(&pool->work, - round_jiffies_up_relative(HZ)); + queue_delayed_work(gt->i915->unordered_wq, &pool->work, + round_jiffies_up_relative(HZ)); } static void pool_retire(struct i915_active *ref) @@ -99,6 +100,7 @@ static void pool_retire(struct i915_active *ref) struct intel_gt_buffer_pool_node *node = container_of(ref, typeof(*node), active); struct intel_gt_buffer_pool *pool = node->pool; + struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool); struct list_head *list = bucket_for_size(pool, node->obj->base.size); unsigned long flags; @@ -116,8 +118,8 @@ static void pool_retire(struct i915_active *ref) WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */ spin_unlock_irqrestore(&pool->lock, flags); - schedule_delayed_work(&pool->work, - round_jiffies_up_relative(HZ)); + queue_delayed_work(gt->i915->unordered_wq, &pool->work, + round_jiffies_up_relative(HZ)); } void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index 8f888d36f16d..62fd00c9e519 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -376,7 +376,7 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir) if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) gt->i915->l3_parity.which_slice |= 1 << 0; - schedule_work(>->i915->l3_parity.error_work); + queue_work(gt->i915->unordered_wq, >->i915->l3_parity.error_work); } void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index c2e69bafd02b..5a942af0a14e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -137,7 +137,7 @@ void intel_gt_pm_init_early(struct intel_gt *gt) * runtime_pm is per-device rather than per-tile, so this is still the * correct structure. 
*/ - intel_wakeref_init(>->wakeref, >->i915->runtime_pm, &wf_ops); + intel_wakeref_init(>->wakeref, gt->i915, &wf_ops); seqcount_mutex_init(>->stats.lock, >->wakeref.mutex); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index 1dfd01668c79..d1a382dfaa1d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -116,7 +116,7 @@ void intel_engine_add_retire(struct intel_engine_cs *engine, GEM_BUG_ON(intel_engine_is_virtual(engine)); if (add_retire(engine, tl)) - schedule_work(&engine->retire_work); + queue_work(engine->i915->unordered_wq, &engine->retire_work); } void intel_engine_init_retire(struct intel_engine_cs *engine) @@ -207,8 +207,8 @@ static void retire_work_handler(struct work_struct *work) struct intel_gt *gt = container_of(work, typeof(*gt), requests.retire_work.work); - schedule_delayed_work(>->requests.retire_work, - round_jiffies_up_relative(HZ)); + queue_delayed_work(gt->i915->unordered_wq, >->requests.retire_work, + round_jiffies_up_relative(HZ)); intel_gt_retire_requests(gt); } @@ -224,8 +224,8 @@ void intel_gt_park_requests(struct intel_gt *gt) void intel_gt_unpark_requests(struct intel_gt *gt) { - schedule_delayed_work(>->requests.retire_work, - round_jiffies_up_relative(HZ)); + queue_delayed_work(gt->i915->unordered_wq, >->requests.retire_work, + round_jiffies_up_relative(HZ)); } void intel_gt_fini_requests(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 195ff72d7a14..e2152f75ba2e 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1625,7 +1625,7 @@ void __intel_init_wedge(struct intel_wedge_me *w, w->name = name; INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me); - schedule_delayed_work(&w->work, timeout); + queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout); } void __intel_fini_wedge(struct intel_wedge_me *w) diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index e68a99205599..e92e626d4994 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -73,13 +73,14 @@ static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) static void rps_timer(struct timer_list *t) { struct intel_rps *rps = from_timer(rps, t, timer); + struct intel_gt *gt = rps_to_gt(rps); struct intel_engine_cs *engine; ktime_t dt, last, timestamp; enum intel_engine_id id; s64 max_busy[3] = {}; timestamp = 0; - for_each_engine(engine, rps_to_gt(rps), id) { + for_each_engine(engine, gt, id) { s64 busy; int i; @@ -123,7 +124,7 @@ static void rps_timer(struct timer_list *t) busy += div_u64(max_busy[i], 1 << i); } - GT_TRACE(rps_to_gt(rps), + GT_TRACE(gt, "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n", busy, (int)div64_u64(100 * busy, dt), max_busy[0], max_busy[1], max_busy[2], @@ -133,12 +134,12 @@ static void rps_timer(struct timer_list *t) rps->cur_freq < rps->max_freq_softlimit) { rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD; rps->pm_interval = 1; - schedule_work(&rps->work); + queue_work(gt->i915->unordered_wq, &rps->work); } else if (100 * busy < rps->power.down_threshold * dt && rps->cur_freq > rps->min_freq_softlimit) { rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD; rps->pm_interval = 1; - schedule_work(&rps->work); + queue_work(gt->i915->unordered_wq, &rps->work); } else { rps->last_adj = 0; } @@ -973,7 +974,7 @@ static int rps_set_boost_freq(struct intel_rps *rps, u32 val) } 
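/* kick the RPS worker outside the lock, now on the driver's unordered wq */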
mutex_unlock(&rps->lock); if (boost) - schedule_work(&rps->work); + queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work); return 0; } @@ -1025,7 +1026,8 @@ void intel_rps_boost(struct i915_request *rq) if (!atomic_fetch_inc(&slpc->num_waiters)) { GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n", rq->fence.context, rq->fence.seqno); - schedule_work(&slpc->boost_work); + queue_work(rps_to_gt(rps)->i915->unordered_wq, + &slpc->boost_work); } return; @@ -1041,7 +1043,7 @@ void intel_rps_boost(struct i915_request *rq) rq->fence.context, rq->fence.seqno); if (READ_ONCE(rps->cur_freq) < rps->boost_freq) - schedule_work(&rps->work); + queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work); WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */ } @@ -1900,7 +1902,7 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) gen6_gt_pm_mask_irq(gt, events); rps->pm_iir |= events; - schedule_work(&rps->work); + queue_work(gt->i915->unordered_wq, &rps->work); } void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) @@ -1917,7 +1919,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) gen6_gt_pm_mask_irq(gt, events); rps->pm_iir |= events; - schedule_work(&rps->work); + queue_work(gt->i915->unordered_wq, &rps->work); spin_unlock(gt->irq_lock); } diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c index 542ce6d2de19..78cdfc6f315f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c @@ -27,7 +27,7 @@ static void perf_begin(struct intel_gt *gt) /* Boost gpufreq to max [waitboost] and keep it fixed */ atomic_inc(>->rps.num_waiters); - schedule_work(>->rps.work); + queue_work(gt->i915->unordered_wq, >->rps.work); flush_work(>->rps.work); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c index 0ff864da92df..331cec07c125 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c @@ -301,7 +301,6 @@ guc_capture_alloc_steered_lists(struct intel_guc *guc, const struct __guc_mmio_reg_descr_group *list; struct __guc_mmio_reg_descr_group *extlists; struct __guc_mmio_reg_descr *extarray; - struct sseu_dev_info *sseu; bool has_xehpg_extregs; /* steered registers currently only exist for the render-class */ @@ -318,7 +317,6 @@ guc_capture_alloc_steered_lists(struct intel_guc *guc, if (has_xehpg_extregs) num_steer_regs += ARRAY_SIZE(xehpg_extregs); - sseu = >->info.sseu; for_each_ss_steering(iter, gt, slice, subslice) num_tot_regs += num_steer_regs; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 3c4ae1da0d41..05f9348b7a9d 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -2833,7 +2833,7 @@ static int command_scan(struct parser_exec_state *s, static int scan_workload(struct intel_vgpu_workload *workload) { - unsigned long gma_head, gma_tail, gma_bottom; + unsigned long gma_head, gma_tail; struct parser_exec_state s; int ret = 0; @@ -2843,7 +2843,6 @@ static int scan_workload(struct intel_vgpu_workload *workload) gma_head = workload->rb_start + workload->rb_head; gma_tail = workload->rb_start + workload->rb_tail; - gma_bottom = workload->rb_start + _RING_CTL_BUF_SIZE(workload->rb_ctl); s.buf_type = RING_BUFFER_INSTRUCTION; s.buf_addr_type = GTT_BUFFER; @@ -2874,7 +2873,7 @@ out: static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { - unsigned long gma_head, 
gma_tail, gma_bottom, ring_size, ring_tail; + unsigned long gma_head, gma_tail, ring_size, ring_tail; struct parser_exec_state s; int ret = 0; struct intel_vgpu_workload *workload = container_of(wa_ctx, @@ -2891,7 +2890,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) PAGE_SIZE); gma_head = wa_ctx->indirect_ctx.guest_gma; gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail; - gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size; s.buf_type = RING_BUFFER_INSTRUCTION; s.buf_addr_type = GTT_BUFFER; diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 97244541ec28..75cbc43b326d 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -132,8 +132,20 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv) if (dev_priv->display.hotplug.dp_wq == NULL) goto out_free_wq; + /* + * The unordered i915 workqueue should be used for all work + * scheduling that does not require running in order; such work + * used to be scheduled on the system_wq before moving to a + * driver instance due to the deprecation of + * flush_scheduled_work(). + */ + dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0); + if (dev_priv->unordered_wq == NULL) + goto out_free_dp_wq; + return 0; +out_free_dp_wq: + destroy_workqueue(dev_priv->display.hotplug.dp_wq); out_free_wq: destroy_workqueue(dev_priv->wq); out_err: @@ -144,6 +156,7 @@ out_err: static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) { + destroy_workqueue(dev_priv->unordered_wq); destroy_workqueue(dev_priv->display.hotplug.dp_wq); destroy_workqueue(dev_priv->wq); } @@ -759,8 +772,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i915 = i915_driver_create(pdev, ent); if (IS_ERR(i915)) { - ret = PTR_ERR(i915); - goto out_pci_disable; + pci_disable_device(pdev); + return PTR_ERR(i915); } ret = i915_driver_early_probe(i915); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b457a37e67c4..b4cf6f0f636d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -260,6 +260,16 @@ struct drm_i915_private { */ struct workqueue_struct *wq; + /** + * unordered_wq - internal workqueue for unordered work + * + * This workqueue should be used for all unordered work + * scheduling within i915, which used to be scheduled on the + * system_wq before moving to a driver instance due to the + * deprecation of flush_scheduled_work().
+ */ + struct workqueue_struct *unordered_wq; + /* pm private clock gating functions */ const struct drm_i915_clock_gating_funcs *clock_gating_funcs; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8ed7c39c2b30..7a4f462e8b70 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4421,8 +4421,10 @@ #define GEN8_DE_MISC_IMR _MMIO(0x44464) #define GEN8_DE_MISC_IIR _MMIO(0x44468) #define GEN8_DE_MISC_IER _MMIO(0x4446c) -#define GEN8_DE_MISC_GSE (1 << 27) -#define GEN8_DE_EDP_PSR (1 << 19) +#define XELPDP_PMDEMAND_RSPTOUT_ERR REG_BIT(27) +#define GEN8_DE_MISC_GSE REG_BIT(27) +#define GEN8_DE_EDP_PSR REG_BIT(19) +#define XELPDP_PMDEMAND_RSP REG_BIT(3) #define GEN8_PCU_ISR _MMIO(0x444e0) #define GEN8_PCU_IMR _MMIO(0x444e4) @@ -4507,6 +4509,23 @@ #define XELPDP_DP_ALT_HPD_LONG_DETECT REG_BIT(1) #define XELPDP_DP_ALT_HPD_SHORT_DETECT REG_BIT(0) +#define XELPDP_INITIATE_PMDEMAND_REQUEST(dword) _MMIO(0x45230 + 4 * (dword)) +#define XELPDP_PMDEMAND_QCLK_GV_BW_MASK REG_GENMASK(31, 16) +#define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK REG_GENMASK(14, 12) +#define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK REG_GENMASK(11, 8) +#define XELPDP_PMDEMAND_PIPES_MASK REG_GENMASK(7, 6) +#define XELPDP_PMDEMAND_DBUFS_MASK REG_GENMASK(5, 4) +#define XELPDP_PMDEMAND_PHYS_MASK REG_GENMASK(2, 0) + +#define XELPDP_PMDEMAND_REQ_ENABLE REG_BIT(31) +#define XELPDP_PMDEMAND_CDCLK_FREQ_MASK REG_GENMASK(30, 20) +#define XELPDP_PMDEMAND_DDICLK_FREQ_MASK REG_GENMASK(18, 8) +#define XELPDP_PMDEMAND_SCALERS_MASK REG_GENMASK(6, 4) +#define XELPDP_PMDEMAND_PLLS_MASK REG_GENMASK(2, 0) + +#define GEN12_DCPR_STATUS_1 _MMIO(0x46440) +#define XELPDP_PMDEMAND_INFLIGHT_STATUS REG_BIT(26) + #define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004) /* Required on all Ironlake and Sandybridge according to the B-Spec. 
*/ #define ILK_ELPIN_409_SELECT REG_BIT(25) @@ -4666,6 +4685,9 @@ #define DCPR_SEND_RESP_IMM REG_BIT(25) #define DCPR_CLEAR_MEMSTAT_DIS REG_BIT(24) +#define XELPD_CHICKEN_DCPR_3 _MMIO(0x46438) +#define DMD_RSP_TIMEOUT_DISABLE REG_BIT(19) + #define SKL_DFSM _MMIO(0x51000) #define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27) #define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 630a732aaecc..894068bb37b6 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -290,7 +290,7 @@ static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer) if (!i915_request_completed(rq)) { if (llist_add(&rq->watchdog.link, >->watchdog.list)) - schedule_work(>->watchdog.work); + queue_work(gt->i915->unordered_wq, >->watchdog.work); } else { i915_request_put(rq); } diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 2f79d232b04a..6e49caf241a5 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -27,9 +27,7 @@ #include <drm/drm_print.h> #include <drm/i915_pciids.h> -#include "display/intel_cdclk.h" -#include "display/intel_de.h" -#include "display/intel_display.h" +#include "display/intel_display_device.h" #include "gt/intel_gt_regs.h" #include "i915_drv.h" #include "i915_reg.h" @@ -411,125 +409,23 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) { struct intel_device_info *info = mkwrite_device_info(dev_priv); struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv); - struct intel_display_runtime_info *display_runtime = - DISPLAY_RUNTIME_INFO(dev_priv); - enum pipe pipe; - - /* Wa_14011765242: adl-s A0,A1 */ - if (IS_ADLS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A2)) - for_each_pipe(dev_priv, pipe) - display_runtime->num_scalers[pipe] = 0; - else if (DISPLAY_VER(dev_priv) >= 11) { - for_each_pipe(dev_priv, pipe) - display_runtime->num_scalers[pipe] = 2; - } else if (DISPLAY_VER(dev_priv) >= 9) { - display_runtime->num_scalers[PIPE_A] = 2; - display_runtime->num_scalers[PIPE_B] = 2; - display_runtime->num_scalers[PIPE_C] = 1; - } - - BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES); - if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv)) - for_each_pipe(dev_priv, pipe) - display_runtime->num_sprites[pipe] = 4; - else if (DISPLAY_VER(dev_priv) >= 11) - for_each_pipe(dev_priv, pipe) - display_runtime->num_sprites[pipe] = 6; - else if (DISPLAY_VER(dev_priv) == 10) - for_each_pipe(dev_priv, pipe) - display_runtime->num_sprites[pipe] = 3; - else if (IS_BROXTON(dev_priv)) { - /* - * Skylake and Broxton currently don't expose the topmost plane as its - * use is exclusive with the legacy cursor and we only want to expose - * one of those, not both. Until we can safely expose the topmost plane - * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, - * we don't expose the topmost plane at all to prevent ABI breakage - * down the line. 
- */ + if (HAS_DISPLAY(dev_priv)) + intel_display_device_info_runtime_init(dev_priv); - display_runtime->num_sprites[PIPE_A] = 2; - display_runtime->num_sprites[PIPE_B] = 2; - display_runtime->num_sprites[PIPE_C] = 1; - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - for_each_pipe(dev_priv, pipe) - display_runtime->num_sprites[pipe] = 2; - } else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) { - for_each_pipe(dev_priv, pipe) - display_runtime->num_sprites[pipe] = 1; - } - - if (HAS_DISPLAY(dev_priv) && - (IS_DGFX(dev_priv) || DISPLAY_VER(dev_priv) >= 14) && - !(intel_de_read(dev_priv, GU_CNTL_PROTECTED) & DEPRESENT)) { - drm_info(&dev_priv->drm, "Display not present, disabling\n"); - - display_runtime->pipe_mask = 0; + /* Display may have been disabled by runtime init */ + if (!HAS_DISPLAY(dev_priv)) { + dev_priv->drm.driver_features &= ~(DRIVER_MODESET | + DRIVER_ATOMIC); + info->display = &no_display; } - if (HAS_DISPLAY(dev_priv) && IS_GRAPHICS_VER(dev_priv, 7, 8) && - HAS_PCH_SPLIT(dev_priv)) { - u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); - u32 sfuse_strap = intel_de_read(dev_priv, SFUSE_STRAP); - - /* - * SFUSE_STRAP is supposed to have a bit signalling the display - * is fused off. Unfortunately it seems that, at least in - * certain cases, fused off display means that PCH display - * reads don't land anywhere. In that case, we read 0s. - * - * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK - * should be set when taking over after the firmware. - */ - if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || - sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || - (HAS_PCH_CPT(dev_priv) && - !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { - drm_info(&dev_priv->drm, - "Display fused off, disabling\n"); - display_runtime->pipe_mask = 0; - } else if (fuse_strap & IVB_PIPE_C_DISABLE) { - drm_info(&dev_priv->drm, "PipeC fused off\n"); - display_runtime->pipe_mask &= ~BIT(PIPE_C); - display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); - } - } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) { - u32 dfsm = intel_de_read(dev_priv, SKL_DFSM); - - if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { - display_runtime->pipe_mask &= ~BIT(PIPE_A); - display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A); - display_runtime->fbc_mask &= ~BIT(INTEL_FBC_A); - } - if (dfsm & SKL_DFSM_PIPE_B_DISABLE) { - display_runtime->pipe_mask &= ~BIT(PIPE_B); - display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B); - } - if (dfsm & SKL_DFSM_PIPE_C_DISABLE) { - display_runtime->pipe_mask &= ~BIT(PIPE_C); - display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); - } - - if (DISPLAY_VER(dev_priv) >= 12 && - (dfsm & TGL_DFSM_PIPE_D_DISABLE)) { - display_runtime->pipe_mask &= ~BIT(PIPE_D); - display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D); - } - - if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) - display_runtime->has_hdcp = 0; - - if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) - display_runtime->fbc_mask = 0; - - if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) - display_runtime->has_dmc = 0; + /* Disable nuclear pageflip by default on pre-g4x */ + if (!dev_priv->params.nuclear_pageflip && + DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) + dev_priv->drm.driver_features &= ~DRIVER_ATOMIC; - if (IS_DISPLAY_VER(dev_priv, 10, 12) && - (dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE)) - display_runtime->has_dsc = 0; - } + BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES); if (GRAPHICS_VER(dev_priv) == 6 && i915_vtd_active(dev_priv)) { 
drm_info(&dev_priv->drm, @@ -540,24 +436,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) runtime->rawclk_freq = intel_read_rawclk(dev_priv); drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq); - if (!HAS_DISPLAY(dev_priv)) { - dev_priv->drm.driver_features &= ~(DRIVER_MODESET | - DRIVER_ATOMIC); - info->display = &no_display; - - display_runtime->cpu_transcoder_mask = 0; - memset(display_runtime->num_sprites, 0, sizeof(display_runtime->num_sprites)); - memset(display_runtime->num_scalers, 0, sizeof(display_runtime->num_scalers)); - display_runtime->fbc_mask = 0; - display_runtime->has_hdcp = false; - display_runtime->has_dmc = false; - display_runtime->has_dsc = false; - } - - /* Disable nuclear pageflip by default on pre-g4x */ - if (!dev_priv->params.nuclear_pageflip && - DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - dev_priv->drm.driver_features &= ~DRIVER_ATOMIC; } /* diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index cf5122299b6b..6d8e5e5c0cba 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -658,5 +658,5 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm) init_intel_runtime_pm_wakeref(rpm); INIT_LIST_HEAD(&rpm->lmem_userfault_list); spin_lock_init(&rpm->lmem_userfault_lock); - intel_wakeref_auto_init(&rpm->userfault_wakeref, rpm); + intel_wakeref_auto_init(&rpm->userfault_wakeref, i915); } diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index dfd87d082218..718f2f1b6174 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -8,17 +8,18 @@ #include "intel_runtime_pm.h" #include "intel_wakeref.h" +#include "i915_drv.h" static void rpm_get(struct intel_wakeref *wf) { - wf->wakeref = intel_runtime_pm_get(wf->rpm); + wf->wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm); } static void rpm_put(struct intel_wakeref *wf) { intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref); - intel_runtime_pm_put(wf->rpm, wakeref); + intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref); INTEL_WAKEREF_BUG_ON(!wakeref); } @@ -74,7 +75,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags) /* Assume we are not in process context and so cannot sleep. 
*/ if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) { - mod_delayed_work(system_wq, &wf->work, + mod_delayed_work(wf->i915->unordered_wq, &wf->work, FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags)); return; } @@ -94,11 +95,11 @@ static void __intel_wakeref_put_work(struct work_struct *wrk) } void __intel_wakeref_init(struct intel_wakeref *wf, - struct intel_runtime_pm *rpm, + struct drm_i915_private *i915, const struct intel_wakeref_ops *ops, struct intel_wakeref_lockclass *key) { - wf->rpm = rpm; + wf->i915 = i915; wf->ops = ops; __mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex); @@ -137,17 +138,17 @@ static void wakeref_auto_timeout(struct timer_list *t) wakeref = fetch_and_zero(&wf->wakeref); spin_unlock_irqrestore(&wf->lock, flags); - intel_runtime_pm_put(wf->rpm, wakeref); + intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref); } void intel_wakeref_auto_init(struct intel_wakeref_auto *wf, - struct intel_runtime_pm *rpm) + struct drm_i915_private *i915) { spin_lock_init(&wf->lock); timer_setup(&wf->timer, wakeref_auto_timeout, 0); refcount_set(&wf->count, 0); wf->wakeref = 0; - wf->rpm = rpm; + wf->i915 = i915; } void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) @@ -161,13 +162,14 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) } /* Our mission is that we only extend an already active wakeref */ - assert_rpm_wakelock_held(wf->rpm); + assert_rpm_wakelock_held(&wf->i915->runtime_pm); if (!refcount_inc_not_zero(&wf->count)) { spin_lock_irqsave(&wf->lock, flags); if (!refcount_inc_not_zero(&wf->count)) { INTEL_WAKEREF_BUG_ON(wf->wakeref); - wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm); + wf->wakeref = + intel_runtime_pm_get_if_in_use(&wf->i915->runtime_pm); refcount_set(&wf->count, 1); } spin_unlock_irqrestore(&wf->lock, flags); diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 0b6b4852ab23..ec881b097368 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -39,7 +39,7 @@ struct intel_wakeref { intel_wakeref_t wakeref; - struct intel_runtime_pm *rpm; + struct drm_i915_private *i915; const struct intel_wakeref_ops *ops; struct delayed_work work; @@ -51,13 +51,13 @@ struct intel_wakeref_lockclass { }; void __intel_wakeref_init(struct intel_wakeref *wf, - struct intel_runtime_pm *rpm, + struct drm_i915_private *i915, const struct intel_wakeref_ops *ops, struct intel_wakeref_lockclass *key); -#define intel_wakeref_init(wf, rpm, ops) do { \ +#define intel_wakeref_init(wf, i915, ops) do { \ static struct intel_wakeref_lockclass __key; \ \ - __intel_wakeref_init((wf), (rpm), (ops), &__key); \ + __intel_wakeref_init((wf), (i915), (ops), &__key); \ } while (0) int __intel_wakeref_get_first(struct intel_wakeref *wf); @@ -262,7 +262,7 @@ __intel_wakeref_defer_park(struct intel_wakeref *wf) int intel_wakeref_wait_for_idle(struct intel_wakeref *wf); struct intel_wakeref_auto { - struct intel_runtime_pm *rpm; + struct drm_i915_private *i915; struct timer_list timer; intel_wakeref_t wakeref; spinlock_t lock; @@ -287,7 +287,7 @@ struct intel_wakeref_auto { void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout); void intel_wakeref_auto_init(struct intel_wakeref_auto *wf, - struct intel_runtime_pm *rpm); + struct drm_i915_private *i915); void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf); #endif /* INTEL_WAKEREF_H */ diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 36940ef10108..5c397a2df70e 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -391,7 +391,7 @@ static void close_object_list(struct list_head *objects, struct i915_address_space *vm) { struct drm_i915_gem_object *obj, *on; - int ignored; + int __maybe_unused ignored; list_for_each_entry_safe(obj, on, objects, st_link) { struct i915_vma *vma; diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c index daa985e5a19b..8f5ce71fa453 100644 --- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c @@ -523,12 +523,19 @@ static void task_ipc(struct work_struct *work) static int test_ipc(void *arg) { struct task_ipc ipc; + struct workqueue_struct *wq; int ret = 0; + wq = alloc_workqueue("i915-selftest", 0, 0); + if (wq == NULL) + return -ENOMEM; + /* Test use of i915_sw_fence as an interprocess signaling mechanism */ ipc.in = alloc_fence(); - if (!ipc.in) - return -ENOMEM; + if (!ipc.in) { + ret = -ENOMEM; + goto err_work; + } ipc.out = alloc_fence(); if (!ipc.out) { ret = -ENOMEM; @@ -540,7 +547,7 @@ static int test_ipc(void *arg) ipc.value = 0; INIT_WORK_ONSTACK(&ipc.work, task_ipc); - schedule_work(&ipc.work); + queue_work(wq, &ipc.work); wait_for_completion(&ipc.started); @@ -563,6 +570,9 @@ static int test_ipc(void *arg) free_fence(ipc.out); err_in: free_fence(ipc.in); +err_work: + destroy_workqueue(wq); + return ret; } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 0eda8b4ee17f..09d4bbcdcdbf 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -69,6 +69,7 @@ static void mock_device_release(struct drm_device *dev) i915_gem_drain_workqueue(i915); mock_fini_ggtt(to_gt(i915)->ggtt); + destroy_workqueue(i915->unordered_wq); destroy_workqueue(i915->wq); intel_region_ttm_device_fini(i915); @@ -208,6 +209,10 @@ struct drm_i915_private *mock_gem_device(void) if (!i915->wq) goto err_drv; + i915->unordered_wq = alloc_workqueue("mock-unordered", 0, 0); + if (!i915->unordered_wq) + goto err_wq; + mock_init_contexts(i915); /* allocate the ggtt */ @@ -239,6 +244,8 @@ struct drm_i915_private *mock_gem_device(void) err_context: intel_gt_driver_remove(to_gt(i915)); err_unlock: + destroy_workqueue(i915->unordered_wq); +err_wq: destroy_workqueue(i915->wq); err_drv: intel_region_ttm_device_fini(i915);
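
The workqueue conversion follows the same pattern at every call site above: allocate a driver-owned unordered workqueue at probe, queue work on it instead of the global system_wq, and destroy it at remove, so pending work can be flushed per driver now that flush_scheduled_work() is deprecated. A minimal sketch of that pattern, assuming the i915 fields introduced in this series (the work items w and dw are illustrative, not from the patch):

	/* probe: allocated alongside the ordered i915->wq */
	i915->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
	if (!i915->unordered_wq)
		return -ENOMEM;

	/* call sites: was schedule_work(&w) / schedule_delayed_work(&dw, HZ) */
	queue_work(i915->unordered_wq, &w);
	queue_delayed_work(i915->unordered_wq, &dw, HZ);

	/* remove: destroy_workqueue() drains any still-pending work first */
	destroy_workqueue(i915->unordered_wq);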