Diffstat (limited to 'drivers/gpu/drm/i915/display/intel_psr.c')
-rw-r--r-- | drivers/gpu/drm/i915/display/intel_psr.c | 258
1 files changed, 202 insertions, 56 deletions
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 56c17283ba2d..850b11f20285 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -23,6 +23,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_damage_helper.h>
+#include <drm/drm_debugfs.h>
 
 #include "i915_drv.h"
 #include "i915_reg.h"
@@ -32,6 +33,7 @@
 #include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dp_aux.h"
+#include "intel_frontbuffer.h"
 #include "intel_hdmi.h"
 #include "intel_psr.h"
 #include "intel_psr_regs.h"
@@ -234,23 +236,91 @@ static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
 		EDP_PSR_MASK(intel_dp->psr.transcoder);
 }
 
-static void psr_irq_control(struct intel_dp *intel_dp)
+static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
+			      enum transcoder cpu_transcoder)
 {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	i915_reg_t imr_reg;
-	u32 mask;
+	if (DISPLAY_VER(dev_priv) >= 8)
+		return EDP_PSR_CTL(cpu_transcoder);
+	else
+		return HSW_SRD_CTL;
+}
+
+static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
+				enum transcoder cpu_transcoder)
+{
+	if (DISPLAY_VER(dev_priv) >= 8)
+		return EDP_PSR_DEBUG(cpu_transcoder);
+	else
+		return HSW_SRD_DEBUG;
+}
+
+static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
+				   enum transcoder cpu_transcoder)
+{
+	if (DISPLAY_VER(dev_priv) >= 8)
+		return EDP_PSR_PERF_CNT(cpu_transcoder);
+	else
+		return HSW_SRD_PERF_CNT;
+}
+
+static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
+				 enum transcoder cpu_transcoder)
+{
+	if (DISPLAY_VER(dev_priv) >= 8)
+		return EDP_PSR_STATUS(cpu_transcoder);
+	else
+		return HSW_SRD_STATUS;
+}
+
+static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
+			      enum transcoder cpu_transcoder)
+{
+	if (DISPLAY_VER(dev_priv) >= 12)
+		return TRANS_PSR_IMR(cpu_transcoder);
+	else
+		return EDP_PSR_IMR;
+}
 
+static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
+			      enum transcoder cpu_transcoder)
+{
 	if (DISPLAY_VER(dev_priv) >= 12)
-		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
+		return TRANS_PSR_IIR(cpu_transcoder);
+	else
+		return EDP_PSR_IIR;
+}
+
+static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
+				  enum transcoder cpu_transcoder)
+{
+	if (DISPLAY_VER(dev_priv) >= 8)
+		return EDP_PSR_AUX_CTL(cpu_transcoder);
+	else
+		return HSW_SRD_AUX_CTL;
+}
+
+static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
+				   enum transcoder cpu_transcoder, int i)
+{
+	if (DISPLAY_VER(dev_priv) >= 8)
+		return EDP_PSR_AUX_DATA(cpu_transcoder, i);
 	else
-		imr_reg = EDP_PSR_IMR;
+		return HSW_SRD_AUX_DATA(i);
+}
+
+static void psr_irq_control(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+	u32 mask;
 
 	mask = psr_irq_psr_error_bit_get(intel_dp);
 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
 			psr_irq_pre_entry_bit_get(intel_dp);
 
-	intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask);
+	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
+		     psr_irq_mask_get(intel_dp), ~mask);
 }
 
 static void psr_event_print(struct drm_i915_private *i915,
@@ -296,12 +366,6 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 	ktime_t time_ns = ktime_get();
-	i915_reg_t imr_reg;
-
-	if (DISPLAY_VER(dev_priv) >= 12)
-		imr_reg = TRANS_PSR_IMR(cpu_transcoder);
-	else
-		imr_reg = EDP_PSR_IMR;
 
 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
 		intel_dp->psr.last_entry_attempt = time_ns;
@@ -339,7 +403,8 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 		 * again so we don't care about unmask the interruption
 		 * or unset irq_aux_error.
 		 */
-		intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
+		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
+			     0, psr_irq_psr_error_bit_get(intel_dp));
 
 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
 	}
@@ -467,6 +532,43 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 	}
 }
 
+static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+	u32 aux_clock_divider, aux_ctl;
+	/* write DP_SET_POWER=D0 */
+	static const u8 aux_msg[] = {
+		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
+		[1] = (DP_SET_POWER >> 8) & 0xff,
+		[2] = DP_SET_POWER & 0xff,
+		[3] = 1 - 1,
+		[4] = DP_SET_POWER_D0,
+	};
+	int i;
+
+	BUILD_BUG_ON(sizeof(aux_msg) > 20);
+	for (i = 0; i < sizeof(aux_msg); i += 4)
+		intel_de_write(dev_priv,
+			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
+			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
+
+	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+	/* Start with bits set for DDI_AUX_CTL register */
+	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
+					     aux_clock_divider);
+
+	/* Select only valid bits for SRD_AUX_CTL */
+	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
+		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
+		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
+		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
+
+	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
+		       aux_ctl);
+}
+
 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -528,6 +630,15 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
 	else
 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
 
+	/*
+	 * WA 0479: hsw,bdw
+	 * "Do not skip both TP1 and TP2/TP3"
+	 */
+	if (DISPLAY_VER(dev_priv) < 9 &&
+	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
+	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
+		val |= EDP_PSR_TP2_TP3_TIME_100us;
+
 check_tp3_sel:
 	if (intel_dp_source_supports_tps3(dev_priv) &&
 	    drm_dp_tps3_supported(intel_dp->dpcd))
@@ -577,7 +688,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
 	if (DISPLAY_VER(dev_priv) >= 8)
 		val |= EDP_PSR_CRC_ENABLE;
 
-	intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder),
+	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
 }
 
@@ -639,7 +750,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	}
 
 	/* Wa_22012278275:adl-p */
-	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
+	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
 		static const u8 map[] = {
 			2, /* 5 lines */
 			1, /* 6 lines */
@@ -685,7 +796,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
 	 * recommending keep this bit unset while PSR2 is enabled.
 	 */
-	intel_de_write(dev_priv, EDP_PSR_CTL(cpu_transcoder), 0);
+	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
 
 	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
 }
@@ -697,8 +808,10 @@ transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_trans
 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
 	else if (DISPLAY_VER(dev_priv) >= 12)
 		return cpu_transcoder == TRANSCODER_A;
-	else
+	else if (DISPLAY_VER(dev_priv) >= 9)
 		return cpu_transcoder == TRANSCODER_EDP;
+	else
+		return false;
 }
 
 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
@@ -807,7 +920,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
 		return;
 
 	/* Wa_16011303918:adl-p */
-	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
 		return;
 
 	/*
@@ -963,7 +1076,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 
 	/* JSL and EHL only supports eDP 1.3 */
-	if (IS_JSL_EHL(dev_priv)) {
+	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
 		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
 		return false;
 	}
@@ -975,7 +1088,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 	}
 
-	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
 		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
 		return false;
 	}
@@ -1033,7 +1146,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 
 	/* Wa_16011303918:adl-p */
 	if (crtc_state->vrr.enable &&
-	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
 		return false;
@@ -1201,13 +1314,15 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 
-	if (transcoder_has_psr2(dev_priv, cpu_transcoder))
-		drm_WARN_ON(&dev_priv->drm,
-			    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
+	drm_WARN_ON(&dev_priv->drm,
+		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
+		    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
 
 	drm_WARN_ON(&dev_priv->drm,
-		    intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder)) & EDP_PSR_ENABLE);
+		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
+
 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
+
 	lockdep_assert_held(&intel_dp->psr.lock);
 
 	/* psr1 and psr2 are mutually exclusive.*/
@@ -1247,8 +1362,7 @@ static void wm_optimization_wa(struct intel_dp *intel_dp,
 	bool set_wa_bit = false;
 
 	/* Wa_14015648006 */
-	if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
-	    IS_DISPLAY_VER(dev_priv, 11, 13))
+	if (IS_DISPLAY_VER(dev_priv, 11, 14))
 		set_wa_bit |= crtc_state->wm_level_disabled;
 
 	/* Wa_16013835468 */
@@ -1272,6 +1386,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 	u32 mask;
 
 	/*
+	 * Only HSW and BDW have PSR AUX registers that need to be setup.
+	 * SKL+ use hardcoded values PSR AUX transactions
+	 */
+	if (DISPLAY_VER(dev_priv) < 9)
+		hsw_psr_setup_aux(intel_dp);
+
+	/*
 	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
 	 * mask LPSP to avoid dependency on other drivers that might block
 	 * runtime_pm besides preventing other hw tracking issues now we
@@ -1282,11 +1403,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 	       EDP_PSR_DEBUG_MASK_LPSP |
 	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
 
-	if (DISPLAY_VER(dev_priv) < 11)
+	/*
+	 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
+	 * registers in order to keep the CURSURFLIVE tricks working :(
+	 */
+	if (IS_DISPLAY_VER(dev_priv, 9, 10))
 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
 
-	intel_de_write(dev_priv, EDP_PSR_DEBUG(cpu_transcoder),
-		       mask);
+	/* allow PSR with sprite enabled */
+	if (IS_HASWELL(dev_priv))
+		mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
+
+	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
 
 	psr_irq_control(intel_dp);
 
@@ -1320,7 +1448,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 	 * All supported adlp panels have 1-based X granularity, this may
 	 * cause issues if non-supported panels are used.
 	 */
-	if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+	if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
 		intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
 			     ADLP_1_BASED_X_GRANULARITY);
 	else if (IS_ALDERLAKE_P(dev_priv))
@@ -1328,7 +1456,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 			     ADLP_1_BASED_X_GRANULARITY);
 
 	/* Wa_16012604467:adlp,mtl[a0,b0] */
-	if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+	if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
 		intel_de_rmw(dev_priv,
 			     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
 			     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
@@ -1352,10 +1480,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
 	 * first time that PSR HW tries to activate so lets keep PSR disabled
 	 * to avoid any rendering problems.
 	 */
-	if (DISPLAY_VER(dev_priv) >= 12)
-		val = intel_de_read(dev_priv, TRANS_PSR_IIR(cpu_transcoder));
-	else
-		val = intel_de_read(dev_priv, EDP_PSR_IIR);
+	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
 	val &= psr_irq_psr_error_bit_get(intel_dp);
 	if (val) {
 		intel_dp->psr.sink_not_reliable = true;
@@ -1418,7 +1543,7 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
 		}
 
-		val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder));
+		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
 
 		return;
@@ -1432,7 +1557,7 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
 
 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
 	} else {
-		val = intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder),
+		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
 				   EDP_PSR_ENABLE, 0);
 
 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
@@ -1451,7 +1576,7 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
 		psr_status = EDP_PSR2_STATUS(cpu_transcoder);
 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
 	} else {
-		psr_status = EDP_PSR_STATUS(cpu_transcoder);
+		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
 	}
 
@@ -1489,7 +1614,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 
 	if (intel_dp->psr.psr2_enabled) {
 		/* Wa_16012604467:adlp,mtl[a0,b0] */
-		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
 			intel_de_rmw(dev_priv,
 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
@@ -1963,7 +2088,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
 		goto skip_sel_fetch_set_loop;
 
 	/* Wa_14014971492 */
-	if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+	if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
 	     IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
 	    crtc_state->splitter.enable)
 		pipe_clip.y1 = 0;
@@ -2106,6 +2231,12 @@ static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
 		if (crtc_state->crc_enabled && psr->enabled)
 			psr_force_hw_tracking_exit(intel_dp);
 
+		/*
+		 * Clear possible busy bits in case we have
+		 * invalidate -> flip -> flush sequence.
+		 */
+		intel_dp->psr.busy_frontbuffer_bits = 0;
+
 		mutex_unlock(&psr->lock);
 	}
 }
@@ -2151,7 +2282,7 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
 	 * defensive enough to cover everything.
 	 */
 	return intel_de_wait_for_clear(dev_priv,
-				       EDP_PSR_STATUS(cpu_transcoder),
+				       psr_status_reg(dev_priv, cpu_transcoder),
 				       EDP_PSR_STATUS_STATE_MASK, 50);
 }
 
@@ -2205,7 +2336,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
 		reg = EDP_PSR2_STATUS(cpu_transcoder);
 		mask = EDP_PSR2_STATUS_STATE_MASK;
 	} else {
-		reg = EDP_PSR_STATUS(cpu_transcoder);
+		reg = psr_status_reg(dev_priv, cpu_transcoder);
 		mask = EDP_PSR_STATUS_STATE_MASK;
 	}
 
@@ -2825,7 +2956,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
 			"SRDOFFACK",
 			"SRDENT_ON",
 		};
-		val = intel_de_read(dev_priv, EDP_PSR_STATUS(cpu_transcoder));
+		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
 		if (status_val < ARRAY_SIZE(live_status))
 			status = live_status[status_val];
@@ -2872,7 +3003,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
 		val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
 		enabled = val & EDP_PSR2_ENABLE;
 	} else {
-		val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder));
+		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
 		enabled = val & EDP_PSR_ENABLE;
 	}
 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
@@ -2884,7 +3015,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
 	/*
 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
-	val = intel_de_read(dev_priv, EDP_PSR_PERF_CNT(cpu_transcoder));
+	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
 	seq_printf(m, "Performance counter: %u\n",
 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
 
@@ -3029,7 +3160,7 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
 	};
 	const char *str;
 	int ret;
-	u8 val;
+	u8 status, error_status;
 
 	if (!CAN_PSR(intel_dp)) {
 		seq_puts(m, "PSR Unsupported\n");
@@ -3039,19 +3170,34 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
 	if (connector->base.status != connector_status_connected)
 		return -ENODEV;
 
-	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
-	if (ret != 1)
-		return ret < 0 ? ret : -EIO;
+	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
+	if (ret)
+		return ret;
 
-	val &= DP_PSR_SINK_STATE_MASK;
-	if (val < ARRAY_SIZE(sink_status))
-		str = sink_status[val];
+	status &= DP_PSR_SINK_STATE_MASK;
+	if (status < ARRAY_SIZE(sink_status))
+		str = sink_status[status];
 	else
 		str = "unknown";
 
-	seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+	seq_printf(m, "Sink PSR status: 0x%x [%s]\n", status, str);
 
-	return 0;
+	seq_printf(m, "Sink PSR error status: 0x%x", error_status);
+
+	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
+			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
+			    DP_PSR_LINK_CRC_ERROR))
+		seq_puts(m, ":\n");
+	else
+		seq_puts(m, "\n");
+	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
+		seq_puts(m, "\tPSR RFB storage error\n");
+	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
+		seq_puts(m, "\tPSR VSC SDP uncorrectable error\n");
+	if (error_status & DP_PSR_LINK_CRC_ERROR)
+		seq_puts(m, "\tPSR Link CRC error\n");
+
+	return ret;
 }
 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
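
Editor's note: a brief illustrative sketch, not part of the diff above. It shows the pattern the new psr_*_reg() helpers establish: every PSR1 register access goes through a helper that returns the per-transcoder EDP_PSR_* register on display version 8 and newer and falls back to the legacy HSW_SRD_* register on Haswell, so call sites no longer open-code the DISPLAY_VER() check. psr1_is_enabled() below is a hypothetical caller, not a function added by this patch.

/* Hypothetical caller, for illustration only -- not added by this patch. */
static bool psr1_is_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	/* psr_ctl_reg() picks HSW_SRD_CTL on HSW, EDP_PSR_CTL(transcoder) on BDW+ */
	return intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) &
		EDP_PSR_ENABLE;
}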